.circleci/config.yml (117 additions, 0 deletions)
@@ -521,6 +521,7 @@ commands:
- store_artifacts:
path: /tmp/testlogs

# =================== FX tests start ======================== #
test-fx_core:
description: "Test the fx core"
steps:
@@ -720,6 +721,61 @@ commands:
- store_artifacts:
path: /tmp/testlogs

# =================== FX tests end ======================== #

# =================== Dynamo tests start ======================== #
test-dynamo-fx_ts:
description: "Test the Dynamo fx_ts_compat path"
steps:
- run:
name: Run Dynamo fx_ts_compat core tests
command: |
cd py/torch_tensorrt/dynamo/fx_ts_compat/test
pushd core/
pytest --junitxml=/tmp/artifacts/test_results/dynamo/fx_ts_compat/test_results.xml
popd

- store_test_results:
path: /tmp/artifacts
- store_artifacts:
path: /tmp/testlogs

test-dynamo-torch_compile-core:
description: "Test the Dynamo torch_compile path"
steps:
- run:
name: Run Dynamo torch_compile core tests
command: |
cd py/torch_tensorrt/dynamo/torch_compile
pushd test/
pytest --junitxml=/tmp/artifacts/test_results/dynamo/torch_compile/test_results.xml
popd

- store_test_results:
path: /tmp/artifacts
- store_artifacts:
path: /tmp/testlogs

test-dynamo-torch_compile:
description: "Test the Dynamo torch_compile path"
steps:
- run:
name: Run Dynamo torch_compile E2E tests
command: |
cd py/torch_tensorrt/dynamo/
pushd test/
pip3 install timm
pip3 install transformers
pytest --junitxml=/tmp/artifacts/test_results/dynamo/torch_compile/test_results.xml --ir torch_compile
popd

- store_test_results:
path: /tmp/artifacts
- store_artifacts:
path: /tmp/testlogs

# =================== Dynamo tests end ======================== #

# Define a job to be invoked later in a workflow.
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
jobs:
@@ -911,6 +967,43 @@ jobs:
- dump-test-env
- test-fx-no-aten

test-py-dynamo-x86_64-linux:
parameters:
torch-build:
type: string
torch-build-index:
type: string
trt-version-long:
type: string
python-version:
type: string
machine:
image: linux-cuda-11:2023.02.1
resource_class: gpu.nvidia.large
steps:
- checkout
- setup-py-version:
python-version: << parameters.python-version >>
- attach_workspace:
at: /tmp/dist/
- install-torch-from-index:
torch-build: << parameters.torch-build >>
torch-build-index: << parameters.torch-build-index >>
- create-py-env:
trt-version-long: << parameters.trt-version-long >>
- install-cudnn
# - run:
# name: "Set LD_LIBRARY_PATH path to include the installed CUDNN"
# command: export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/:$LD_LIBRARY_PATH
- run:
name: "Install torch-tensorrt"
command: pip3 install --pre /tmp/dist/x86_64-linux/*cp39-cp39*.whl
# We install torch after torch-trt because pip automatically enforces the version constraint otherwise
- dump-test-env
- test-dynamo-torch_compile
- test-dynamo-torch_compile-core
- test-dynamo-fx_ts

package-x86_64-linux:
parameters:
enabled:
@@ -1300,6 +1393,14 @@ workflows:
requires:
- build-x86_64-linux

- test-py-dynamo-x86_64-linux:
torch-build: << pipeline.parameters.torch-build >>
torch-build-index: << pipeline.parameters.torch-build-index >>
trt-version-long: << pipeline.parameters.trt-version-long >>
python-version: << pipeline.parameters.python-version >>
requires:
- build-x86_64-linux

- build-x86_64-linux:
name: build-x86_64-linux-legacy
torch-build: << pipeline.parameters.torch-build-legacy >>
@@ -1374,6 +1475,14 @@ workflows:
requires:
- package-x86_64-linux

- test-py-dynamo-x86_64-linux:
torch-build: << pipeline.parameters.torch-build >>
torch-build-index: << pipeline.parameters.torch-build-index >>
trt-version-long: << pipeline.parameters.trt-version-long >>
python-version: << pipeline.parameters.python-version >>
requires:
- package-x86_64-linux

on-push:
jobs:
- build-x86_64-linux:
@@ -1407,6 +1516,14 @@ workflows:
requires:
- build-x86_64-linux

- test-py-dynamo-x86_64-linux:
torch-build: << pipeline.parameters.torch-build >>
torch-build-index: << pipeline.parameters.torch-build-index >>
trt-version-long: << pipeline.parameters.trt-version-long >>
python-version: << pipeline.parameters.python-version >>
requires:
- build-x86_64-linux

- build-x86_64-linux-cmake:
torch-build: << pipeline.parameters.torch-build >>
torch-build-index: << pipeline.parameters.torch-build-index >>
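Note on the new `test-dynamo-torch_compile` step above: it installs `timm` and `transformers` and then runs pytest with `--ir torch_compile`. As a rough illustration of what that E2E path exercises, here is a minimal sketch, assuming `torch_tensorrt.compile` accepts `ir="torch_compile"` as the pytest flag suggests; the toy model and input shape are placeholders, not taken from the test suite:

import torch
import torch_tensorrt

# Toy stand-in for the timm/transformers models the CI job installs.
class ToyModel(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 16, 3, padding=1)

    def forward(self, x):
        return torch.relu(self.conv(x))

model = ToyModel().eval().cuda()
inputs = [torch.randn(1, 3, 224, 224, device="cuda")]

# Route compilation through the Dynamo torch_compile frontend,
# mirroring pytest's "--ir torch_compile" selection.
trt_model = torch_tensorrt.compile(model, ir="torch_compile", inputs=inputs)

with torch.no_grad():
    out = trt_model(*inputs)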
py/setup.py (41 additions, 26 deletions)
@@ -362,6 +362,10 @@ def run(self):
"torch_tensorrt.fx.tools",
"torch_tensorrt.fx.tracer.acc_tracer",
"torch_tensorrt.fx.tracer.dispatch_tracer",
"torch_tensorrt.dynamo",
"torch_tensorrt.dynamo.fx_ts_compat",
"torch_tensorrt.dynamo.fx_ts_compat.passes",
"torch_tensorrt.dynamo.fx_ts_compat.tools",
]
package_dir = {
"torch_tensorrt.fx": "torch_tensorrt/fx",
@@ -370,11 +374,47 @@ def run(self):
"torch_tensorrt.fx.tools": "torch_tensorrt/fx/tools",
"torch_tensorrt.fx.tracer.acc_tracer": "torch_tensorrt/fx/tracer/acc_tracer",
"torch_tensorrt.fx.tracer.dispatch_tracer": "torch_tensorrt/fx/tracer/dispatch_tracer",
"torch_tensorrt.dynamo": "torch_tensorrt/dynamo",
"torch_tensorrt.dynamo.fx_ts_compat": "torch_tensorrt/dynamo/fx_ts_compat",
"torch_tensorrt.dynamo.fx_ts_compat.passes": "torch_tensorrt/dynamo/fx_ts_compat/passes",
"torch_tensorrt.dynamo.fx_ts_compat.tools": "torch_tensorrt/dynamo/fx_ts_compat/tools",
}

with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()

if FX_ONLY:
package_data_list = [
"_Input.py",
]
else:
package_data_list = [
"lib/*",
"include/torch_tensorrt/*.h",
"include/torch_tensorrt/core/*.h",
"include/torch_tensorrt/core/conversion/*.h",
"include/torch_tensorrt/core/conversion/conversionctx/*.h",
"include/torch_tensorrt/core/conversion/converters/*.h",
"include/torch_tensorrt/core/conversion/evaluators/*.h",
"include/torch_tensorrt/core/conversion/tensorcontainer/*.h",
"include/torch_tensorrt/core/conversion/var/*.h",
"include/torch_tensorrt/core/ir/*.h",
"include/torch_tensorrt/core/lowering/*.h",
"include/torch_tensorrt/core/lowering/passes/*.h",
"include/torch_tensorrt/core/partitioning/*.h",
"include/torch_tensorrt/core/partitioning/segmentedblock/*.h",
"include/torch_tensorrt/core/partitioning/partitioninginfo/*.h",
"include/torch_tensorrt/core/partitioning/partitioningctx/*.h",
"include/torch_tensorrt/core/plugins/*.h",
"include/torch_tensorrt/core/plugins/impl/*.h",
"include/torch_tensorrt/core/runtime/*.h",
"include/torch_tensorrt/core/util/*.h",
"include/torch_tensorrt/core/util/logging/*.h",
"bin/*",
"BUILD",
"WORKSPACE",
]

setup(
name="torch_tensorrt",
version=__version__,
@@ -418,32 +458,7 @@ def run(self):
python_requires=">=3.8",
include_package_data=True,
package_data={
"torch_tensorrt": [
"lib/*",
"include/torch_tensorrt/*.h",
"include/torch_tensorrt/core/*.h",
"include/torch_tensorrt/core/conversion/*.h",
"include/torch_tensorrt/core/conversion/conversionctx/*.h",
"include/torch_tensorrt/core/conversion/converters/*.h",
"include/torch_tensorrt/core/conversion/evaluators/*.h",
"include/torch_tensorrt/core/conversion/tensorcontainer/*.h",
"include/torch_tensorrt/core/conversion/var/*.h",
"include/torch_tensorrt/core/ir/*.h",
"include/torch_tensorrt/core/lowering/*.h",
"include/torch_tensorrt/core/lowering/passes/*.h",
"include/torch_tensorrt/core/partitioning/*.h",
"include/torch_tensorrt/core/partitioning/segmentedblock/*.h",
"include/torch_tensorrt/core/partitioning/partitioninginfo/*.h",
"include/torch_tensorrt/core/partitioning/partitioningctx/*.h",
"include/torch_tensorrt/core/plugins/*.h",
"include/torch_tensorrt/core/plugins/impl/*.h",
"include/torch_tensorrt/core/runtime/*.h",
"include/torch_tensorrt/core/util/*.h",
"include/torch_tensorrt/core/util/logging/*.h",
"bin/*",
"BUILD",
"WORKSPACE",
],
"torch_tensorrt": package_data_list,
},
exclude_package_data={
"": ["*.cpp"],
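The `package_data` refactor above keys off an `FX_ONLY` switch that is defined earlier in `setup.py` and not shown in this diff. For context, a common way such a flag is wired is to consume a custom argument before setuptools parses argv; this is a hypothetical sketch, and the `--fx-only` flag name is an assumption, not confirmed by the diff:

import sys

# Hypothetical: consume a custom "--fx-only" build flag before
# setuptools sees argv. The real definition of FX_ONLY sits outside
# the hunks shown in this diff.
FX_ONLY = False
if "--fx-only" in sys.argv:
    FX_ONLY = True
    sys.argv.remove("--fx-only")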
py/torch_tensorrt/_Device.py (24 additions, 10 deletions)
@@ -1,11 +1,17 @@
import torch

from torch_tensorrt import _enums
# from torch_tensorrt import _enums
import tensorrt as trt
from torch_tensorrt import logging
from torch_tensorrt import _C

import warnings

try:
from torch_tensorrt import _C
except:
warnings.warn(
"Unable to import torchscript frontend core and torch-tensorrt runtime. Some dependent features may be unavailable."
)


class Device(object):
"""
@@ -51,7 +57,7 @@ def __init__(self, *args, **kwargs):
)
else:
(self.device_type, id) = Device._parse_device_str(args[0])
if self.device_type == _enums.DeviceType.GPU:
if self.device_type == trt.DeviceType.GPU:
self.gpu_id = id
else:
self.dla_core = id
@@ -64,7 +70,7 @@ def __init__(self, *args, **kwargs):
elif len(args) == 0:
if "gpu_id" in kwargs or "dla_core" in kwargs:
if "dla_core" in kwargs:
self.device_type = _enums.DeviceType.DLA
self.device_type = trt.DeviceType.DLA
self.dla_core = kwargs["dla_core"]
if "gpu_id" in kwargs:
self.gpu_id = kwargs["gpu_id"]
@@ -76,7 +82,7 @@ def __init__(self, *args, **kwargs):
)
else:
self.gpu_id = kwargs["gpu_id"]
self.device_type = _enums.DeviceType.GPU
self.device_type = trt.DeviceType.GPU
else:
raise ValueError(
"Either gpu_id or dla_core or both must be defined if no string with device specs is provided as an arg"
@@ -97,15 +103,23 @@ def __init__(self, *args, **kwargs):
def __str__(self) -> str:
return (
"Device(type={}, gpu_id={}".format(self.device_type, self.gpu_id) + ")"
if self.device_type == _enums.DeviceType.GPU
if self.device_type == trt.DeviceType.GPU
else ", dla_core={}, allow_gpu_fallback={}".format(
self.dla_core, self.allow_gpu_fallback
)
)

def _to_internal(self) -> _C.Device:
internal_dev = _C.Device()
internal_dev.device_type = self.device_type
if self.device_type == trt.DeviceType.GPU:
internal_dev.device_type = _C.DeviceType.GPU
elif self.device_type == trt.DeviceType.DLA:
internal_dev.device_type = _C.DeviceType.DLA
else:
raise ValueError(
"Invalid DeviceType detected while parsing the Device class"
)

internal_dev.gpu_id = self.gpu_id
internal_dev.dla_core = self.dla_core
internal_dev.allow_gpu_fallback = self.allow_gpu_fallback
@@ -136,6 +150,6 @@ def _parse_device_str(s):
s = s.lower()
spec = s.split(":")
if spec[0] == "gpu" or spec[0] == "cuda":
return (_enums.DeviceType.GPU, int(spec[1]))
return (trt.DeviceType.GPU, int(spec[1]))
elif spec[0] == "dla":
return (_enums.DeviceType.DLA, int(spec[1]))
return (trt.DeviceType.DLA, int(spec[1]))
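With `_Device.py` now using tensorrt's own `DeviceType` enum in place of `_enums`, construction and parsing are unchanged from the user's side. A brief usage sketch based only on the code shown above (output formatting is approximate):

import torch_tensorrt

# String specs go through Device._parse_device_str; "gpu", "cuda",
# and "dla" prefixes are recognized per the diff above.
gpu_dev = torch_tensorrt.Device("cuda:0")  # device_type == trt.DeviceType.GPU

# Keyword form: passing dla_core selects trt.DeviceType.DLA.
dla_dev = torch_tensorrt.Device(dla_core=0, allow_gpu_fallback=True)

print(gpu_dev)  # roughly: Device(type=DeviceType.GPU, gpu_id=0)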