Commit 5f84977

Merge pull request #59 from NVIDIA/python_api
Implementation of a Python API for TRTorch
2 parents: 227dea3 + 639c2a3

379 files changed: +84670 -1354 lines


.bazelrc

Lines changed: 10 additions & 1 deletion
@@ -14,7 +14,7 @@
 # limitations under the License.
 #
 # File: DL4AGX/.bazelrc
-# Description: Default bazel settings and toolchain configuration
+# Description: Default bazel settings and toolchain configuration
 ##########################################################################

 # +------------------------------------------------------------+
@@ -24,3 +24,12 @@
 build --cxxopt="-fdiagnostics-color=always"
 build --cxxopt='-std=c++14'
 #build --linkopt="-Wl,--no-as-needed"
+
+
+build:python --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"
+build:python --linkopt="-D_GLIBCXX_USE_CXX11_ABI=0"
+build:python --define=abi=pre_cxx11_abi
+
+build:pre_cxx11_abi --cxxopt="-D_GLIBCXX_USE_CXX11_ABI=0"
+build:pre_cxx11_abi --linkopt="-D_GLIBCXX_USE_CXX11_ABI=0"
+build:pre_cxx11_abi --define=abi=pre_cxx11_abi
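These named configs are selected with Bazel's `--config` flag. A minimal usage sketch; `//core:core` is just one example of a target that links against libtorch:

```
# Build against the pre-C++11-ABI libtorch (the ABI used by pip-distributed PyTorch builds)
bazel build //core:core --config pre_cxx11_abi

# The python config sets the same ABI options and define, and appears intended for
# builds backing the Python package
bazel build //core:core --config python
```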

.gitignore

Lines changed: 10 additions & 2 deletions
@@ -26,5 +26,13 @@ tests/accuracy/datasets/data/*
 *.tar.gz
 *.tgz
 docsrc/_build
-docsrc/_api
-docsrc/_tmp
+docsrc/_cpp_api
+docsrc/_tmp
+*.so
+__pycache__
+*.egg-info
+dist
+bdist
+py/trtorch/_version.py
+py/wheelhouse
+py/.eggs

README.md

Lines changed: 10 additions & 0 deletions
@@ -137,6 +137,16 @@ A tarball with the include files and library can then be found in bazel-bin
 bazel run //cpp/trtorchexec -- $(realpath <PATH TO GRAPH>) <input-size>
 ```

+## Compiling the Python Package
+
+To compile the python package for your local machine, just run `python3 setup.py install` in the `//py` directory.
+To build wheel files for different python versions, first build the Dockerfile in ``//py`` then run the following
+command
+```
+docker run -it -v$(pwd)/..:/workspace/TRTorch build_trtorch_wheel /bin/bash /workspace/TRTorch/py/build_whl.sh
+```
+Python compilation expects using the tarball based compilation strategy from above.
+
 ## How do I add support for a new op...

 ### In TRTorch?
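A minimal sketch of the two packaging workflows the new README section describes; the `docker build` step and its image tag are assumptions chosen to match the `docker run` command above:

```
# Local install: build and install the Python package from py/ (assumes the tarball-based
# C++ build described earlier in the README has already been produced)
cd py
python3 setup.py install

# Wheel builds for multiple Python versions: build the image from the Dockerfile in py/
# (the tag must match the image name in the docker run command), then run the wheel script
docker build -t build_trtorch_wheel .
docker run -it -v$(pwd)/..:/workspace/TRTorch build_trtorch_wheel /bin/bash /workspace/TRTorch/py/build_whl.sh
```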

WORKSPACE

Lines changed: 32 additions & 10 deletions
@@ -3,17 +3,17 @@ workspace(name = "TRTorch")
 load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
 load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")

-
-http_archive(
+git_repository(
     name = "rules_python",
-    url = "https://github.com/bazelbuild/rules_python/releases/download/0.0.1/rules_python-0.0.1.tar.gz",
-    sha256 = "aa96a691d3a8177f3215b14b0edc9641787abaaa30363a080165d06ab65e1161",
+    remote = "https://github.com/bazelbuild/rules_python.git",
+    commit = "4fcc24fd8a850bdab2ef2e078b1de337eea751a6",
+    shallow_since = "1589292086 -0400"
 )

 load("@rules_python//python:repositories.bzl", "py_repositories")
 py_repositories()
-# Only needed if using the packaging rules.
-load("@rules_python//python:pip.bzl", "pip_repositories", "pip_import")
+
+load("@rules_python//python:pip.bzl", "pip_repositories", "pip3_import")
 pip_repositories()

 http_archive(
@@ -32,6 +32,14 @@ new_local_repository(
     build_file = "@//third_party/cuda:BUILD",
 )

+http_archive(
+    name = "libtorch_pre_cxx11_abi",
+    build_file = "@//third_party/libtorch:BUILD",
+    strip_prefix = "libtorch",
+    sha256 = "ea8de17c5f70015583f3a7a43c7a5cdf91a1d4bd19a6a7bc11f074ef6cd69e27",
+    urls = ["https://download.pytorch.org/libtorch/cu102/libtorch-shared-with-deps-1.5.0.zip"],
+)
+
 http_archive(
     name = "libtorch",
     build_file = "@//third_party/libtorch:BUILD",
@@ -40,20 +48,34 @@ http_archive(
     sha256 = "0efdd4e709ab11088fa75f0501c19b0e294404231442bab1d1fb953924feb6b5"
 )

+pip3_import(
+    name = "trtorch_py_deps",
+    requirements = "//py:requirements.txt"
+)
+
+load("@trtorch_py_deps//:requirements.bzl", "pip_install")
+pip_install()
+
+pip3_import(
+    name = "py_test_deps",
+    requirements = "//tests/py:requirements.txt"
+)
+
+load("@py_test_deps//:requirements.bzl", "pip_install")
+pip_install()
+
 # Downloaded distributions to use with --distdir
 http_archive(
     name = "cudnn",
-    urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/7.6.5.32/Production/10.2_20191118/cudnn-10.2-linux-x64-v7.6.5.32.tgz",],
-
+    urls = ["https://developer.nvidia.com/compute/machine-learning/cudnn/secure/7.6.5.32/Production/10.2_20191118/cudnn-10.2-linux-x64-v7.6.5.32.tgz"],
     build_file = "@//third_party/cudnn/archive:BUILD",
     sha256 = "600267f2caaed2fd58eb214ba669d8ea35f396a7d19b94822e6b36f9f7088c20",
     strip_prefix = "cuda"
 )

 http_archive(
     name = "tensorrt",
-    urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.0/7.0.0.11/tars/TensorRT-7.0.0.11.Ubuntu-18.04.x86_64-gnu.cuda-10.2.cudnn7.6.tar.gz",],
-
+    urls = ["https://developer.nvidia.com/compute/machine-learning/tensorrt/secure/7.0/7.0.0.11/tars/TensorRT-7.0.0.11.Ubuntu-18.04.x86_64-gnu.cuda-10.2.cudnn7.6.tar.gz"],
     build_file = "@//third_party/tensorrt/archive:BUILD",
     sha256 = "c7d73b2585b18aae68b740249efa8c8ba5ae852abe9a023720595432a8eb4efd",
     strip_prefix = "TensorRT-7.0.0.11"

core/BUILD

Lines changed: 11 additions & 2 deletions
@@ -1,5 +1,12 @@
 package(default_visibility = ["//visibility:public"])

+config_setting(
+    name = "use_pre_cxx11_abi",
+    values = {
+        "define": "abi=pre_cxx11_abi",
+    }
+)
+
 cc_library(
     name = "core",
     hdrs = [
@@ -13,9 +20,11 @@ cc_library(
         "//core/execution",
         "//core/lowering",
         "//core/util/logging",
-        "@libtorch//:libtorch",
         "@tensorrt//:nvinfer"
-    ],
+    ] + select({
+        ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+        "//conditions:default": ["@libtorch//:libtorch"],
+    }),
     alwayslink=True,
 )

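The `config_setting` matches when `abi=pre_cxx11_abi` is defined, and the `select()` in `deps` then swaps `@libtorch` for `@libtorch_pre_cxx11_abi`; the same pattern recurs in the BUILD files below. A sketch of triggering the define directly rather than through the `.bazelrc` configs:

```
# Setting the define by hand flips the select() the same way --config pre_cxx11_abi does,
# but on its own it does not add the -D_GLIBCXX_USE_CXX11_ABI=0 compile/link options
# that the .bazelrc config also supplies
bazel build //core:core --define abi=pre_cxx11_abi
```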

core/conversion/BUILD

Lines changed: 11 additions & 3 deletions
@@ -1,5 +1,12 @@
 package(default_visibility = ["//visibility:public"])

+config_setting(
+    name = "use_pre_cxx11_abi",
+    values = {
+        "define": "abi=pre_cxx11_abi",
+    }
+)
+
 cc_library(
     name = "conversion",
     hdrs = [
@@ -8,17 +15,18 @@ cc_library(
     srcs = [
         "conversion.cpp",
         "conversion_blacklist.cpp",
-        "string_to_type_lut.cpp",
         "InterfaceTypes.cpp"
     ],
     deps = [
         "@tensorrt//:nvinfer",
-        "@libtorch//:libtorch",
         "//core/conversion/conversionctx",
         "//core/conversion/converters",
         "//core/conversion/evaluators",
         "//core/util:prelude"
-    ]
+    ] + select({
+        ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+        "//conditions:default": ["@libtorch//:libtorch"],
+    }),
 )

 load("@rules_pkg//:pkg.bzl", "pkg_tar")

core/conversion/InterfaceTypes.cpp

Lines changed: 6 additions & 7 deletions
@@ -10,10 +10,9 @@ namespace conversion {
 GraphParams get_named_params(c10::ArrayRef<torch::jit::Value*> inputs,
                              std::vector<at::Tensor> params) {
     GraphParams named_params;
-    auto type_lut = torch::jit::script::string_to_type_lut();
     auto param_it = params.begin();
     for (auto in : inputs) {
-        if (in->type() != type_lut["Tensor"] \
+        if (in->type() != c10::TensorType::get() \
             && in->isCompleteTensor() && param_it != params.end()) {
             named_params[in] = *param_it;
             ++param_it;
@@ -35,7 +34,7 @@ InputRange::InputRange(std::vector<int64_t> d) {
     min = util::toDims(d);
     max = util::toDims(d);
     input_shape = util::toDims(d);
-
+
 }

@@ -48,14 +47,14 @@ InputRange::InputRange(std::vector<int64_t> min_shape, std::vector<int64_t> opt_
     sizes.insert(min_shape.size());
     sizes.insert(opt_shape.size());
     sizes.insert(max_shape.size());
-
+
     if (sizes.size() != 1) {
         LOG_ERROR("Expected all input sizes have the same dimensions, but found dimensions: min(" \
                   << min_shape.size() << "), opt("
                   << opt_shape.size() << "), max("
                   << max_shape.size() << ")");
     }
-
+
     min = util::toDimsPad(min_shape, 4);
     opt = util::toDimsPad(opt_shape, 4);
     max = util::toDimsPad(max_shape, 4);
@@ -72,9 +71,9 @@ InputRange::InputRange(std::vector<int64_t> min_shape, std::vector<int64_t> opt_
             dyn_shape.push_back(opt_shape[i]);
         }
     }
-
+
     input_shape = util::toDimsPad(dyn_shape, 4);
-
+
 }

 } // namespace conversion

core/conversion/conversion.cpp

Lines changed: 1 addition & 3 deletions
@@ -117,16 +117,14 @@ void AddInputs(ConversionCtx* ctx,
                at::ArrayRef<const torch::jit::Value*> inputs,
                std::vector<InputRange>& input_dims) {

-    auto type_lut = torch::jit::script::string_to_type_lut();
     std::vector<const torch::jit::Value*> input_tensors;
     for (auto in : inputs) {
         // Disregarding inputs that are not tensors
         //
         // Ex.
         // self.1:__torch__.alexnet -> ignored
         // input.1:Tensor -> used
-        auto pt_type = in->type();
-        if (pt_type == type_lut["Tensor"]) {
+        if (in->type()->isSubtypeOf(c10::TensorType::get()) && ctx->evaluated_value_map.find(in) == ctx->evaluated_value_map.end()) {
             input_tensors.push_back(in);
         }
     }

core/conversion/conversion.h

Lines changed: 0 additions & 8 deletions
@@ -6,14 +6,6 @@
 #include "torch/csrc/jit/ir/ir.h"
 #include "core/conversion/conversionctx/ConversionCtx.h"

-namespace torch {
-namespace jit {
-namespace script {
-const std::unordered_map<std::string, c10::TypePtr>& string_to_type_lut();
-}
-}
-}
-
 namespace trtorch {
 namespace core {
 namespace conversion {

core/conversion/conversion_blacklist.cpp

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@ const std::unordered_set<std::string>& get_non_convertable_nodes() {
         "prim::device",
         "prim::GetAttr",
         "prim::CallMethod",
+        "prim::Drop",
         "aten:dropout",
     };
     return nonconvertable_nodes;

core/conversion/conversionctx/BUILD

Lines changed: 11 additions & 2 deletions
@@ -1,5 +1,12 @@
 package(default_visibility = ["//visibility:public"])

+config_setting(
+    name = "use_pre_cxx11_abi",
+    values = {
+        "define": "abi=pre_cxx11_abi",
+    }
+)
+
 cc_library(
     name = "conversionctx",
     hdrs = [
@@ -10,9 +17,11 @@ cc_library(
     ],
     deps = [
         "@tensorrt//:nvinfer",
-        "@libtorch//:libtorch",
         "//core/util:prelude",
-    ]
+    ] + select({
+        ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+        "//conditions:default": ["@libtorch//:libtorch"],
+    }),
 )

 load("@rules_pkg//:pkg.bzl", "pkg_tar")

core/conversion/converters/BUILD

Lines changed: 12 additions & 3 deletions
@@ -1,5 +1,12 @@
 package(default_visibility = ["//visibility:public"])

+config_setting(
+    name = "use_pre_cxx11_abi",
+    values = {
+        "define": "abi=pre_cxx11_abi",
+    }
+)
+
 cc_library(
     name = "converters",
     hdrs = [
@@ -24,11 +31,13 @@ cc_library(
         "impl/unary.cpp",
     ],
     deps = [
-        "@libtorch//:libtorch",
         "@tensorrt//:nvinfer",
         "//core/util:prelude",
-        "//core/conversion/conversionctx"
-    ],
+        "//core/conversion/conversionctx",
+    ] + select({
+        ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+        "//conditions:default": ["@libtorch//:libtorch"],
+    }),
     alwayslink = True,
 )


core/conversion/evaluators/BUILD

Lines changed: 11 additions & 2 deletions
@@ -1,5 +1,12 @@
 package(default_visibility = ["//visibility:public"])

+config_setting(
+    name = "use_pre_cxx11_abi",
+    values = {
+        "define": "abi=pre_cxx11_abi",
+    }
+)
+
 cc_library(
     name = "evaluators",
     hdrs = [
@@ -10,9 +17,11 @@ cc_library(
         "prim.cpp",
     ],
     deps = [
-        "@libtorch//:libtorch",
         "//core/util:prelude",
-    ],
+    ] + select({
+        ":use_pre_cxx11_abi": ["@libtorch_pre_cxx11_abi//:libtorch"],
+        "//conditions:default": ["@libtorch//:libtorch"],
+    }),
     alwayslink = True,
 )
