
Commit

Update the onnx-tensorrt submodule - CI to TRT7 (apache#18574)
Kh4L authored Aug 4, 2020
1 parent 7f2e314 commit 95fa63f
Showing 7 changed files with 33 additions and 21 deletions.
2 changes: 1 addition & 1 deletion 3rdparty/onnx-tensorrt
Submodule onnx-tensorrt updated 63 files
+1 −0 .gitmodules
+46 −177 CMakeLists.txt
+0 −91 Dockerfile
+0 −101 FancyActivation.cu
+0 −142 FancyActivation.hpp
+187 −70 ImporterContext.hpp
+0 −185 InstanceNormalization.cpp
+0 −133 InstanceNormalization.hpp
+607 −517 ModelImporter.cpp
+60 −46 ModelImporter.hpp
+8 −6 NvOnnxParser.cpp
+46 −21 NvOnnxParser.h
+0 −29 NvOnnxParserRuntime.cpp
+0 −85 NvOnnxParserRuntime.h
+0 −30 NvOnnxParserTypedefs.h
+306 −56 OnnxAttrs.cpp
+37 −21 OnnxAttrs.hpp
+0 −57 PluginFactory.cpp
+0 −59 PluginFactory.hpp
+56 −35 README.md
+0 −120 ResizeNearest.cu
+0 −108 ResizeNearest.hpp
+361 −0 ShapeTensor.cpp
+155 −0 ShapeTensor.hpp
+85 −99 ShapedWeights.cpp
+17 −19 ShapedWeights.hpp
+0 −133 Split.cu
+0 −112 Split.hpp
+175 −78 Status.hpp
+78 −40 TensorOrWeights.hpp
+3,646 −1,808 builtin_op_importers.cpp
+2 −1 builtin_op_importers.hpp
+0 −38 builtin_plugins.cpp
+0 −32 builtin_plugins.hpp
+25 −1 common.hpp
+1 −7 contributing.md
+70 −0 docker/onnx-tensorrt-deb.Dockerfile
+80 −0 docker/onnx-tensorrt-tar.Dockerfile
+6 −1 getSupportedAPITest.cpp
+0 −9 libnvonnxparser_runtime.version
+66 −3 main.cpp
+0 −60 nv_onnx_runtime_bindings.i
+32 −17 onnx2trt.hpp
+43 −30 onnx2trt_common.hpp
+3 −3 onnx2trt_runtime.hpp
+1,684 −54 onnx2trt_utils.cpp
+236 −375 onnx2trt_utils.hpp
+155 −150 onnx_backend_test.py
+27 −49 onnx_tensorrt/backend.py
+30 −0 onnx_tensorrt/config.py
+64 −78 onnx_tensorrt/tensorrt_engine.py
+53 −10 onnx_trt_backend.cpp
+130 −44 onnx_utils.hpp
+162 −138 operators.md
+0 −175 plugin.cpp
+0 −183 plugin.hpp
+0 −27 plugin_common.hpp
+0 −125 serialize.hpp
+30 −14 setup.py
+1 −1 third_party/onnx
+73 −56 toposort.hpp
+149 −198 trt_utils.hpp
+1 −1 utils.hpp
8 changes: 3 additions & 5 deletions CMakeLists.txt
@@ -238,6 +238,7 @@ if(USE_TENSORRT)
   include_directories(3rdparty/onnx-tensorrt/third_party/onnx/)
   add_definitions(-DMXNET_USE_TENSORRT=1)
   add_definitions(-DONNX_NAMESPACE=onnx)
+  add_definitions(-DONNX_ML=1)

   find_package(Protobuf REQUIRED)

@@ -247,14 +248,11 @@ if(USE_TENSORRT)
   find_library(ONNX_PROTO_LIBRARY NAMES libonnx_proto.so REQUIRED
                PATHS ${ONNX_PATH}
                DOC "Path to onnx_proto library.")
-  find_library(ONNX_TRT_RUNTIME_LIBRARY NAMES libnvonnxparser_runtime.so REQUIRED
-               PATHS ${ONNX_TRT_PATH}
-               DOC "Path to onnx_proto library.")
   find_library(ONNX_TRT_PARSER_LIBRARY NAMES libnvonnxparser.so REQUIRED
                PATHS ${ONNX_TRT_PATH}
-               DOC "Path to onnx_proto library.")
+               DOC "Path to onnx_proto parser library.")

-  list(APPEND mxnet_LINKER_LIBS libnvinfer.so ${ONNX_TRT_PARSER_LIBRARY} ${ONNX_TRT_RUNTIME_LIBRARY}
+  list(APPEND mxnet_LINKER_LIBS libnvinfer.so ${ONNX_TRT_PARSER_LIBRARY}
       ${ONNX_PROTO_LIBRARY} ${ONNX_LIBRARY} ${PROTOBUF_LIBRARY})
 endif()
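
With TensorRT 7, onnx-tensorrt folded the separate runtime library into libnvonnxparser, which is why the ONNX_TRT_RUNTIME_LIBRARY lookup disappears above. A post-build sanity check along these lines (a sketch, not part of the commit; the build/ path assumes the default CMake layout) can confirm nothing still links the removed library:

    # Hypothetical sanity check: the freshly built libmxnet.so should depend on
    # libnvonnxparser.so only, never on the removed libnvonnxparser_runtime.so.
    ldd build/libmxnet.so | grep nvonnxparser
    if ldd build/libmxnet.so | grep -q 'nvonnxparser_runtime'; then
        echo "ERROR: still linked against the removed runtime parser library" >&2
        exit 1
    fi
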
13 changes: 10 additions & 3 deletions ci/docker/Dockerfile.build.ubuntu
@@ -139,15 +139,22 @@ ARG BASE_IMAGE
 RUN export SHORT_CUDA_VERSION=${CUDA_VERSION%.*} && \
     apt-get update && \
     if [ ${SHORT_CUDA_VERSION} = 10.0 ]; then \
-        apt-get install -y "libnvinfer-dev=5.1.5-1+cuda10.0"; \
+        TRT_VERSION="7.0.0-1+cuda10.0"; \
+        TRT_MAJOR_VERSION=7; \
     elif [ ${SHORT_CUDA_VERSION} = 10.1 ]; then \
-        apt-get install -y "libnvinfer-dev=5.1.5-1+cuda10.1"; \
+        TRT_VERSION="6.0.1-1+cuda10.1"; \
+        TRT_MAJOR_VERSION=6; \
     elif [ ${SHORT_CUDA_VERSION} = 10.2 ]; then \
-        apt-get install -y "libnvinfer-dev=6.0.1-1+cuda10.2"; \
+        TRT_VERSION="7.0.0-1+cuda10.2"; \
+        TRT_MAJOR_VERSION=7; \
     else \
         echo "ERROR: Cuda ${SHORT_CUDA_VERSION} not yet supported in Dockerfile.build.ubuntu"; \
         exit 1; \
     fi && \
+    apt-get install -y libnvinfer${TRT_MAJOR_VERSION}=${TRT_VERSION} \
+                       libnvinfer-dev=${TRT_VERSION} \
+                       libnvinfer-plugin${TRT_MAJOR_VERSION}=${TRT_VERSION} \
+                       libnvinfer-plugin-dev=${TRT_VERSION} && \
     rm -rf /var/lib/apt/lists/*

 FROM gpu as gpuwithcudaruntimelibs
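
The install step now derives one TRT_VERSION per CUDA release and pins all four TensorRT packages to it, so the runtime and -dev packages can no longer drift apart. A check one might run inside the resulting image (illustrative only; the package names follow the apt lines above):

    # List the pinned TensorRT packages; on a CUDA 10.2 image all four should
    # report version 7.0.0-1+cuda10.2.
    dpkg-query -W -f='${Package} ${Version}\n' 'libnvinfer*' | sort
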
10 changes: 10 additions & 0 deletions ci/docker/docker-compose.yml
@@ -108,6 +108,16 @@ services:
         BASE_IMAGE: nvidia/cuda:10.1-cudnn7-devel-ubuntu18.04
       cache_from:
         - ${DOCKER_CACHE_REGISTRY}/build.ubuntu_gpu_cu101:latest
+  ubuntu_gpu_cu102:
+    image: ${DOCKER_CACHE_REGISTRY}/build.ubuntu_gpu_cu102:latest
+    build:
+      context: .
+      dockerfile: Dockerfile.build.ubuntu
+      target: gpu
+      args:
+        BASE_IMAGE: nvidia/cuda:10.2-cudnn7-devel-ubuntu18.04
+      cache_from:
+        - ${DOCKER_CACHE_REGISTRY}/build.ubuntu_gpu_cu102:latest
   ubuntu_build_cuda:
     image: ${DOCKER_CACHE_REGISTRY}/build.ubuntu_build_cuda:latest
     build:
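
The new service mirrors the cu101 entry with a CUDA 10.2 base image. As a sketch (assuming a local docker-compose install; the mxnetci registry value is an illustrative placeholder), the container can be built outside CI with:

    # Build the new CI image locally; DOCKER_CACHE_REGISTRY must resolve for
    # the image tag and cache_from entry to be valid.
    cd ci/docker
    DOCKER_CACHE_REGISTRY=mxnetci docker-compose build ubuntu_gpu_cu102
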
15 changes: 8 additions & 7 deletions ci/docker/runtime_functions.sh
@@ -541,6 +541,7 @@ build_ubuntu_gpu_tensorrt() {

     export CC=gcc-7
     export CXX=g++-7
+    export ONNX_NAMESPACE=onnx

     # Build ONNX
     pushd .
@@ -549,29 +550,29 @@
     rm -rf build
     mkdir -p build
     cd build
-    cmake -DBUILD_SHARED_LIBS=ON -GNinja ..
-    ninja onnx/onnx.proto
-    ninja
+    cmake -DCMAKE_CXX_FLAGS=-I/usr/include/python${PYVER} -DBUILD_SHARED_LIBS=ON ..
+    make -j$(nproc)
     export LIBRARY_PATH=`pwd`:`pwd`/onnx/:$LIBRARY_PATH
     export CPLUS_INCLUDE_PATH=`pwd`:$CPLUS_INCLUDE_PATH
+    export CXXFLAGS=-I`pwd`
+
     popd

     # Build ONNX-TensorRT
     export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib
-    export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:/usr/local/cuda-10.1/targets/x86_64-linux/include/
+    export CPLUS_INCLUDE_PATH=${CPLUS_INCLUDE_PATH}:/usr/local/cuda-10.2/targets/x86_64-linux/include/
     pushd .
     cd 3rdparty/onnx-tensorrt/
     mkdir -p build
     cd build
-    cmake ..
+    cmake -DONNX_NAMESPACE=$ONNX_NAMESPACE ..
     make -j$(nproc)
     export LIBRARY_PATH=`pwd`:$LIBRARY_PATH
     popd

     mkdir -p /work/mxnet/lib/
     cp 3rdparty/onnx-tensorrt/third_party/onnx/build/*.so /work/mxnet/lib/
-    cp -L 3rdparty/onnx-tensorrt/build/libnvonnxparser_runtime.so.0 /work/mxnet/lib/
-    cp -L 3rdparty/onnx-tensorrt/build/libnvonnxparser.so.0 /work/mxnet/lib/
+    cp -L 3rdparty/onnx-tensorrt/build/libnvonnxparser.so /work/mxnet/lib/

     cd /work/build
     cmake -DUSE_CUDA=1 \
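
The exported ONNX_NAMESPACE is now threaded through both the onnx-tensorrt build here and MXNet's own -DONNX_NAMESPACE=onnx definition in CMakeLists.txt; if the builds disagree, the generated protobuf types will not link. A small verification sketch (not part of the commit) inspects the symbols of the parser library that was just copied:

    # Demangle the parser's exported symbols and look for the onnx:: qualifier,
    # which reflects the ONNX_NAMESPACE the library was compiled with.
    nm -D --defined-only /work/mxnet/lib/libnvonnxparser.so \
        | c++filt | grep -m 3 'onnx::'
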
2 changes: 1 addition & 1 deletion ci/jenkins/Jenkins_steps.groovy
@@ -278,7 +278,7 @@ def compile_unix_tensorrt_gpu(lib_name) {
     ws('workspace/build-tensorrt') {
       timeout(time: max_time, unit: 'MINUTES') {
         utils.init_git()
-        utils.docker_run('ubuntu_gpu_cu101', 'build_ubuntu_gpu_tensorrt', false)
+        utils.docker_run('ubuntu_gpu_cu102', 'build_ubuntu_gpu_tensorrt', false)
         utils.pack_lib(lib_name, mx_tensorrt_lib)
       }
     }
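
The TensorRT job now builds in the CUDA 10.2 container defined above. To reproduce the step outside Jenkins, something like the following should work (a sketch assuming MXNet's usual ci/build.py wrapper, which drives the docker-compose platforms):

    # Run the same runtime function inside the ubuntu_gpu_cu102 container.
    ci/build.py --platform ubuntu_gpu_cu102 /work/runtime_functions.sh \
        build_ubuntu_gpu_tensorrt
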
4 changes: 0 additions & 4 deletions src/operator/subgraph/tensorrt/onnx_to_tensorrt.cc
@@ -35,13 +35,9 @@
 #include <google/protobuf/io/zero_copy_stream_impl.h>
 #include <google/protobuf/text_format.h>
 #include <onnx-tensorrt/NvOnnxParser.h>
-#include <onnx-tensorrt/NvOnnxParserRuntime.h>
 #include <dmlc/logging.h>
 #include <dmlc/parameter.h>

-#include <onnx-tensorrt/PluginFactory.hpp>
-#include <onnx-tensorrt/plugin_common.hpp>
-
 using std::cout;
 using std::cerr;
 using std::endl;
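
Dropping these includes matches the upstream removal of NvOnnxParserRuntime, PluginFactory, and the plugin headers in the TRT 7 parser. A grep guard along these lines (illustrative, not in the commit) can catch any stragglers:

    # Fail if any TensorRT subgraph source still references the deleted headers.
    if grep -rnE 'NvOnnxParserRuntime|PluginFactory|plugin_common' \
        src/operator/subgraph/tensorrt/; then
        echo "ERROR: references to removed onnx-tensorrt headers remain" >&2
        exit 1
    fi
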
