Skip to content

Commit

Permalink
[Refactor] refactor 2 basic demos and all related documents
Browse files Browse the repository at this point in the history
  • Loading branch information
zgjja committed Oct 21, 2024
1 parent 4ce6f20 commit cee3996
Show file tree
Hide file tree
Showing 20 changed files with 835 additions and 1,267 deletions.
49 changes: 49 additions & 0 deletions CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,49 @@
# Top-level build for tensorrtx demo projects.
# 3.17 is required because cmake/FindTensorRT.cmake demands it.
cmake_minimum_required(VERSION 3.17)

project(
  tensorrtx
  VERSION 0.1
  LANGUAGES C CXX CUDA)

set(CMAKE_CXX_STANDARD 17)
set(CMAKE_CXX_STANDARD_REQUIRED ON)
set(CMAKE_CUDA_STANDARD 17)
set(CMAKE_CUDA_STANDARD_REQUIRED ON)

# Emit compile_commands.json for clangd / clang-tidy tooling.
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)

# Default to a Debug build, but only for single-config generators and only
# when the user has not already chosen a build type. Unconditionally forcing
# the cache value would silently override `-DCMAKE_BUILD_TYPE=Release`.
get_property(_is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
if(NOT _is_multi_config AND NOT CMAKE_BUILD_TYPE)
  set(CMAKE_BUILD_TYPE
      "Debug"
      CACHE STRING "build type" FORCE)
endif()
unset(_is_multi_config)

option(CUDA_USE_STATIC_CUDA_RUNTIME "use static cuda runtime lib" OFF)

# Default CUDA architectures; honor a user/toolchain-provided value.
if(NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
  set(CMAKE_CUDA_ARCHITECTURES
      60
      70
      72
      75
      80
      86
      89)
endif()

find_package(Threads REQUIRED)
find_package(CUDAToolkit REQUIRED)

# Defines the TensorRT::TensorRT imported target used by the demos.
include(cmake/FindTensorRT.cmake)

# Demos grouped by the minimum TensorRT major version they support.
# Comment out entries that are unsupported by your TensorRT installation.
set(TensorRT_7_TARGETS mlp lenet)

set(TensorRT_8_TARGETS)

set(TensorRT_10_TARGETS)

set(ALL_TARGETS ${TensorRT_7_TARGETS} ${TensorRT_8_TARGETS}
                ${TensorRT_10_TARGETS})

foreach(sub_dir IN LISTS ALL_TARGETS)
  message(STATUS "Add subdirectory: ${sub_dir}")
  add_subdirectory(${sub_dir})
endforeach()
40 changes: 36 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ The basic workflow of TensorRTx is:
- [A guide for quickly getting started, taking lenet5 as a demo.](./tutorials/getting_started.md)
- [The .wts file content format](./tutorials/getting_started.md#the-wts-content-format)
- [Frequently Asked Questions (FAQ)](./tutorials/faq.md)
- [Migrating from TensorRT 4 to 7](./tutorials/migrating_from_tensorrt_4_to_7.md)
- [Migration Guide](./tutorials/migration_guide.md)
- [How to implement multi-GPU processing, taking YOLOv4 as example](./tutorials/multi_GPU_processing.md)
- [Check if Your GPU support FP16/INT8](./tutorials/check_fp16_int8_support.md)
- [How to Compile and Run on Windows](./tutorials/run_on_windows.md)
Expand All @@ -47,12 +47,44 @@ The basic workflow of TensorRTx is:

## Test Environment

1. TensorRT 7.x
2. TensorRT 8.x(Some of the models support 8.x)
1. (**NOT recommended**) TensorRT 7.x
2. (**Recommended**)TensorRT 8.x
3. (**NOT recommended**) TensorRT 10.x

### Note

1. For historical reasons, some of the models are limited to a specific TensorRT version; please check the README.md or code of the model you want to use.
2. Currently, TensorRT 8.x has the best compatibility and supports most of the features.

## How to run

Each folder has a readme inside, which explains how to run the models inside.
**Note**: many of the networks may not be included in the "top" cmake project because they are still being refactored; please enter their subfolders to build them.

Each model is a subproject, you can build them with cmake like:
```BASH
# 1. generate xxx.wts from https://github.com/wang-xinyu/pytorchx/tree/master/lenet
# ...

# 2. put xxx.wts on top of this folder
# ...

# 3.1 (Optional) Comment out any model you don't want to build or that is not supported by your TensorRT version in CMakeLists.txt
# ...
# 3.2 configure project with cmake and build with Ninja
cd tensorrtx
cmake -S . -B build -G Ninja
ninja -C build

# 4.1 Serialize model to plan file i.e. 'xxx.engine'
build/lenet -s
# 4.2 deserialize plan file and run inference
build/lenet -d

# 5. (Optional) Check that the output is the same as pytorchx/lenet
# ...
```

For more details, each subfolder may contain a README.md inside, which explains more.

## Models

Expand Down
76 changes: 76 additions & 0 deletions cmake/FindTensorRT.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,76 @@
# FindTensorRT.cmake — locate TensorRT headers/libraries and expose them
# through the imported interface target TensorRT::TensorRT.
#
# Inputs:
#   TRT_VERSION (cache/env)      — e.g. "8.6.1.6"; the major version selects
#                                  which component libraries are searched.
#   TensorRT_INCLUDE_DIR (cache) — override for the header directory.
#   TensorRT_LIBRARY_DIR (cache) — override for the library directory.
cmake_minimum_required(VERSION 3.17.0)

set(TRT_VERSION
    $ENV{TRT_VERSION}
    CACHE STRING
          "TensorRT version, e.g. \"8.6.1.6\" or \"8.6.1.6+cuda12.0.1.011\"")

# Derive the major version and the component list from TRT_VERSION.
if(DEFINED TRT_VERSION AND NOT TRT_VERSION STREQUAL "")
  string(REGEX MATCH "^([0-9]+)" TRT_MAJOR_VERSION "${TRT_VERSION}")
  set(_modules nvinfer nvinfer_plugin)

  if(TRT_MAJOR_VERSION GREATER_EQUAL 8)
    # Components introduced with TensorRT 8.
    list(APPEND _modules nvinfer_vc_plugin nvinfer_dispatch nvinfer_lean)
  endif()
else()
  message(FATAL_ERROR "Please set an environment variable \"TRT_VERSION\"")
endif()

# Default header location: JetPack layout on aarch64, Debian/Ubuntu
# multiarch layout elsewhere.
if(NOT TensorRT_INCLUDE_DIR)
  if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
    set(_trt_inc_dir "/usr/local/cuda/targets/aarch64-linux/include")
  else()
    set(_trt_inc_dir "/usr/include/x86_64-linux-gnu")
  endif()
  set(TensorRT_INCLUDE_DIR
      ${_trt_inc_dir}
      CACHE PATH "TensorRT_INCLUDE_DIR")
  unset(_trt_inc_dir)
  message(STATUS "TensorRT: ${TensorRT_INCLUDE_DIR}")
endif()

if(NOT TensorRT_LIBRARY_DIR)
  if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
    set(_trt_lib_dir "/usr/local/cuda/targets/aarch64-linux/lib")
  else()
    # NOTE: this must be the *lib* multiarch directory, not the include one.
    set(_trt_lib_dir "/usr/lib/x86_64-linux-gnu")
  endif()
  set(TensorRT_LIBRARY_DIR
      ${_trt_lib_dir}
      CACHE PATH "TensorRT_LIBRARY_DIR")
  unset(_trt_lib_dir)
  message(STATUS "TensorRT: ${TensorRT_LIBRARY_DIR}")
endif()

set(TensorRT_LIBRARIES)

# Resolve each component library; skip (with a warning) anything missing so
# a "-NOTFOUND" token never reaches the link line.
foreach(lib IN LISTS _modules)
  find_library(
    TensorRT_${lib}_LIBRARY
    NAMES ${lib}
    HINTS ${TensorRT_LIBRARY_DIR})

  if(TensorRT_${lib}_LIBRARY)
    list(APPEND TensorRT_LIBRARIES ${TensorRT_${lib}_LIBRARY})
  else()
    message(
      WARNING "TensorRT component \"${lib}\" not found in "
              "${TensorRT_LIBRARY_DIR}; it will not be linked.")
  endif()
endforeach()

message(STATUS "Found TensorRT lib: ${TensorRT_LIBRARIES}")

# GLOBAL is required so the ALIAS below is legal for an imported target on
# CMake < 3.18.
if(NOT TARGET TensorRT::TensorRT)
  add_library(TensorRT INTERFACE IMPORTED GLOBAL)
  add_library(TensorRT::TensorRT ALIAS TensorRT)
endif()

target_link_libraries(TensorRT INTERFACE ${TensorRT_LIBRARIES})

# Only INTERFACE_* properties have any effect on an IMPORTED INTERFACE
# library; build-only properties (standards, rpath) belong on consumers.
set_target_properties(
  TensorRT PROPERTIES INTERFACE_INCLUDE_DIRECTORIES
                      "${TensorRT_INCLUDE_DIR}")

unset(TRT_MAJOR_VERSION)
unset(_modules)
Loading

0 comments on commit cee3996

Please sign in to comment.