Learn C++ 2: Integrating TensorRT Libraries into CMake Projects
I downloaded the TensorRT tar file for a local installation and unpacked it into a local folder. I want to use the dynamic libraries in it, such as libnvinfer.so, when compiling my project with CMake. How should I configure this in the CMakeLists.txt?
- Locate the directory into which you unpacked the TensorRT tar file. This directory contains the header files (include/*.h) and the shared libraries (lib/*.so).
set(TENSORRT_DIR /path/to/tensorrt)
- Use the find_library command to locate the shared libraries (*.so) under the lib directory.
find_library(NVINFER_LIB libnvinfer.so PATHS ${TENSORRT_DIR}/lib)
- Use the include_directories command to add the header files (*.h) under the include directory to the include path.
include_directories(${TENSORRT_DIR}/include)
- Use the link_libraries command to link the TensorRT libraries to your target:
link_libraries(${NVINFER_LIB} ${NVONNXPARSER_LIB} ${CUDA_LIBRARIES})
The code block below is the complete CMakeLists.txt.
cmake_minimum_required(VERSION 3.11)
project(trt_examples)
set(CMAKE_CXX_STANDARD 14)
# CUDA
find_package(CUDA REQUIRED)
include_directories(${CUDA_INCLUDE_DIRS})
message("CUDA_TOOLKIT_ROOT_DIR = ${CUDA_TOOLKIT_ROOT_DIR}")
message("CUDA_INCLUDE_DIRS = ${CUDA_INCLUDE_DIRS}")
message("CUDA_LIBRARIES = ${CUDA_LIBRARIES}")
# TensorRT
set(TENSORRT_DIR "/home/gluo/local_lib/TensorRT-8.6.1.6/")
find_library(NVINFER_LIB libnvinfer.so PATHS ${TENSORRT_DIR}/lib)
find_library(NVONNXPARSER_LIB libnvonnxparser.so PATHS ${TENSORRT_DIR}/lib)
include_directories(${TENSORRT_DIR}/include)
link_libraries(${NVINFER_LIB} ${NVONNXPARSER_LIB} ${CUDA_LIBRARIES})
add_executable(example1 example1.cpp)
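To verify that the include path and the linked libraries are picked up correctly, example1.cpp can be a small smoke test like the sketch below. This sketch is mine, not taken from the TensorRT documentation: it assumes the TensorRT 8.x API, defines a minimal Logger (the API requires an ILogger implementation), prints the version macros from the headers, and creates a builder to exercise libnvinfer.so.
#include <iostream>
#include <NvInfer.h>

// Minimal ILogger implementation (TensorRT requires one); it simply
// forwards warnings and errors to stdout.
class Logger : public nvinfer1::ILogger
{
    void log(Severity severity, const char* msg) noexcept override
    {
        if (severity <= Severity::kWARNING)
        {
            std::cout << msg << std::endl;
        }
    }
};

int main()
{
    // The version macros come from the TensorRT headers, so this line
    // already confirms that the include path is configured correctly.
    std::cout << "Built against TensorRT "
              << NV_TENSORRT_MAJOR << "." << NV_TENSORRT_MINOR << "."
              << NV_TENSORRT_PATCH << std::endl;

    // Creating a builder exercises libnvinfer.so at link and run time.
    Logger logger;
    nvinfer1::IBuilder* builder = nvinfer1::createInferBuilder(logger);
    if (builder == nullptr)
    {
        std::cerr << "Failed to create a TensorRT builder." << std::endl;
        return 1;
    }
    delete builder; // TensorRT 8.x interface objects can be released with delete.
    return 0;
}
Because the TensorRT lib directory is not a system path, the resulting binary may also need it on LD_LIBRARY_PATH (or an RPATH entry) at run time.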