$ wget http://www.cmake.org/files/v3.13/cmake-3.13.0.tar.gz
$ tar xpvf cmake-3.13.0.tar.gz cmake-3.13.0
$ cd cmake-3.13.0
$ ./bootstrap --system-curl
$ make -j4
$ sudo ln -s /home/nano/Data/cmake-3.13.0/bin/cmake /usr/local/bin/cmake
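A quick sanity check that the newly built CMake is the one being picked up (assuming /usr/local/bin comes before /usr/bin in your PATH); it should report version 3.13.0:
$ which cmake
$ cmake --version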
$ cd TensorRT/
$ git submodule update --init --recursive
$ export TRT_SOURCE=`pwd`
$ cd $TRT_SOURCE
$ mkdir -p build && cd build
$ /usr/local/bin/cmake .. -DGPU_ARCHS=53 \
-DTRT_LIB_DIR=/usr/lib/aarch64-linux-gnu/ \
-DCMAKE_C_COMPILER=/usr/bin/gcc \
-DCMAKE_CUDA_COMPILER=/usr/local/cuda-10.2/bin/nvcc \
-DTRT_BIN_DIR=`pwd`/out
$ make nvinfer_plugin -j$(nproc)
$ mkdir backup
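Before replacing the system copy, it is worth confirming that the rebuilt plugin landed in the out directory given to TRT_BIN_DIR (the version suffix may differ on other JetPack releases):
$ ls -l `pwd`/out/libnvinfer_plugin.so*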
$ sudo mv /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.7.1.3 backup
$ sudo cp `pwd`/out/libnvinfer_plugin.so.7.1.3 /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so.7.1.3
$ sudo ldconfig
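To verify that the dynamic linker now resolves the rebuilt plugin, a quick (optional) check:
$ ldconfig -p | grep libnvinfer_plugin
$ ls -l /usr/lib/aarch64-linux-gnu/libnvinfer_plugin.so*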
Download the tlt-converter package (tlt_7.1.zip) from here, then:
$ mkdir tlt-converter
$ mv tlt_7.1.zip tlt-converter
$ cd tlt-converter
$ unzip tlt_7.1.zip
Edit ~/.bashrc ($ vi ~/.bashrc) and add the following lines:
export TRT_LIB_PATH="/usr/lib/aarch64-linux-gnu"
export TRT_INC_PATH="/usr/include/aarch64-linux-gnu"
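After reloading the shell environment, the converter can be run. Below is a rough sketch of converting a TLT-exported .etlt model into a TensorRT engine; the model file name, the $NGC_KEY key, the input dimensions, and the output node names are placeholders (shown for a DetectNet_v2-style model) and must match whatever was exported from TLT:
$ source ~/.bashrc
$ chmod +x tlt-converter
$ ./tlt-converter -k $NGC_KEY \
    -d 3,384,1248 \
    -o output_cov/Sigmoid,output_bbox/BiasAdd \
    -e resnet18_detector.trt \
    resnet18_detector.etlt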