@@ -2,10 +2,6 @@
 
 set -e
 
-TRT_VERSION='8.5.2.2'
-CUDA_VERSION='11.8'
-CUDNN_VERSION='8.6'
-
 # deal cmd input
 while [[ "$#" -gt 0 ]]; do
     case "$1" in
@@ -13,6 +9,13 @@ while [[ "$#" -gt 0 ]]; do
         --enable-benchmark) ENABLE_BENCHMARK="$2"; shift ;;
         --paddleinference-url) PADDLEINFERENCE_URL="$2"; shift ;;
         --paddleinference-version) PADDLEINFERENCE_VERSION="$2"; shift ;;
+        --enable-paddle-backend) ENABLE_PADDLE_BACKEND="$2"; shift ;;
+        --enable-ort-backend) ENABLE_ORT_BACKEND="$2"; shift ;;
+        --enable-openvino-backend) ENABLE_OPENVINO_BACKEND="$2"; shift ;;
+        --enable-trt-backend) ENABLE_TRT_BACKEND="$2"; shift ;;
+        --trt-directory) TRT_DIRECTORY="$2"; shift ;;
+        --enable-vision) ENABLE_VISION="$2"; shift ;;
+        --enable-text) ENABLE_TEXT="$2"; shift ;;
         *) echo "Unknown parameter passed: $1"; exit 1 ;;
     esac
     shift
@@ -37,9 +40,15 @@ ln -sf /opt/_internal/cpython-3.10.0/bin/pip3.10 /usr/bin/pip
 export LD_LIBRARY_PATH=/opt/_internal/cpython-3.10.0/lib:${LD_LIBRARY_PATH}
 export PATH=/opt/_internal/cpython-3.10.0/bin:${PATH}
 
-rm -rf "TensorRT-${TRT_VERSION}" "TensorRT-${TRT_VERSION}.Linux.x86_64-gnu.cuda-${CUDA_VERSION}.cudnn${CUDNN_VERSION}.tar.gz"
-http_proxy= https_proxy= wget "https://fastdeploy.bj.bcebos.com/resource/TensorRT/TensorRT-${TRT_VERSION}.Linux.x86_64-gnu.cuda-${CUDA_VERSION}.cudnn${CUDNN_VERSION}.tar.gz"
-tar -xzvf "TensorRT-${TRT_VERSION}.Linux.x86_64-gnu.cuda-${CUDA_VERSION}.cudnn${CUDNN_VERSION}.tar.gz"
+if [ "$ENABLE_TRT_BACKEND" = "ON" ] && [ "$TRT_DIRECTORY" = "Default" ]; then
+    TRT_VERSION='8.5.2.2'
+    CUDA_VERSION='11.8'
+    CUDNN_VERSION='8.6'
+    rm -rf "TensorRT-${TRT_VERSION}" "TensorRT-${TRT_VERSION}.Linux.x86_64-gnu.cuda-${CUDA_VERSION}.cudnn${CUDNN_VERSION}.tar.gz"
+    http_proxy= https_proxy= wget "https://fastdeploy.bj.bcebos.com/resource/TensorRT/TensorRT-${TRT_VERSION}.Linux.x86_64-gnu.cuda-${CUDA_VERSION}.cudnn${CUDNN_VERSION}.tar.gz"
+    tar -xzvf "TensorRT-${TRT_VERSION}.Linux.x86_64-gnu.cuda-${CUDA_VERSION}.cudnn${CUDNN_VERSION}.tar.gz"
+    TRT_DIRECTORY="/workspace/ultra-infer/TensorRT-${TRT_VERSION}"
+fi
 
 (
     cd /workspace/ultra-infer
@@ -49,15 +58,15 @@ tar -xzvf "TensorRT-${TRT_VERSION}.Linux.x86_64-gnu.cuda-${CUDA_VERSION}.cudnn${
         -DLIBRARY_NAME='ultra_infer_runtime' \
         -DCMAKE_INSTALL_PREFIX="${PWD}/ultra_infer_install" \
         -DWITH_GPU="${WITH_GPU}" \
-        -DENABLE_TRT_BACKEND="${WITH_GPU}" \
-        -DTRT_DIRECTORY="/workspace/ultra-infer/TensorRT-${TRT_VERSION}" \
-        -DENABLE_ORT_BACKEND=ON \
-        -DENABLE_PADDLE_BACKEND=ON \
+        -DENABLE_TRT_BACKEND="${ENABLE_TRT_BACKEND}" \
+        -DTRT_DIRECTORY="${TRT_DIRECTORY}" \
+        -DENABLE_ORT_BACKEND="${ENABLE_ORT_BACKEND}" \
+        -DENABLE_PADDLE_BACKEND="${ENABLE_PADDLE_BACKEND}" \
         -DPADDLEINFERENCE_URL="${PADDLEINFERENCE_URL}" \
         -DPADDLEINFERENCE_VERSION="${PADDLEINFERENCE_VERSION}" \
-        -DENABLE_OPENVINO_BACKEND=ON \
-        -DENABLE_VISION=ON \
-        -DENABLE_TEXT=ON \
+        -DENABLE_OPENVINO_BACKEND="${ENABLE_OPENVINO_BACKEND}" \
+        -DENABLE_VISION="${ENABLE_VISION}" \
+        -DENABLE_TEXT="${ENABLE_TEXT}" \
         -DBUILD_ULTRAINFER_PYTHON=OFF \
         -DBUILD_FD_TRITON_BACKEND=ON \
         -DENABLE_BENCHMARK="${ENABLE_BENCHMARK}" \
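
Net effect of the diff: every backend switch that was hard-coded (ORT, Paddle, OpenVINO, vision, text) or tied to WITH_GPU (TensorRT) becomes a command-line option, and the TensorRT 8.5.2.2 tarball is downloaded only when --enable-trt-backend ON is combined with the sentinel --trt-directory Default; any other --trt-directory value is passed through unchanged to -DTRT_DIRECTORY. As a minimal usage sketch, assuming the patched script is invoked as build.sh (the script name, and WITH_GPU being exported separately rather than parsed here, are assumptions; only the flag names are confirmed by the diff):

    # WITH_GPU is assumed to come from the environment, not from the parsed flags above
    WITH_GPU=ON bash build.sh \
        --enable-trt-backend ON \
        --trt-directory Default \
        --enable-ort-backend ON \
        --enable-paddle-backend ON \
        --enable-openvino-backend ON \
        --enable-vision ON \
        --enable-text ON \
        --enable-benchmark OFF \
        --paddleinference-url "${PADDLEINFERENCE_URL}" \
        --paddleinference-version "${PADDLEINFERENCE_VERSION}"

Because each case branch consumes its value with an extra shift, every flag must be followed by an explicit ON/OFF (or path) argument; an unrecognized flag aborts the script via the *) branch.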