_build_cpp.sh

#!/bin/bash
set -e

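# Pinned versions used to name the TensorRT tarball downloaded below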
TRT_VERSION='8.5.2.2'
CUDA_VERSION='11.8'
CUDNN_VERSION='8.6'

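# Usage (illustrative values; the URL/version placeholders are not real):
#   ./_build_cpp.sh --with-gpu ON --enable-benchmark OFF \
#       --paddleinference-url <paddle_inference_url> --paddleinference-version <version>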
# Parse command-line arguments
while [[ "$#" -gt 0 ]]; do
    case "$1" in
        --with-gpu) WITH_GPU="$2"; shift ;;
        --enable-benchmark) ENABLE_BENCHMARK="$2"; shift ;;
        --paddleinference-url) PADDLEINFERENCE_URL="$2"; shift ;;
        --paddleinference-version) PADDLEINFERENCE_VERSION="$2"; shift ;;
        *) echo "Unknown parameter passed: $1"; exit 1 ;;
    esac
    shift
done

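# Non-interactive installs, a fixed timezone, and the GCC 8.2 toolchain for the build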
export DEBIAN_FRONTEND='noninteractive'
export TZ='Asia/Shanghai'
export CC=/usr/local/gcc-8.2/bin/gcc
export CXX=/usr/local/gcc-8.2/bin/g++

cd /workspace/ultra-infer

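# Switch yum to the Tencent Cloud CentOS 7 mirror and install build dependencies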
wget -O /etc/yum.repos.d/CentOS-Base.repo http://mirrors.cloud.tencent.com/repo/centos7_base.repo
yum clean all
yum makecache
yum install -y patchelf rapidjson-devel

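# Make the CPython 3.10 under /opt/_internal the default python/pip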
ln -sf /opt/_internal/cpython-3.10.0/bin/python3.10 /usr/bin/python
ln -sf /opt/_internal/cpython-3.10.0/bin/pip3.10 /usr/bin/pip
export LD_LIBRARY_PATH=/opt/_internal/cpython-3.10.0/lib:${LD_LIBRARY_PATH}
export PATH=/opt/_internal/cpython-3.10.0/bin:${PATH}

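# Remove any stale copy, then download and unpack the TensorRT tarball matching
# the pinned CUDA/cuDNN versions (proxies are bypassed for the download)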
rm -rf "TensorRT-${TRT_VERSION}" "TensorRT-${TRT_VERSION}.Linux.x86_64-gnu.cuda-${CUDA_VERSION}.cudnn${CUDNN_VERSION}.tar.gz"
http_proxy= https_proxy= wget "https://fastdeploy.bj.bcebos.com/resource/TensorRT/TensorRT-${TRT_VERSION}.Linux.x86_64-gnu.cuda-${CUDA_VERSION}.cudnn${CUDNN_VERSION}.tar.gz"
tar -xzvf "TensorRT-${TRT_VERSION}.Linux.x86_64-gnu.cuda-${CUDA_VERSION}.cudnn${CUDNN_VERSION}.tar.gz"

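# Configure, build, and install in a subshell so the directory change doesn't leak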
(
    cd /workspace/ultra-infer
    rm -rf build && mkdir build && cd build
    unset http_proxy https_proxy
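    # ORT, Paddle Inference, and OpenVINO backends are always enabled; the
    # TensorRT backend is tied to the --with-gpu flag. Python bindings are off.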
    cmake \
        -DLIBRARY_NAME='ultra_infer_runtime' \
        -DCMAKE_INSTALL_PREFIX="${PWD}/ultra_infer_install" \
        -DWITH_GPU="${WITH_GPU}" \
        -DENABLE_TRT_BACKEND="${WITH_GPU}" \
        -DTRT_DIRECTORY="/workspace/ultra-infer/TensorRT-${TRT_VERSION}" \
        -DENABLE_ORT_BACKEND=ON \
        -DENABLE_PADDLE_BACKEND=ON \
        -DPADDLEINFERENCE_URL="${PADDLEINFERENCE_URL}" \
        -DPADDLEINFERENCE_VERSION="${PADDLEINFERENCE_VERSION}" \
        -DENABLE_OPENVINO_BACKEND=ON \
        -DENABLE_VISION=ON \
        -DENABLE_TEXT=ON \
        -DBUILD_ULTRAINFER_PYTHON=OFF \
        -DBUILD_FD_TRITON_BACKEND=ON \
        -DENABLE_BENCHMARK="${ENABLE_BENCHMARK}" \
        ..
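    # Compile on all available cores and install into ultra_infer_install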
    make -j"$(nproc)"
    make install
)