
rename (#2687)

* rename

* fix

* rerun CI
zhang-prog 11 months ago
Parent
Commit
9f9141b56e
100 changed files with 6053 additions and 0 deletions
  1. libs/ultra_infer/.gitignore (+54 -0)
  2. libs/ultra_infer/CMakeLists.txt (+735 -0)
  3. libs/ultra_infer/LICENSE (+0 -0)
  4. libs/ultra_infer/ThirdPartyNotices.txt (+0 -0)
  5. libs/ultra_infer/UltraInfer.cmake.in (+335 -0)
  6. libs/ultra_infer/UltraInferCSharp.cmake.in (+13 -0)
  7. libs/ultra_infer/VERSION_NUMBER (+0 -0)
  8. libs/ultra_infer/cmake/UltraInferConfig.cmake (+0 -0)
  9. libs/ultra_infer/cmake/ascend.cmake (+0 -0)
  10. libs/ultra_infer/cmake/build_paddle2onnx.cmake (+0 -0)
  11. libs/ultra_infer/cmake/build_tools.cmake (+0 -0)
  12. libs/ultra_infer/cmake/check.cmake (+0 -0)
  13. libs/ultra_infer/cmake/config_cpack.cmake (+38 -0)
  14. libs/ultra_infer/cmake/cuda.cmake (+0 -0)
  15. libs/ultra_infer/cmake/cvcuda.cmake (+0 -0)
  16. libs/ultra_infer/cmake/faiss.cmake (+0 -0)
  17. libs/ultra_infer/cmake/fast_tokenizer.cmake (+0 -0)
  18. libs/ultra_infer/cmake/flycv.cmake (+0 -0)
  19. libs/ultra_infer/cmake/gflags.cmake (+89 -0)
  20. libs/ultra_infer/cmake/glog.cmake (+0 -0)
  21. libs/ultra_infer/cmake/gtest.cmake (+0 -0)
  22. libs/ultra_infer/cmake/horizon.cmake (+0 -0)
  23. libs/ultra_infer/cmake/kunlunxin.cmake (+0 -0)
  24. libs/ultra_infer/cmake/onnxruntime.cmake (+0 -0)
  25. libs/ultra_infer/cmake/opencv.cmake (+90 -0)
  26. libs/ultra_infer/cmake/openvino.cmake (+0 -0)
  27. libs/ultra_infer/cmake/paddle2onnx.cmake (+0 -0)
  28. libs/ultra_infer/cmake/paddle_inference.cmake (+0 -0)
  29. libs/ultra_infer/cmake/paddlelite.cmake (+0 -0)
  30. libs/ultra_infer/cmake/poros.cmake (+0 -0)
  31. libs/ultra_infer/cmake/rknpu2.cmake (+0 -0)
  32. libs/ultra_infer/cmake/sophgo.cmake (+0 -0)
  33. libs/ultra_infer/cmake/summary.cmake (+84 -0)
  34. libs/ultra_infer/cmake/timvx.cmake (+0 -0)
  35. libs/ultra_infer/cmake/toolchain.cmake (+0 -0)
  36. libs/ultra_infer/cmake/tvm.cmake (+0 -0)
  37. libs/ultra_infer/cmake/utils.cmake (+223 -0)
  38. libs/ultra_infer/cpack/debian_postinst.in (+0 -0)
  39. libs/ultra_infer/cpack/debian_prerm.in (+0 -0)
  40. libs/ultra_infer/cpack/rpm_postinst.in (+0 -0)
  41. libs/ultra_infer/cpack/rpm_postrm.in (+0 -0)
  42. libs/ultra_infer/python/__init__.py (+0 -0)
  43. libs/ultra_infer/python/requirements.txt (+0 -0)
  44. libs/ultra_infer/python/scripts/__init__.py (+0 -0)
  45. libs/ultra_infer/python/scripts/build_gpu.sh (+12 -0)
  46. libs/ultra_infer/python/scripts/process_libraries.py.in (+207 -0)
  47. libs/ultra_infer/python/setup.py (+485 -0)
  48. libs/ultra_infer/python/ultra_infer/__init__.py (+186 -0)
  49. libs/ultra_infer/python/ultra_infer/c_lib_wrap.py.in (+0 -0)
  50. libs/ultra_infer/python/ultra_infer/download.py (+0 -0)
  51. libs/ultra_infer/python/ultra_infer/model.py (+0 -0)
  52. libs/ultra_infer/python/ultra_infer/pipeline/__init__.py (+0 -0)
  53. libs/ultra_infer/python/ultra_infer/pipeline/pptinypose/__init__.py (+58 -0)
  54. libs/ultra_infer/python/ultra_infer/py_only/__init__.py (+0 -0)
  55. libs/ultra_infer/python/ultra_infer/py_only/base.py (+0 -0)
  56. libs/ultra_infer/python/ultra_infer/py_only/ts/__init__.py (+0 -0)
  57. libs/ultra_infer/python/ultra_infer/py_only/ts/model.py (+0 -0)
  58. libs/ultra_infer/python/ultra_infer/py_only/ts/processors.py (+0 -0)
  59. libs/ultra_infer/python/ultra_infer/py_only/vision/__init__.py (+0 -0)
  60. libs/ultra_infer/python/ultra_infer/py_only/vision/model.py (+0 -0)
  61. libs/ultra_infer/python/ultra_infer/py_only/vision/processors.py (+0 -0)
  62. libs/ultra_infer/python/ultra_infer/runtime.py (+706 -0)
  63. libs/ultra_infer/python/ultra_infer/text/__init__.py (+0 -0)
  64. libs/ultra_infer/python/ultra_infer/text/uie/__init__.py (+0 -0)
  65. libs/ultra_infer/python/ultra_infer/ts/__init__.py (+0 -0)
  66. libs/ultra_infer/python/ultra_infer/ts/anomalydetection/__init__.py (+0 -0)
  67. libs/ultra_infer/python/ultra_infer/ts/anomalydetection/ppts/__init__.py (+0 -0)
  68. libs/ultra_infer/python/ultra_infer/ts/classification/__init__.py (+0 -0)
  69. libs/ultra_infer/python/ultra_infer/ts/classification/ppts/__init__.py (+0 -0)
  70. libs/ultra_infer/python/ultra_infer/ts/forecasting/__init__.py (+0 -0)
  71. libs/ultra_infer/python/ultra_infer/ts/forecasting/ppts/__init__.py (+0 -0)
  72. libs/ultra_infer/python/ultra_infer/utils/__init__.py (+0 -0)
  73. libs/ultra_infer/python/ultra_infer/utils/example_resource.py (+0 -0)
  74. libs/ultra_infer/python/ultra_infer/utils/hub_config.py (+0 -0)
  75. libs/ultra_infer/python/ultra_infer/utils/hub_env.py (+57 -0)
  76. libs/ultra_infer/python/ultra_infer/utils/hub_model_server.py (+134 -0)
  77. libs/ultra_infer/python/ultra_infer/utils/misc.py (+0 -0)
  78. libs/ultra_infer/python/ultra_infer/vision/__init__.py (+0 -0)
  79. libs/ultra_infer/python/ultra_infer/vision/classification/__init__.py (+0 -0)
  80. libs/ultra_infer/python/ultra_infer/vision/classification/contrib/__init__.py (+0 -0)
  81. libs/ultra_infer/python/ultra_infer/vision/classification/contrib/resnet.py (+104 -0)
  82. libs/ultra_infer/python/ultra_infer/vision/classification/contrib/yolov5cls.py (+140 -0)
  83. libs/ultra_infer/python/ultra_infer/vision/classification/ppcls/__init__.py (+288 -0)
  84. libs/ultra_infer/python/ultra_infer/vision/classification/ppshitu/__init__.py (+145 -0)
  85. libs/ultra_infer/python/ultra_infer/vision/common/__init__.py (+0 -0)
  86. libs/ultra_infer/python/ultra_infer/vision/common/manager.py (+0 -0)
  87. libs/ultra_infer/python/ultra_infer/vision/common/processors.py (+0 -0)
  88. libs/ultra_infer/python/ultra_infer/vision/detection/__init__.py (+0 -0)
  89. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/__init__.py (+0 -0)
  90. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/fastestdet.py (+157 -0)
  91. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/nanodet_plus.py (+135 -0)
  92. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/rkyolo/__init__.py (+0 -0)
  93. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/rkyolo/rkyolov5.py (+315 -0)
  94. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/scaled_yolov4.py (+146 -0)
  95. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolor.py (+145 -0)
  96. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolov5.py (+227 -0)
  97. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolov5lite.py (+191 -0)
  98. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolov5seg.py (+222 -0)
  99. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolov6.py (+145 -0)
  100. libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolov7.py (+187 -0)

+ 54 - 0
libs/ultra_infer/.gitignore

@@ -0,0 +1,54 @@
+build
+cmake-build-debug
+cmake-build-release
+.vscode
+UltraInfer.cmake
+build-debug.sh
+*dist
+ultra_infer.egg-info
+ultra_infer_python.egg-info
+ultra_infer_gpu_python.egg-info
+.setuptools-cmake-build
+ultra_infer/version.py
+ultra_infer/core/config.h
+python/ultra_infer/c_lib_wrap.py
+python/ultra_infer/LICENSE*
+python/build_cpu.sh
+python/ultra_infer/ThirdPartyNotices*
+*.so*
+python/ultra_infer/libs/third_libs
+ultra_infer/core/config.h
+ultra_infer/pybind/main.cc
+python/ultra_infer/libs/lib*
+python/ultra_infer/libs/third_libs
+__pycache__
+python/scripts/process_libraries.py
+.vs
+.idea
+.DS_Store
+miniprogram_npm
+node_modules
+.DS_Store
+dist
+etc
+lib
+dist-ssr
+coverage
+*.local
+yalc.*
+.yalc
+examples/vision/collect_quantize_cc.sh
+examples/vision/tests_quantize
+ultra_infer/LICENSE
+ultra_infer/ThirdPartyNotices.txt
+UltraInferCSharp.cmake
+python/ultra_infer/code_version.py
+*.pdmodel
+*.pdiparams
+*.pdiparams.info
+log.txt
+benchmark/paddlex/build
+benchmark/cpp/build
+!paddlex/paddlex3.0/serving/libs/**/*.so*
+TensorRT*
+third_party

+ 735 - 0
libs/ultra_infer/CMakeLists.txt

@@ -0,0 +1,735 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+PROJECT(ultra_infer C CXX)
+CMAKE_MINIMUM_REQUIRED(VERSION 3.10)
+
+
+option(CSRCS_DIR_NAME "Name of source code directory")
+option(LIBRARY_NAME "Name of build library name")
+option(PY_LIBRARY_NAME "Name of build python library name")
+if(NOT CSRCS_DIR_NAME)
+  set(CSRCS_DIR_NAME ".")
+endif()
+if(NOT LIBRARY_NAME)
+  set(LIBRARY_NAME "ultra_infer")
+endif()
+if(NOT PY_LIBRARY_NAME)
+  set(PY_LIBRARY_NAME "ultra_infer_main")
+endif()
+
+include(ExternalProject)
+set(THIRD_PARTY_PATH ${CMAKE_CURRENT_BINARY_DIR}/third_libs)
+set(THIRD_PARTY_DIR ${PROJECT_SOURCE_DIR}/third_party)
+
+add_subdirectory(${CSRCS_DIR_NAME}/ultra_infer)
+include(${PROJECT_SOURCE_DIR}/cmake/utils.cmake)
+
+# Set C++11 as standard for the whole project
+if(NOT MSVC)
+  if(NOT DEFINED CMAKE_CXX_STANDARD)
+    set(CMAKE_CXX_STANDARD 11)
+  endif()
+  set(CMAKE_CXX_FLAGS "-Wno-format -g0 -O3")
+  if(NEED_ABI0)
+    add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+  else()
+    add_definitions(-D_GLIBCXX_USE_CXX11_ABI=1)
+  endif()
+endif(NOT MSVC)
+
+include(${PROJECT_SOURCE_DIR}/cmake/build_tools.cmake)
+if(UNIX AND (NOT APPLE) AND (NOT WITH_TIMVX))
+  download_patchelf()
+  set(PATCHELF_EXE ${THIRD_PARTY_PATH}/patchelf/bin/patchelf)
+endif()
+
+
+############################# Basic Options for UltraInfer ################################
+option(WITH_GPU "Whether WITH_GPU=ON, will enable onnxruntime-gpu/paddle-inference-gpu/poros-gpu" OFF)
+option(WITH_IPU "Whether WITH_IPU=ON, will enable paddle-inference-ipu" OFF)
+option(WITH_OPENCL "Whether WITH_OPENCL=ON, will enable paddle-lite-gpu" OFF)
+option(ENABLE_ORT_BACKEND "Whether to enable onnxruntime backend." OFF)
+option(ENABLE_TRT_BACKEND "Whether to enable tensorrt backend." OFF)
+option(ENABLE_PADDLE_BACKEND "Whether to enable paddle backend." OFF)
+option(ENABLE_POROS_BACKEND "Whether to enable poros backend." OFF)
+option(ENABLE_OPENVINO_BACKEND "Whether to enable openvino backend." OFF)
+option(ENABLE_RKNPU2_BACKEND "Whether to enable RKNPU2 backend." OFF)
+option(ENABLE_SOPHGO_BACKEND "Whether to enable SOPHON backend." OFF)
+option(ENABLE_TVM_BACKEND "Whether to enable TVM backend." OFF)
+option(ENABLE_LITE_BACKEND "Whether to enable paddle lite backend." OFF)
+option(ENABLE_HORIZON_BACKEND "Whether to enable HORIZON backend." OFF)
+option(ENABLE_VISION "Whether to enable vision models usage." OFF)
+option(ENABLE_TEXT "Whether to enable text models usage." OFF)
+option(ENABLE_FLYCV "Whether to enable flycv to boost image preprocess." OFF)
+option(ENABLE_CVCUDA "Whether to enable NVIDIA CV-CUDA to boost image preprocess." OFF)
+option(ENABLE_BENCHMARK "Whether to enable Benchmark mode." OFF)
+option(WITH_ASCEND "Whether to compile for Huawei Ascend deploy." OFF)
+option(WITH_DIRECTML "Whether to compile for onnxruntime DirectML deploy." OFF)
+option(WITH_TIMVX "Whether to compile for TIMVX deploy." OFF)
+option(WITH_KUNLUNXIN "Whether to compile for KunlunXin XPU deploy." OFF)
+option(WITH_TESTING "Whether to compile with unittest." OFF)
+option(WITH_CAPI "Whether to compile with c api." OFF)
+option(WITH_CSHARPAPI "Whether to compile with c# api" OFF)
+
+option(BUILD_EXAMPLES "Whether to build ultra_infer with vision examples" OFF)
+option(BUILD_PADDLE2ONNX "Whether to build paddle2onnx from sources" OFF)
+
+option(BUILD_FD_TRITON_BACKEND "Whether to compile as Triton Inference Server backend." OFF)
+
+######################### Paths to user's custom libraries directory #####################
+set(CUDA_DIRECTORY "" CACHE PATH "If build tensorrt backend, need to define path of cuda library.")
+set(TRT_DIRECTORY "" CACHE PATH "If build tensorrt backend, need to define path of tensorrt library.")
+set(ORT_DIRECTORY "" CACHE PATH "User can specify the installed onnxruntime directory.")
+set(OPENCV_DIRECTORY "" CACHE PATH "User can specify the installed opencv directory.")
+set(OPENVINO_DIRECTORY "" CACHE PATH "User can specify the installed openvino directory.")
+
+# Whether to build ultra_infer on device Nvidia Jetson
+# Only support CPU Inference & GPU(TensorRT) Inference Now
+option(BUILD_ON_JETSON "Whether to build ultra_infer on Nvidia Jetson" OFF)
+if(BUILD_ON_JETSON)
+  set(WITH_GPU ON)
+  set(ENABLE_TRT_BACKEND ON)
+  set(ENABLE_ORT_BACKEND ON)
+endif()
+
+# config GIT_URL with github mirrors to speed up dependent repos clone
+option(GIT_URL "Git URL to clone dependent repos" ${GIT_URL})
+if(NOT GIT_URL)
+    set(GIT_URL "https://github.com")
+endif()
+
+# check build options
+include(${PROJECT_SOURCE_DIR}/cmake/check.cmake)
+
+if(WIN32)
+  add_definitions(-DYAML_CPP_DLL)
+  set(YAML_BUILD_SHARED_LIBS ON)
+  set(YAML_CPP_INSTALL ON)
+  set(CMAKE_POLICY_DEFAULT_CMP0077 NEW)
+endif()
+
+if(NOT CUDA_DIRECTORY)
+  set(CUDA_DIRECTORY "/usr/local/cuda")
+endif()
+
+option(BUILD_ULTRAINFER_PYTHON "if build python lib for ultra_infer." OFF)
+
+set(HEAD_DIR "${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}")
+include_directories(${HEAD_DIR})
+include_directories(${CMAKE_CURRENT_BINARY_DIR})
+
+if (WITH_TIMVX)
+  include(${PROJECT_SOURCE_DIR}/cmake/timvx.cmake)
+endif()
+
+if (WITH_ASCEND)
+  include(${PROJECT_SOURCE_DIR}/cmake/ascend.cmake)
+endif()
+
+if (WITH_KUNLUNXIN)
+  include(${PROJECT_SOURCE_DIR}/cmake/kunlunxin.cmake)
+endif()
+
+if(WITH_IPU)
+  if(NOT ENABLE_PADDLE_BACKEND)
+    message("Will force to set ENABLE_PADDLE_BACKEND when build with GraphCore IPU.")
+    set(ENABLE_PADDLE_BACKEND ON)
+  endif()
+  add_definitions(-DWITH_IPU)
+endif()
+
+# Check for macOS architecture
+get_osx_architecture()
+
+##################################### Building: UltraInfer C++ SDK #######################################
+add_definitions(-DULTRAINFER_LIB)
+# set CMAKE_BUILD_TYPE to Release
+add_definitions(-DCMAKE_BUILD_TYPE=Release)
+# configure files before glob sources.
+configure_file(${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/core/config.h.in ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/core/config.h)
+configure_file(${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/pybind/main.cc.in ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/pybind/main.cc)
+file(GLOB_RECURSE ALL_DEPLOY_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/*.cc)
+file(GLOB_RECURSE DEPLOY_ORT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/ort/*.cc)
+file(GLOB_RECURSE DEPLOY_PADDLE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/paddle/*.cc)
+file(GLOB_RECURSE DEPLOY_POROS_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/poros/*.cc)
+file(GLOB_RECURSE DEPLOY_TRT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/tensorrt/*.cc ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/tensorrt/*.cpp)
+file(GLOB_RECURSE DEPLOY_OPENVINO_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/openvino/*.cc)
+file(GLOB_RECURSE DEPLOY_RKNPU2_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/rknpu2/*.cc)
+file(GLOB_RECURSE DEPLOY_HORIZON_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/horizon/*.cc)
+file(GLOB_RECURSE DEPLOY_SOPHGO_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/sophgo/*.cc)
+file(GLOB_RECURSE DEPLOY_TVM_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/tvm/*.cc)
+file(GLOB_RECURSE DEPLOY_LITE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/lite/*.cc)
+file(GLOB_RECURSE DEPLOY_PIPELINE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/pipeline/*.cc)
+file(GLOB_RECURSE DEPLOY_VISION_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/vision/*.cc)
+file(GLOB_RECURSE DEPLOY_TEXT_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/text/*.cc)
+file(GLOB_RECURSE DEPLOY_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/pybind/*.cc ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/*_pybind.cc)
+file(GLOB_RECURSE DEPLOY_PADDLE_CUSTOM_OP_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/paddle/ops/*.cc)
+if(WITH_GPU)
+  file(GLOB_RECURSE DEPLOY_CUDA_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/*.cu)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_CUDA_SRCS})
+  file(GLOB_RECURSE DEPLOY_PADDLE_CUSTOM_OP_CUDA_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/paddle/ops/*.cu)
+  list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_PADDLE_CUSTOM_OP_CUDA_SRCS})
+  file(GLOB_RECURSE DEPLOY_VISION_CUDA_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/vision/*.cu)
+  list(APPEND DEPLOY_VISION_SRCS ${DEPLOY_VISION_CUDA_SRCS})
+  file(GLOB_RECURSE DEPLOY_TEXT_CUDA_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/text/*.cu)
+  list(APPEND DEPLOY_TEXT_SRCS ${DEPLOY_TEXT_CUDA_SRCS})
+endif()
+list(REMOVE_ITEM DEPLOY_PADDLE_SRCS ${DEPLOY_PADDLE_CUSTOM_OP_SRCS})
+list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS} ${DEPLOY_PADDLE_SRCS}
+                                 ${DEPLOY_POROS_SRCS} ${DEPLOY_TRT_SRCS}
+                                 ${DEPLOY_OPENVINO_SRCS} ${DEPLOY_LITE_SRCS}
+                                 ${DEPLOY_VISION_SRCS} ${DEPLOY_TEXT_SRCS}
+                                 ${DEPLOY_PIPELINE_SRCS} ${DEPLOY_RKNPU2_SRCS}
+                                 ${DEPLOY_SOPHGO_SRCS}
+                                 ${DEPLOY_HORIZON_SRCS} ${DEPLOY_TVM_SRCS}
+                                 ${DEPLOY_PADDLE_CUSTOM_OP_SRCS})
+
+
+set(DEPEND_LIBS "")
+
+file(READ "${PROJECT_SOURCE_DIR}/VERSION_NUMBER" ULTRAINFER_VERSION)
+string(STRIP "${ULTRAINFER_VERSION}" ULTRAINFER_VERSION)
+
+# Add eigen lib
+download_eigen()
+include_directories(${PROJECT_SOURCE_DIR}/third_party/eigen)
+if(WIN32)
+  add_definitions(-DEIGEN_STRONG_INLINE=inline)
+endif()
+
+# sw(sunway) not support thread_local semantic
+if(WITH_SW)
+  add_definitions(-DEIGEN_AVOID_THREAD_LOCAL)
+endif()
+
+if(ENABLE_ORT_BACKEND)
+  set(ENABLE_PADDLE2ONNX ON)
+  add_definitions(-DENABLE_ORT_BACKEND)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS})
+  include(${PROJECT_SOURCE_DIR}/cmake/onnxruntime.cmake)
+  list(APPEND DEPEND_LIBS external_onnxruntime)
+endif()
+
+if(ENABLE_LITE_BACKEND)
+  add_definitions(-DENABLE_LITE_BACKEND)
+  include(${PROJECT_SOURCE_DIR}/cmake/paddlelite.cmake)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_LITE_SRCS})
+  list(APPEND DEPEND_LIBS external_paddle_lite)
+endif()
+
+if(ENABLE_PADDLE_BACKEND)
+  set(ENABLE_PADDLE2ONNX ON)
+  add_definitions(-DENABLE_PADDLE_BACKEND)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_PADDLE_SRCS})
+  include(${PROJECT_SOURCE_DIR}/cmake/paddle_inference.cmake)
+  list(APPEND DEPEND_LIBS external_paddle_inference)
+  if(external_dnnl_FOUND)
+    list(APPEND DEPEND_LIBS external_dnnl external_omp)
+  endif()
+  if(external_ort_FOUND)
+    list(APPEND DEPEND_LIBS external_p2o external_ort)
+  endif()
+  if(PADDLEINFERENCE_API_CUSTOM_OP)
+    set_paddle_custom_ops_compatible_policy()
+    list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_PADDLE_CUSTOM_OP_SRCS})
+    if(WITH_GPU)
+      list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_PADDLE_CUSTOM_OP_CUDA_SRCS})
+    endif()
+  endif()
+endif()
+
+if(ENABLE_OPENVINO_BACKEND)
+  set(ENABLE_PADDLE2ONNX ON)
+  add_definitions(-DENABLE_OPENVINO_BACKEND)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_OPENVINO_SRCS})
+  include(${PROJECT_SOURCE_DIR}/cmake/openvino.cmake)
+endif()
+
+if(ENABLE_RKNPU2_BACKEND)
+  add_definitions(-DENABLE_RKNPU2_BACKEND)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_RKNPU2_SRCS})
+  include(${PROJECT_SOURCE_DIR}/cmake/rknpu2.cmake)
+  list(APPEND DEPEND_LIBS ${RKNN_RT_LIB})
+endif()
+
+if(ENABLE_HORIZON_BACKEND)
+  add_definitions(-DENABLE_HORIZON_BACKEND)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_HORIZON_SRCS})
+  include(${PROJECT_SOURCE_DIR}/cmake/horizon.cmake)
+  list(APPEND DEPEND_LIBS ${BPU_libs})
+endif()
+
+if(ENABLE_TVM_BACKEND)
+  set(CMAKE_CXX_STANDARD 17)
+  add_definitions(-DENABLE_TVM_BACKEND)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_TVM_SRCS})
+  include(${PROJECT_SOURCE_DIR}/cmake/tvm.cmake)
+  list(APPEND DEPEND_LIBS ${TVM_RUNTIME_LIB})
+endif()
+
+if(ENABLE_SOPHGO_BACKEND)
+  add_definitions(-DENABLE_SOPHGO_BACKEND)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_SOPHGO_SRCS})
+  include(${PROJECT_SOURCE_DIR}/cmake/sophgo.cmake)
+  list(APPEND DEPEND_LIBS ${SOPHGO_RT_LIB})
+endif()
+
+if(ENABLE_POROS_BACKEND)
+  set(CMAKE_CXX_STANDARD 14)
+  add_definitions(-DENABLE_POROS_BACKEND)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_POROS_SRCS})
+  include(${PROJECT_SOURCE_DIR}/cmake/poros.cmake)
+  list(APPEND DEPEND_LIBS external_poros)
+  set(PYTHON_MINIMUM_VERSION 3.6)
+  set(PYTORCH_MINIMUM_VERSION 1.9)
+  set(TENSORRT_MINIMUM_VERSION 8.0)
+  # find python3
+  find_package(Python3 ${PYTHON_MINIMUM_VERSION} REQUIRED COMPONENTS Interpreter Development)
+  message(STATUS "Found Python: ${Python3_VERSION_MAJOR}.${Python3_VERSION_MINOR}.${Python3_VERSION_PATCH}")
+
+  if (NOT Python3_SITELIB)
+    message(FATAL_ERROR "site-packages not found. ")
+  else ()
+    message(STATUS "site-packages: ${Python3_SITELIB}")
+  endif ()
+  include_directories(${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/poros/common)
+  # find trt
+  if(NOT WITH_GPU)
+  message(FATAL_ERROR "While -DENABLE_POROS_BACKEND=ON, must set -DWITH_GPU=ON, but now it's OFF")
+  endif()
+  if(NOT TRT_DIRECTORY)
+    message(FATAL_ERROR "While -DENABLE_POROS_BACKEND=ON, must define -DTRT_DIRECTORY, e.g -DTRT_DIRECTORY=/Downloads/TensorRT-8.4")
+  endif()
+  include_directories(${TRT_DIRECTORY}/include)
+  find_library(TRT_INFER_LIB nvinfer ${TRT_DIRECTORY}/lib)
+  find_library(TRT_ONNX_LIB nvonnxparser ${TRT_DIRECTORY}/lib)
+  find_library(TRT_PLUGIN_LIB nvinfer_plugin ${TRT_DIRECTORY}/lib)
+  list(APPEND DEPEND_LIBS ${TRT_INFER_LIB} ${TRT_ONNX_LIB} ${TRT_PLUGIN_LIB})
+  if(NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
+    file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
+  endif()
+  if(EXISTS "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib")
+    file(REMOVE_RECURSE "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib")
+  endif()
+  find_package(Python COMPONENTS Interpreter Development REQUIRED)
+  message(STATUS "Copying ${TRT_DIRECTORY}/lib to ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib ...")
+  execute_process(COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/scripts/copy_directory.py ${TRT_DIRECTORY}/lib ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib)
+endif()
+
+if(WITH_GPU)
+  add_definitions(-DWITH_GPU)
+  include_directories(${CUDA_DIRECTORY}/include)
+  if(WIN32)
+    find_library(CUDA_LIB cudart ${CUDA_DIRECTORY}/lib/x64)
+    find_library(NVJPEG_LIB nvjpeg ${CUDA_DIRECTORY}/lib/x64)
+    add_definitions(-DENABLE_NVJPEG)
+  else()
+    find_library(CUDA_LIB cudart ${CUDA_DIRECTORY}/lib64)
+    if(NOT BUILD_ON_JETSON)
+      find_library(NVJPEG_LIB nvjpeg ${CUDA_DIRECTORY}/lib64)
+      add_definitions(-DENABLE_NVJPEG)
+    endif()
+  endif()
+  list(APPEND DEPEND_LIBS ${CUDA_LIB} ${NVJPEG_LIB})
+
+  # build CUDA source files in ultra_infer, CUDA source files include CUDA preprocessing, TRT plugins, etc.
+  enable_language(CUDA)
+  message(STATUS "CUDA compiler: ${CMAKE_CUDA_COMPILER}, version: "
+                  "${CMAKE_CUDA_COMPILER_ID} ${CMAKE_CUDA_COMPILER_VERSION}")
+  include(${PROJECT_SOURCE_DIR}/cmake/cuda.cmake)
+endif()
+
+if(WITH_OPENCL)
+  add_definitions(-DWITH_OPENCL)
+endif()
+
+if(ENABLE_TRT_BACKEND)
+  set(ENABLE_PADDLE2ONNX ON)
+  if(APPLE OR IOS)
+    message(FATAL_ERROR "Cannot enable tensorrt backend in mac/ios os, please set -DENABLE_TRT_BACKEND=OFF.")
+  endif()
+  if(NOT WITH_GPU)
+    message(FATAL_ERROR "While -DENABLE_TRT_BACKEND=ON, must set -DWITH_GPU=ON, but now it's OFF")
+  endif()
+  if(NOT BUILD_ON_JETSON)
+    if(NOT TRT_DIRECTORY)
+      set(TRT_INC_DIR /usr/include/x86_64-linux-gnu/)
+      set(TRT_LIB_DIR /usr/lib/x86_64-linux-gnu/)
+    endif()
+  endif()
+  if(BUILD_ON_JETSON)
+    set(TRT_INC_DIR /usr/include/aarch64-linux-gnu/)
+    set(TRT_LIB_DIR /usr/lib/aarch64-linux-gnu/)
+  else()
+    set(TRT_INC_DIR /usr/include/x86_64-linux-gnu/)
+    set(TRT_LIB_DIR /usr/lib/x86_64-linux-gnu/)
+    if(TRT_DIRECTORY)
+      set(TRT_INC_DIR ${TRT_DIRECTORY}/include)
+      set(TRT_LIB_DIR ${TRT_DIRECTORY}/lib)
+    endif()
+  endif()
+
+  add_definitions(-DENABLE_TRT_BACKEND)
+  include_directories(${TRT_INC_DIR})
+  include_directories(${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/tensorrt/common)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_TRT_SRCS})
+  find_library(TRT_INFER_LIB nvinfer ${TRT_LIB_DIR} NO_DEFAULT_PATH)
+  find_library(TRT_ONNX_LIB nvonnxparser ${TRT_LIB_DIR} NO_DEFAULT_PATH)
+  find_library(TRT_PLUGIN_LIB nvinfer_plugin ${TRT_LIB_DIR} NO_DEFAULT_PATH)
+  list(APPEND DEPEND_LIBS ${TRT_INFER_LIB} ${TRT_ONNX_LIB} ${TRT_PLUGIN_LIB})
+
+  if(NOT BUILD_ON_JETSON AND TRT_DIRECTORY)
+    if(NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
+      file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt")
+    endif()
+    if(EXISTS "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib")
+      file(REMOVE_RECURSE "${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib")
+    endif()
+
+    if (NOT Python_EXECUTABLE)
+      find_package(Python COMPONENTS Interpreter Development REQUIRED)
+    endif()
+
+    message(STATUS "Copying ${TRT_DIRECTORY}/lib to ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib ...")
+    execute_process(COMMAND ${Python_EXECUTABLE} ${PROJECT_SOURCE_DIR}/scripts/copy_directory.py ${TRT_DIRECTORY}/lib ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib)
+    file(GLOB_RECURSE TRT_STATIC_LIBS ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib/*.a)
+    if(TRT_STATIC_LIBS)
+      file(REMOVE ${TRT_STATIC_LIBS})
+    endif()
+    if(UNIX AND (NOT APPLE))
+      execute_process(COMMAND sh -c "ls *.so*" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib
+                      COMMAND sh -c "xargs ${PATCHELF_EXE} --force-rpath --set-rpath '$ORIGIN'" WORKING_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/tensorrt/lib
+                      RESULT_VARIABLE result
+                      OUTPUT_VARIABLE curr_out
+                      ERROR_VARIABLE  curr_out)
+      if(result EQUAL "1")
+        message(FATAL_ERROR "Failed to patchelf tensorrt libraries.")
+      endif()
+      message(STATUS "result:${result} out:${curr_out}")
+    endif()
+  endif()
+endif()
+
+if(ENABLE_VISION)
+  add_definitions(-DENABLE_VISION)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_VISION_SRCS})
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_PIPELINE_SRCS})
+  include(${PROJECT_SOURCE_DIR}/cmake/opencv.cmake)
+
+  if(ENABLE_FLYCV)
+    add_definitions(-DENABLE_FLYCV)
+    include(${PROJECT_SOURCE_DIR}/cmake/flycv.cmake)
+    list(APPEND DEPEND_LIBS ${FLYCV_LIBRARIES})
+  endif()
+
+  if(ENABLE_CVCUDA)
+    include(${PROJECT_SOURCE_DIR}/cmake/cvcuda.cmake)
+    add_definitions(-DENABLE_CVCUDA)
+    list(APPEND DEPEND_LIBS nvcv_types cvcuda)
+  endif()
+endif()
+
+download_yaml_cpp()
+add_subdirectory(${PROJECT_SOURCE_DIR}/third_party/yaml-cpp)
+list(APPEND DEPEND_LIBS yaml-cpp)
+include_directories(${PROJECT_SOURCE_DIR}/third_party/yaml-cpp/include)
+
+if(ENABLE_TEXT)
+  add_definitions(-DENABLE_TEXT)
+  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_TEXT_SRCS})
+  include(${PROJECT_SOURCE_DIR}/cmake/fast_tokenizer.cmake)
+endif()
+
+if(ENABLE_PADDLE2ONNX)
+  add_definitions(-DENABLE_PADDLE2ONNX)
+  if(BUILD_PADDLE2ONNX)
+    download_protobuf()
+    download_onnx()
+    download_optimizer()
+    include(${PROJECT_SOURCE_DIR}/cmake/build_paddle2onnx.cmake)
+    list(APPEND ALL_DEPLOY_SRCS ${PADDLE2ONNX_ALL_SRCS})
+    list(APPEND DEPEND_LIBS p2o_paddle_proto onnx)
+  else()
+    include(${PROJECT_SOURCE_DIR}/cmake/paddle2onnx.cmake)
+    list(APPEND DEPEND_LIBS external_paddle2onnx)
+  endif()
+endif(ENABLE_PADDLE2ONNX)
+
+if(WITH_CAPI)
+  include(${PROJECT_SOURCE_DIR}/c_api/CMakeLists.txt)
+  if(MSVC)
+  add_definitions(-DFD_CAPI)
+  endif()
+endif()
+
+if(WITH_CSHARPAPI)
+  if(MSVC)
+  add_subdirectory(${PROJECT_SOURCE_DIR}/csharp)
+  endif()
+endif()
+
+configure_file(${PROJECT_SOURCE_DIR}/UltraInfer.cmake.in ${PROJECT_SOURCE_DIR}/UltraInfer.cmake @ONLY)
+configure_file(${PROJECT_SOURCE_DIR}/UltraInferCSharp.cmake.in ${PROJECT_SOURCE_DIR}/UltraInferCSharp.cmake @ONLY)
+if(BUILD_FD_TRITON_BACKEND)
+  configure_file(${PROJECT_SOURCE_DIR}/python/ultra_infer/c_lib_wrap.py.in ${PROJECT_SOURCE_DIR}/python/ultra_infer/c_lib_wrap.py)
+else()
+  configure_file(${PROJECT_SOURCE_DIR}/python/${LIBRARY_NAME}/c_lib_wrap.py.in ${PROJECT_SOURCE_DIR}/python/${LIBRARY_NAME}/c_lib_wrap.py)
+endif()
+configure_file(${PROJECT_SOURCE_DIR}/python/scripts/process_libraries.py.in ${PROJECT_SOURCE_DIR}/python/scripts/process_libraries.py)
+
+list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_PYBIND_SRCS})
+
+add_library(${LIBRARY_NAME} SHARED ${ALL_DEPLOY_SRCS})
+
+redefine_file_macro(${LIBRARY_NAME})
+
+file(READ "${PROJECT_SOURCE_DIR}/VERSION_NUMBER" ULTRAINFER_VERSION)
+string(STRIP "${ULTRAINFER_VERSION}" ULTRAINFER_VERSION)
+if (APPLE)
+  set_target_properties(${LIBRARY_NAME} PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
+elseif(MSVC)
+else()
+  if(WITH_GPU)
+    set_target_properties(${LIBRARY_NAME} PROPERTIES CUDA_SEPARABLE_COMPILATION ON)
+    set_target_properties(${LIBRARY_NAME} PROPERTIES INTERFACE_COMPILE_OPTIONS
+       "$<$<BUILD_INTERFACE:$<COMPILE_LANGUAGE:CXX>>:-fvisibility=hidden>$<$<BUILD_INTERFACE:$<COMPILE_LANGUAGE:CUDA>>:-Xcompiler=-fvisibility=hidden>")
+  else()
+    set_target_properties(${LIBRARY_NAME} PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
+  endif()
+  set_target_properties(${LIBRARY_NAME} PROPERTIES LINK_FLAGS "-Wl,--exclude-libs,ALL")
+  set_target_properties(${LIBRARY_NAME} PROPERTIES LINK_FLAGS_RELEASE -s)
+endif()
+
+set_target_properties(${LIBRARY_NAME} PROPERTIES VERSION ${ULTRAINFER_VERSION})
+if(MSVC)
+  # disable warnings for dll export
+  target_compile_options(${LIBRARY_NAME} PRIVATE "$<$<BUILD_INTERFACE:$<COMPILE_LANGUAGE:CXX>>:/wd4251>$<$<BUILD_INTERFACE:$<COMPILE_LANGUAGE:CUDA>>:-Xcompiler=/wd4251>")
+  file(GLOB FD_FILES_REQUIRE_BIGOBJ ${CSRCS_DIR_NAME}/ultra_infer/function/reduce.cc)
+  set_source_files_properties(${FD_FILES_REQUIRE_BIGOBJ} PROPERTIES COMPILE_FLAGS "/bigobj")
+endif()
+
+target_link_libraries(${LIBRARY_NAME} ${DEPEND_LIBS})
+
+##################################### Examples ####################################
+if(WIN32)
+  if("${CMAKE_GENERATOR}" STREQUAL "Ninja")
+    add_custom_target(copy_yaml_library ALL COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_BINARY_DIR}/third_party/yaml-cpp  ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/yaml-cpp/lib DEPENDS ${LIBRARY_NAME})
+  else()
+    add_custom_target(copy_yaml_library ALL COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_BINARY_DIR}/third_party/yaml-cpp/Release  ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/yaml-cpp/lib DEPENDS ${LIBRARY_NAME})
+    add_custom_target(copy_yaml_include ALL COMMAND ${CMAKE_COMMAND} -E copy_directory ${PROJECT_SOURCE_DIR}/third_party/yaml-cpp/include  ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install/yaml-cpp/include DEPENDS ${LIBRARY_NAME})
+  endif()
+endif()
+
+# add examples after prepare include paths for third-parties
+if(BUILD_EXAMPLES AND EXISTS ${PROJECT_SOURCE_DIR}/examples)
+  add_definitions(-DBUILD_EXAMPLES)
+  if(NOT EXECUTABLE_OUTPUT_PATH STREQUAL ${CMAKE_CURRENT_BINARY_DIR}/bin)
+    set(EXECUTABLE_OUTPUT_PATH ${CMAKE_CURRENT_BINARY_DIR}/bin)
+  endif()
+  include(${PROJECT_SOURCE_DIR}/cmake/gflags.cmake)
+  add_subdirectory(examples)
+endif()
+
+if (WITH_TESTING AND EXISTS ${PROJECT_SOURCE_DIR}/tests)
+  add_definitions(-DWITH_TESTING)
+  include(${PROJECT_SOURCE_DIR}/cmake/gtest.cmake)
+  if(NOT BUILD_EXAMPLES)
+    include(${PROJECT_SOURCE_DIR}/cmake/gflags.cmake)
+  endif()
+  include(${PROJECT_SOURCE_DIR}/cmake/glog.cmake)
+  add_subdirectory(tests)
+endif()
+
+include(${PROJECT_SOURCE_DIR}/cmake/summary.cmake)
+ultra_infer_summary()
+
+################################ Installation: UltraInfer C++ SDK ###############################
+if(WIN32)
+  install(
+    TARGETS ${LIBRARY_NAME}
+    LIBRARY DESTINATION lib
+    ARCHIVE DESTINATION lib
+    RUNTIME DESTINATION lib
+  )
+else()
+  install(
+    TARGETS ${LIBRARY_NAME}
+    LIBRARY DESTINATION lib)
+endif()
+
+install(
+  DIRECTORY ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer
+  DESTINATION ${CMAKE_INSTALL_PREFIX}/include
+  FILES_MATCHING
+  PATTERN "*.h"
+  PATTERN "${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/runtime/backends/*/*.h"
+)
+
+install(
+  DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install
+  DESTINATION ${CMAKE_INSTALL_PREFIX}/third_libs
+)
+
+install(
+  FILES
+  ${PROJECT_SOURCE_DIR}/LICENSE
+  ${PROJECT_SOURCE_DIR}/ThirdPartyNotices.txt
+  ${PROJECT_SOURCE_DIR}/VERSION_NUMBER
+  ${PROJECT_SOURCE_DIR}/UltraInfer.cmake
+  ${PROJECT_SOURCE_DIR}/UltraInferCSharp.cmake
+  ${PROJECT_SOURCE_DIR}/cmake/UltraInferConfig.cmake
+  ${PROJECT_SOURCE_DIR}/cmake/utils.cmake
+  ${PROJECT_SOURCE_DIR}/cmake/summary.cmake
+  DESTINATION ${CMAKE_INSTALL_PREFIX}
+)
+
+install(
+  FILES ${PROJECT_SOURCE_DIR}/cmake/gflags.cmake
+  DESTINATION ${CMAKE_INSTALL_PREFIX}/utils
+)
+
+if(NOT WIN32)
+  install(
+    FILES ${PROJECT_SOURCE_DIR}/scripts/ultra_infer_init.sh
+    DESTINATION ${CMAKE_INSTALL_PREFIX}
+  )
+else()
+  install(
+    FILES ${PROJECT_SOURCE_DIR}/scripts/ultra_infer_init.bat
+    DESTINATION ${CMAKE_INSTALL_PREFIX}
+  )
+endif()
+
+if(WITH_ASCEND)
+  install(
+    FILES ${PROJECT_SOURCE_DIR}/scripts/ascend_init.sh
+    DESTINATION ${CMAKE_INSTALL_PREFIX}
+  )
+endif()
+
+if(WITH_CAPI)
+  install(
+    DIRECTORY ${PROJECT_SOURCE_DIR}/c_api/ultra_infer_capi
+    DESTINATION ${CMAKE_INSTALL_PREFIX}/include
+    FILES_MATCHING
+    PATTERN "*.h"
+    PATTERN "*/types_internal.h" EXCLUDE
+  )
+endif()
+
+include(${PROJECT_SOURCE_DIR}/cmake/config_cpack.cmake)
+
+if(WIN32 AND BUILD_EXAMPLES)
+  get_windows_path(_tmp_install_dir ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install)
+  get_windows_path(_publish_exe_dir ${EXECUTABLE_OUTPUT_PATH}/Release)
+  list(GET CMAKE_CONFIGURATION_TYPES 0 _CONFIG_TYPE)
+  if((${CMAKE_BUILD_TYPE} MATCHES "Release") OR (${_CONFIG_TYPE} MATCHES "Release"))
+    install(TARGETS ${LIBRARY_NAME} RUNTIME DESTINATION ${EXECUTABLE_OUTPUT_PATH}/Release)
+    add_custom_target(
+      copy_fd_third_dlls_examples ALL COMMAND
+      cmd /C ${PROJECT_SOURCE_DIR}/scripts/ultra_infer_init.bat install ${_tmp_install_dir} ${_publish_exe_dir} noconfirm)
+    add_dependencies(copy_fd_third_dlls_examples ${LIBRARY_NAME} copy_yaml_library)
+  endif()
+endif()
+
+############################### Building: UltraInfer Python Wheel #############################
+if(BUILD_ULTRAINFER_PYTHON)
+  add_definitions(-DBUILD_ULTRAINFER_PYTHON)
+  if("${PY_EXT_SUFFIX}" STREQUAL "")
+    if(MSVC)
+      set(PY_EXT_SUFFIX ".pyd")
+    else()
+      set(PY_EXT_SUFFIX ".so")
+    endif()
+  endif()
+
+  # find_package Python has replaced PythonInterp and PythonLibs since cmake 3.12
+  # Use the following command in the future; now this is only compatible with the latest pybind11
+  # find_package(Python ${PY_VERSION} COMPONENTS Interpreter Development REQUIRED)
+  find_package(PythonInterp ${PY_VERSION} REQUIRED)
+  find_package(PythonLibs ${PY_VERSION})
+  if(CMAKE_SYSTEM_NAME STREQUAL "AIX")
+    set(CMAKE_NO_SYSTEM_FROM_IMPORTED 1)
+  endif()
+
+  if(NOT ENABLE_VISION)
+    file(GLOB_RECURSE VISION_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/vision/*_pybind.cc)
+    file(GLOB_RECURSE PIPELINE_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/pipeline/*_pybind.cc)
+    list(REMOVE_ITEM DEPLOY_PYBIND_SRCS ${VISION_PYBIND_SRCS} ${PIPELINE_PYBIND_SRCS})
+  endif()
+
+  if (NOT ENABLE_TEXT)
+    file(GLOB_RECURSE TEXT_PYBIND_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/ultra_infer/text/*_pybind.cc)
+    list(REMOVE_ITEM DEPLOY_PYBIND_SRCS ${TEXT_PYBIND_SRCS})
+  endif()
+
+  add_library(${PY_LIBRARY_NAME} MODULE ${DEPLOY_PYBIND_SRCS})
+  redefine_file_macro(${PY_LIBRARY_NAME})
+  set_target_properties(${PY_LIBRARY_NAME} PROPERTIES PREFIX "")
+  set_target_properties(${PY_LIBRARY_NAME}
+                        PROPERTIES COMPILE_FLAGS "-fvisibility=hidden")
+  set_target_properties(${PY_LIBRARY_NAME} PROPERTIES SUFFIX ${PY_EXT_SUFFIX})
+  set_target_properties(${PY_LIBRARY_NAME}
+                        PROPERTIES LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR})
+  target_include_directories(${PY_LIBRARY_NAME} PRIVATE
+                             $<BUILD_INTERFACE:${CMAKE_CURRENT_BINARY_DIR}>
+                             $<INSTALL_INTERFACE:include>
+                             ${PYTHON_INCLUDE_DIR})
+
+  download_pybind()
+  target_include_directories(${PY_LIBRARY_NAME} PUBLIC ${PROJECT_SOURCE_DIR}/third_party/pybind11/include)
+  download_dlpack()
+  target_include_directories(${PY_LIBRARY_NAME} PUBLIC ${PROJECT_SOURCE_DIR}/third_party/dlpack/include)
+
+  if(APPLE)
+    set_target_properties(${PY_LIBRARY_NAME}
+                          PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
+  endif()
+
+  target_link_libraries(${PY_LIBRARY_NAME} PUBLIC ${LIBRARY_NAME})
+
+  if(MSVC)
+    target_link_libraries(${PY_LIBRARY_NAME} PRIVATE ${PYTHON_LIBRARIES})
+    target_compile_options(${PY_LIBRARY_NAME}
+                           PRIVATE /MP
+                                   /wd4244 # 'argument': conversion from 'google::
+                                           # protobuf::uint64' to 'int', possible
+                                           # loss of data
+                                   /wd4267 # Conversion from 'size_t' to 'int',
+                                           # possible loss of data
+                                   /wd4996 # The second parameter is ignored.
+                                   ${EXTRA_FLAGS})
+    target_compile_options(${PY_LIBRARY_NAME} PRIVATE $<$<NOT:$<CONFIG:Debug>>:/MT> $<$<CONFIG:Debug>:/MTd>)
+  endif()
+
+  file(REMOVE_RECURSE ${PROJECT_SOURCE_DIR}/python/${LIBRARY_NAME}/libs)
+  file(MAKE_DIRECTORY ${PROJECT_SOURCE_DIR}/python/${LIBRARY_NAME}/libs)
+
+  if(WIN32)
+    add_custom_target(copy_fd_libraries ALL COMMAND ${CMAKE_COMMAND} -E copy_directory   ${CMAKE_CURRENT_BINARY_DIR}/Release ${PROJECT_SOURCE_DIR}/python/${LIBRARY_NAME}/libs/ DEPENDS ${PY_LIBRARY_NAME})
+  elseif(APPLE)
+    add_custom_target(copy_fd_libraries ALL COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/*.so** ${CMAKE_CURRENT_BINARY_DIR}/*.dylib** ${PROJECT_SOURCE_DIR}/python/${LIBRARY_NAME}/libs/ DEPENDS ${PY_LIBRARY_NAME})
+  else()
+    add_custom_target(copy_fd_libraries ALL COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_BINARY_DIR}/*.so* ${PROJECT_SOURCE_DIR}/python/${LIBRARY_NAME}/libs/ DEPENDS ${PY_LIBRARY_NAME})
+  endif()
+  add_custom_target(copy_third_libraries ALL COMMAND ${CMAKE_COMMAND} -E copy_directory ${CMAKE_CURRENT_BINARY_DIR}/third_libs/install ${PROJECT_SOURCE_DIR}/python/${LIBRARY_NAME}/libs/third_libs DEPENDS ${PY_LIBRARY_NAME})
+endif(BUILD_ULTRAINFER_PYTHON)
+
+if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+  if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.4.0")
+    string(STRIP "${CMAKE_CXX_COMPILER_VERSION}" CMAKE_CXX_COMPILER_VERSION)
+    message(FATAL_ERROR "[ERROR] UltraInfer require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.")
+  endif()
+endif()
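
A note on how this CMakeLists selects sources: it globs every .cc file once, strips each optional backend's sources out of the default set, and re-adds them only when the matching ENABLE_*_BACKEND option is ON, together with a compile definition the C++ code can test. A minimal standalone sketch of that pattern follows; the file names are hypothetical, not the project's real layout:

cmake_minimum_required(VERSION 3.10)
project(pattern_demo CXX)

option(ENABLE_ORT_BACKEND "Enable the ONNX Runtime backend." OFF)

# Stand-ins for the GLOB_RECURSE results in the real script.
set(ALL_DEPLOY_SRCS core.cc runtime.cc backends/ort/ort_backend.cc)
set(DEPLOY_ORT_SRCS backends/ort/ort_backend.cc)

# 1) Subtract the optional backend's sources from the default set ...
list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS})

# 2) ... then re-add them only when the backend is switched on, along with
#    the compile-time flag guarding the backend-specific code paths.
if(ENABLE_ORT_BACKEND)
  add_definitions(-DENABLE_ORT_BACKEND)
  list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS})
endif()

message(STATUS "Sources selected for the build: ${ALL_DEPLOY_SRCS}")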

+ 0 - 0
libs/ultrainfer/LICENSE → libs/ultra_infer/LICENSE


+ 0 - 0
libs/ultrainfer/ThirdPartyNotices.txt → libs/ultra_infer/ThirdPartyNotices.txt


+ 335 - 0
libs/ultra_infer/UltraInfer.cmake.in

@@ -0,0 +1,335 @@
+CMAKE_MINIMUM_REQUIRED(VERSION 3.8)
+
+# UltraInfer basic infos
+set(ULTRAINFER_VERSION @ULTRAINFER_VERSION@)
+set(LIBRARY_NAME @LIBRARY_NAME@)
+
+# If compile with GLIBC_CXX_ABI=0
+set(NEED_ABI0 @NEED_ABI0@)
+
+# Hardware and Language API
+set(WITH_GPU @WITH_GPU@)
+set(WITH_IPU @WITH_IPU@)
+set(WITH_OPENCL @WITH_OPENCL@)
+set(WITH_ASCEND @WITH_ASCEND@)
+set(WITH_DIRECTML @WITH_DIRECTML@)
+set(WITH_TIMVX @WITH_TIMVX@)
+set(WITH_KUNLUNXIN @WITH_KUNLUNXIN@)
+set(WITH_CAPI @WITH_CAPI@)
+set(WITH_CSHARPAPI @WITH_CSHARPAPI@)
+set(WITH_TESTING @WITH_TESTING@)
+set(BUILD_ON_JETSON @BUILD_ON_JETSON@)
+set(RKNN2_TARGET_SOC "@RKNN2_TARGET_SOC@")
+
+# Inference backend and UltraInfer Module
+set(ENABLE_ORT_BACKEND @ENABLE_ORT_BACKEND@)
+set(ENABLE_RKNPU2_BACKEND @ENABLE_RKNPU2_BACKEND@)
+set(ENABLE_TVM_BACKEND @ENABLE_TVM_BACKEND@)
+set(ENABLE_HORIZON_BACKEND @ENABLE_HORIZON_BACKEND@)
+set(ENABLE_SOPHGO_BACKEND @ENABLE_SOPHGO_BACKEND@)
+set(ENABLE_LITE_BACKEND @ENABLE_LITE_BACKEND@)
+set(ENABLE_PADDLE_BACKEND @ENABLE_PADDLE_BACKEND@)
+set(ENABLE_OPENVINO_BACKEND @ENABLE_OPENVINO_BACKEND@)
+set(ENABLE_POROS_BACKEND @ENABLE_POROS_BACKEND@)
+set(ENABLE_TRT_BACKEND @ENABLE_TRT_BACKEND@)
+set(ENABLE_PADDLE2ONNX @ENABLE_PADDLE2ONNX@)
+set(BUILD_PADDLE2ONNX @BUILD_PADDLE2ONNX@)
+
+set(ENABLE_VISION @ENABLE_VISION@)
+set(ENABLE_FLYCV @ENABLE_FLYCV@)
+set(ENABLE_CVCUDA @ENABLE_CVCUDA@)
+set(ENABLE_TEXT @ENABLE_TEXT@)
+set(ENABLE_BENCHMARK @ENABLE_BENCHMARK@)
+
+# Version infos and custom settings for third libs
+set(PADDLEINFERENCE_VERSION @PADDLEINFERENCE_VERSION@)
+set(POROS_VERSION @POROS_VERSION@)
+set(OPENVINO_VERSION @OPENVINO_VERSION@)
+set(OPENCV_FILENAME @OPENCV_FILENAME@)
+set(OPENVINO_FILENAME @OPENVINO_FILENAME@)
+set(PADDLELITE_FILENAME @PADDLELITE_FILENAME@)
+set(OPENCV_DIRECTORY "@OPENCV_DIRECTORY@")
+set(ORT_DIRECTORY "@ORT_DIRECTORY@")
+set(OPENVINO_DIRECTORY "@OPENVINO_DIRECTORY@")
+
+set(ULTRAINFER_LIBS "")
+set(ULTRAINFER_INCS "")
+list(APPEND ULTRAINFER_INCS ${CMAKE_CURRENT_LIST_DIR}/include)
+
+# Note(zhoushunjie): include some useful utils function
+include(${CMAKE_CURRENT_LIST_DIR}/utils.cmake)
+
+# Set C++11 as standard for the whole project
+if(NOT MSVC)
+  set(CMAKE_CXX_STANDARD 11)
+  set(CMAKE_CXX_FLAGS "-Wno-format")
+  if(NEED_ABI0)
+    add_definitions(-D_GLIBCXX_USE_CXX11_ABI=0)
+  else()
+    add_definitions(-D_GLIBCXX_USE_CXX11_ABI=1)
+  endif()
+endif(NOT MSVC)
+
+# Still need omp while using UltraInfer static lib.
+# This is due to the use of openmp for Paddle Lite's
+# static library.
+
+find_library(FDLIB ${LIBRARY_NAME} ${CMAKE_CURRENT_LIST_DIR}/lib NO_DEFAULT_PATH)
+list(APPEND ULTRAINFER_LIBS ${FDLIB})
+
+if(ENABLE_ORT_BACKEND)
+  if (ORT_DIRECTORY)
+    set(ORT_LIB_PATH ${ORT_DIRECTORY}/lib)
+  else()
+    set(ORT_LIB_PATH ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/onnxruntime/lib)
+  endif()
+  message(STATUS "The path of ONNXRuntime is ${ORT_LIB_PATH}.")
+  find_library(ORT_LIB onnxruntime ${ORT_LIB_PATH}  NO_DEFAULT_PATH)
+  list(APPEND ULTRAINFER_LIBS ${ORT_LIB})
+endif()
+
+if(ENABLE_TVM_BACKEND)
+    if(APPLE)
+        set(TVM_RUNTIME_LIB ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tvm/lib/libtvm_runtime.dylib)
+    else()
+        set(TVM_RUNTIME_LIB ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tvm/lib/libtvm_runtime.so)
+    endif()
+    list(APPEND ULTRAINFER_LIBS ${TVM_RUNTIME_LIB})
+endif()
+
+if(ENABLE_PADDLE_BACKEND)
+  string(REGEX MATCH "([0-9]+)\\.([0-9]+)\\.([0-9]+)" _ "${PADDLEINFERENCE_VERSION}")
+  set(PADDLEINFERENCE_VERSION_MAJOR "${CMAKE_MATCH_1}")
+  set(PADDLEINFERENCE_VERSION_MINOR "${CMAKE_MATCH_2}")
+  set(PADDLEINFERENCE_VERSION_PATCH "${CMAKE_MATCH_3}")
+  find_library(PADDLE_LIB paddle_inference ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/paddle/lib NO_DEFAULT_PATH)
+  if(WIN32)
+    if(PADDLEINFERENCE_VERSION_MAJOR EQUAL 2)
+      set(DNNL_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/third_party/install/mkldnn/lib/mkldnn.lib")
+    else()
+      set(DNNL_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/third_party/install/onednn/lib/dnnl.lib")
+    endif()
+    set(IOMP_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/third_party/install/mklml/lib/libiomp5md.lib")
+  elseif(APPLE)
+    message(STATUS "No third parties libs(mkldnn and omp) need to link into paddle_inference on MacOS OSX.")
+  else()
+    if(PADDLEINFERENCE_VERSION_MAJOR EQUAL 2)
+      set(DNNL_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/third_party/install/mkldnn/lib/libmkldnn.so.0")
+    else()
+      set(DNNL_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/third_party/install/onednn/lib/libdnnl.so.3")
+    endif()
+    set(IOMP_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/third_party/install/mklml/lib/libiomp5.so")
+  endif()
+  list(APPEND ULTRAINFER_LIBS ${PADDLE_LIB})
+  if(EXISTS "${DNNL_LIB}")
+    list(APPEND ULTRAINFER_LIBS ${DNNL_LIB} ${IOMP_LIB})
+  endif()
+endif()
+
+if(ENABLE_OPENVINO_BACKEND)
+  if (OPENVINO_DIRECTORY)
+    set(OPENVINO_DIR ${OPENVINO_DIRECTORY})
+  else()
+    set(OPENVINO_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/${OPENVINO_FILENAME}/runtime)
+  endif()
+  get_openvino_libs(${OPENVINO_DIR})
+  message(STATUS "OPENVINO_LIBS = ${OPENVINO_LIBS}")
+  list(APPEND ULTRAINFER_LIBS ${OPENVINO_LIBS})
+endif()
+
+if(ENABLE_RKNPU2_BACKEND)
+  if(RKNN2_TARGET_SOC STREQUAL "RK356X")
+    set(RKNPU2_LIB ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/rknpu2_runtime/lib/librknnrt.so)
+  elseif (RKNN2_TARGET_SOC STREQUAL "RK3588")
+    set(RKNPU2_LIB ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/rknpu2_runtime/lib/librknnrt.so)
+  else ()
+    message(FATAL_ERROR "RKNN2_TARGET_SOC is not set, ref value: RK356X or RK3588")
+  endif()
+  message(STATUS "The path of RKNPU2 is ${RKNPU2_LIB}.")
+  list(APPEND ULTRAINFER_LIBS ${RKNPU2_LIB})
+endif()
+
+if(ENABLE_HORIZON_BACKEND)
+  set(DNN_PATH ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/dnn)
+  set(APPSDK_PATH ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/appsdk/appuser/)
+
+  set(DNN_LIB_PATH ${DNN_PATH}/lib)
+  set(APPSDK_LIB_PATH ${APPSDK_PATH}/lib/hbbpu)
+  set(BPU_libs dnn cnn_intf hbrt_bernoulli_aarch64)
+
+  link_directories(${DNN_LIB_PATH}
+                  ${APPSDK_PATH}/lib/hbbpu
+                  ${APPSDK_PATH}/lib)
+
+  list(APPEND ULTRAINFER_LIBS ${BPU_libs})
+
+endif()
+if(ENABLE_LITE_BACKEND)
+  set(LITE_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/${PADDLELITE_FILENAME})
+  # Linux/Mac/Win/...
+  find_library(LITE_LIB paddle_full_api_shared ${LITE_DIR}/lib NO_DEFAULT_PATH)
+  list(APPEND ULTRAINFER_LIBS ${LITE_LIB})
+endif()
+
+if(ENABLE_POROS_BACKEND)
+  find_library(POROS_LIB poros ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/poros/lib NO_DEFAULT_PATH)
+  find_library(TORCH_LIB torch ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/torch/lib NO_DEFAULT_PATH)
+  set(TORCH_INCLUDE "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/torch/include")
+  list(APPEND ULTRAINFER_LIBS ${POROS_LIB} ${TORCH_LIB})
+  list(APPEND ULTRAINFER_INCS ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/poros/include ${TORCH_INCLUDE})
+endif()
+
+if(WITH_GPU)
+  if(NOT CUDA_DIRECTORY)
+    set(CUDA_DIRECTORY "/usr/local/cuda")
+  endif()
+  if(WIN32)
+    find_library(CUDA_LIB cudart ${CUDA_DIRECTORY}/lib/x64)
+    find_library(NVJPEG_LIB nvjpeg ${CUDA_DIRECTORY}/lib/x64)
+  else()
+    find_library(CUDA_LIB cudart ${CUDA_DIRECTORY}/lib64)
+    if(NOT BUILD_ON_JETSON)
+      find_library(NVJPEG_LIB nvjpeg ${CUDA_DIRECTORY}/lib64)
+    endif()
+  endif()
+  if(NOT CUDA_LIB)
+    message(FATAL_ERROR "[UltraInfer] Cannot find library cudart in ${CUDA_DIRECTORY}, Please define CUDA_DIRECTORY, e.g -DCUDA_DIRECTORY=/path/to/cuda")
+  endif()
+  list(APPEND ULTRAINFER_LIBS ${CUDA_LIB} ${NVJPEG_LIB})
+  list(APPEND ULTRAINFER_INCS ${CUDA_DIRECTORY}/include)
+
+  if(ENABLE_TRT_BACKEND)
+    if(BUILD_ON_JETSON)
+      find_library(TRT_INFER_LIB nvinfer /usr/lib/aarch64-linux-gnu/)
+      find_library(TRT_ONNX_LIB nvonnxparser /usr/lib/aarch64-linux-gnu/)
+      find_library(TRT_PLUGIN_LIB nvinfer_plugin /usr/lib/aarch64-linux-gnu/)
+    else()
+      if(EXISTS ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/)
+        find_library(TRT_INFER_LIB nvinfer ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
+        find_library(TRT_ONNX_LIB nvonnxparser ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
+        find_library(TRT_PLUGIN_LIB nvinfer_plugin ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tensorrt/lib NO_DEFAULT_PATH)
+      else()
+        find_library(TRT_INFER_LIB nvinfer /usr/lib/x86_64-linux-gnu/)
+        find_library(TRT_ONNX_LIB nvonnxparser /usr/lib/x86_64-linux-gnu/)
+        find_library(TRT_PLUGIN_LIB nvinfer_plugin /usr/lib/x86_64-linux-gnu/)
+      endif()
+    endif()
+    list(APPEND ULTRAINFER_LIBS ${TRT_INFER_LIB} ${TRT_ONNX_LIB} ${TRT_PLUGIN_LIB})
+  endif()
+endif()
+
+if(ENABLE_VISION)
+  if(OPENCV_DIRECTORY)
+    set(OpenCV_DIR ${OPENCV_DIRECTORY})
+  else()
+    set(OpenCV_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/${OPENCV_FILENAME})
+    if(WIN32)
+      set(OpenCV_DIR ${OpenCV_DIR}/build)
+    endif()
+  endif()
+  message(STATUS "The path of OpenCV is ${OpenCV_DIR}.")
+
+  # Win/Linux/Mac
+  find_package(OpenCV REQUIRED PATHS ${OpenCV_DIR} NO_DEFAULT_PATH)
+  list(APPEND ULTRAINFER_INCS ${OpenCV_INCLUDE_DIRS})
+  list(APPEND ULTRAINFER_LIBS ${OpenCV_LIBS})
+
+  if(ENABLE_FLYCV)
+    include_directories(${CMAKE_CURRENT_LIST_DIR}/third_libs/install/flycv/include)
+    set(FLYCV_LIB_DIR ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/flycv/lib)
+
+    find_library(FLYCV_LIB flycv_shared ${FLYCV_LIB_DIR} NO_DEFAULT_PATH)
+    list(APPEND ULTRAINFER_LIBS ${FLYCV_LIB})
+  endif()
+
+  if(ENABLE_CVCUDA)
+    find_library(CVCUDA_LIB cvcuda ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/cvcuda/lib NO_DEFAULT_PATH)
+    find_library(NVCV_TYPES_LIB nvcv_types ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/cvcuda/lib NO_DEFAULT_PATH)
+    list(APPEND ULTRAINFER_LIBS ${CVCUDA_LIB} ${NVCV_TYPES_LIB})
+    list(APPEND ULTRAINFER_INCS ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/cvcuda/include NO_DEFAULT_PATH)
+    add_definitions(-DENABLE_CVCUDA)
+  endif()
+
+endif()
+
+if (ENABLE_TEXT)
+  # Add dependency libs later: Linux/Mac/Win/...
+  find_library(FAST_TOKENIZER_LIB core_tokenizers ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/fast_tokenizer/lib NO_DEFAULT_PATH)
+  list(APPEND ULTRAINFER_LIBS ${FAST_TOKENIZER_LIB})
+
+  list(APPEND ULTRAINFER_INCS ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/fast_tokenizer/include)
+  list(APPEND ULTRAINFER_INCS ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/fast_tokenizer/third_party/include)
+endif()
+
+if(ENABLE_PADDLE2ONNX)
+  if(NOT BUILD_PADDLE2ONNX)
+    find_library(PADDLE2ONNX_LIB paddle2onnx  ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle2onnx/lib NO_DEFAULT_PATH)
+    list(APPEND ULTRAINFER_LIBS ${PADDLE2ONNX_LIB})
+  endif()
+endif()
+
+if(WITH_KUNLUNXIN)
+  list(APPEND ULTRAINFER_LIBS -lpthread -lrt -ldl)
+endif()
+
+remove_duplicate_libraries(ULTRAINFER_LIBS)
+
+include(${CMAKE_CURRENT_LIST_DIR}/summary.cmake)
+ultra_infer_summary()
+message(STATUS "  DEPENDENCY_LIBS           : ${ULTRAINFER_LIBS}")
+
+if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU")
+  if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS "5.4.0")
+    string(STRIP "${CMAKE_CXX_COMPILER_VERSION}" CMAKE_CXX_COMPILER_VERSION)
+    message(FATAL_ERROR "[ERROR] UltraInfer require g++ version >= 5.4.0, but now your g++ version is ${CMAKE_CXX_COMPILER_VERSION}, this may cause failure! Use -DCMAKE_CXX_COMPILER to define path of your compiler.")
+  endif()
+endif()
+
+function(install_ultra_infer_libraries DESTINATION_DIR)
+  set(DYN_LIB_SUFFIX "*.so*")
+  if(WIN32)
+    set(DYN_LIB_SUFFIX "*.dll")
+  elseif(APPLE)
+    set(DYN_LIB_SUFFIX "*.dylib*")
+  endif()
+  if(UltraInfer_DIR)
+    set(DYN_SEARCH_DIR ${UltraInfer_DIR})
+  elseif(ULTRAINFER_INSTALL_DIR)
+    set(DYN_SEARCH_DIR ${ULTRAINFER_INSTALL_DIR})
+  else()
+    message(FATAL_ERROR "Please set UltraInfer_DIR/ULTRAINFER_INSTALL_DIR before call install_ultra_infer_libraries.")
+  endif()
+  file(GLOB_RECURSE ALL_NEED_DYN_LIBS ${DYN_SEARCH_DIR}/lib/${DYN_LIB_SUFFIX})
+  file(GLOB_RECURSE ALL_DEPS_DYN_LIBS ${DYN_SEARCH_DIR}/third_libs/${DYN_LIB_SUFFIX})
+
+  if(ENABLE_VISION)
+    # OpenCV
+    file(GLOB_RECURSE ALL_OPENCV_DYN_LIBS ${OpenCV_DIR}/${DYN_LIB_SUFFIX})
+    list(REMOVE_ITEM ALL_DEPS_DYN_LIBS ${ALL_OPENCV_DYN_LIBS})
+
+    if(WIN32)
+      file(GLOB OPENCV_DYN_LIBS ${OpenCV_DIR}/x64/vc15/bin/${DYN_LIB_SUFFIX})
+      file(INSTALL ${OPENCV_DYN_LIBS} DESTINATION ${DESTINATION_DIR})
+    else() # linux/mac
+      file(GLOB OPENCV_DYN_LIBS ${OpenCV_DIR}/lib/${DYN_LIB_SUFFIX})
+      file(INSTALL ${OPENCV_DYN_LIBS} DESTINATION ${DESTINATION_DIR})
+    endif()
+
+    # FlyCV
+    if(ENABLE_FLYCV)
+      file(GLOB_RECURSE ALL_FLYCV_DYN_LIBS ${FLYCV_LIB_DIR}/${DYN_LIB_SUFFIX})
+      list(REMOVE_ITEM ALL_DEPS_DYN_LIBS ${ALL_FLYCV_DYN_LIBS})
+    endif()
+  endif()
+
+  if(ENABLE_OPENVINO_BACKEND)
+    # need plugins.xml for openvino backend
+    set(OPENVINO_RUNTIME_BIN_DIR ${OPENVINO_DIR}/bin)
+    file(GLOB OPENVINO_PLUGIN_XML ${OPENVINO_RUNTIME_BIN_DIR}/*.xml)
+    file(INSTALL ${OPENVINO_PLUGIN_XML} DESTINATION ${DESTINATION_DIR})
+  endif()
+
+  # Install other libraries
+  file(INSTALL ${ALL_NEED_DYN_LIBS} DESTINATION ${DESTINATION_DIR})
+  file(INSTALL ${ALL_DEPS_DYN_LIBS} DESTINATION ${DESTINATION_DIR})
+endfunction()
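
For context, UltraInfer.cmake.in is the template for the UltraInfer.cmake that ships with the installed SDK: a consumer project includes it and links the aggregated ULTRAINFER_LIBS / ULTRAINFER_INCS lists. A minimal sketch of such a consumer CMakeLists.txt, assuming the SDK was unpacked to a hypothetical path:

cmake_minimum_required(VERSION 3.10)
project(infer_demo CXX)

# Hypothetical location of an unpacked UltraInfer SDK.
set(ULTRAINFER_INSTALL_DIR "/opt/ultra_infer-sdk")

include(${ULTRAINFER_INSTALL_DIR}/UltraInfer.cmake)
include_directories(${ULTRAINFER_INCS})

add_executable(infer_demo main.cc)
target_link_libraries(infer_demo ${ULTRAINFER_LIBS})

# Copy the SDK's shared libraries next to the build output, using the
# install_ultra_infer_libraries helper defined at the end of the template.
install_ultra_infer_libraries(${CMAKE_CURRENT_BINARY_DIR}/libs)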

+ 13 - 0
libs/ultra_infer/UltraInferCSharp.cmake.in

@@ -0,0 +1,13 @@
+list(APPEND ULTRAINFER_DOTNET_REFERENCES
+    "Microsoft.CSharp"
+    "System"
+    "System.Core"
+    "System.Data"
+    "System.Deployment"
+    "System.Drawing"
+    "System.Net.Http"
+    "System.Xml"
+    "System.Reflection"
+    "${CMAKE_CURRENT_LIST_DIR}/csharp_lib/ultra_infer_csharp.dll")
+
+set(ULTRAINFER_PACKAGE_REFERENCES "OpenCvSharp4_4.7.0.20230115;OpenCvSharp4.runtime.win_4.7.0.20230115")
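
The underscore-delimited "name_version" entries above match the format of CMake's VS_PACKAGE_REFERENCES target property, so a consumer would presumably attach both lists to a C# target. A hedged sketch of that usage, assuming a Visual Studio generator on Windows; the target and source names are hypothetical:

cmake_minimum_required(VERSION 3.15)
project(csharp_demo CSharp)

include(${CMAKE_CURRENT_LIST_DIR}/UltraInferCSharp.cmake)  # hypothetical location

add_executable(csharp_demo Program.cs)
set_target_properties(csharp_demo PROPERTIES
  # Framework assemblies plus the ultra_infer_csharp.dll listed above.
  VS_DOTNET_REFERENCES "${ULTRAINFER_DOTNET_REFERENCES}"
  # OpenCvSharp4 NuGet packages in CMake's "name_version" form.
  VS_PACKAGE_REFERENCES "${ULTRAINFER_PACKAGE_REFERENCES}")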

+ 0 - 0
libs/ultrainfer/VERSION_NUMBER → libs/ultra_infer/VERSION_NUMBER


+ 0 - 0
libs/ultrainfer/cmake/UltraInferConfig.cmake → libs/ultra_infer/cmake/UltraInferConfig.cmake


+ 0 - 0
libs/ultrainfer/cmake/ascend.cmake → libs/ultra_infer/cmake/ascend.cmake


+ 0 - 0
libs/ultrainfer/cmake/build_paddle2onnx.cmake → libs/ultra_infer/cmake/build_paddle2onnx.cmake


+ 0 - 0
libs/ultrainfer/cmake/build_tools.cmake → libs/ultra_infer/cmake/build_tools.cmake


+ 0 - 0
libs/ultrainfer/cmake/check.cmake → libs/ultra_infer/cmake/check.cmake


+ 38 - 0
libs/ultra_infer/cmake/config_cpack.cmake

@@ -0,0 +1,38 @@
+if(NOT UNIX)
+  return()
+endif()
+
+set(PACKAGE_SYS_VERSION "linux")
+if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
+  set(PACKAGE_SYS_VERSION "${PACKAGE_SYS_VERSION}-aarch64")
+else()
+  set(PACKAGE_SYS_VERSION "${PACKAGE_SYS_VERSION}-x64")
+endif()
+if(WITH_GPU)
+  set(PACKAGE_SYS_VERSION "${PACKAGE_SYS_VERSION}-gpu")
+endif()
+
+# set(CPACK_ERROR_ON_ABSOLUTE_INSTALL_DESTINATION ON)
+set(CPACK_VERBATIM_VARIABLES TRUE)
+set(CPACK_GENERATOR DEB RPM)
+set(CPACK_THREADS 0)
+set(CPACK_PACKAGE_CONTACT "ultra_infer@baidu.com")
+set(CPACK_PACKAGING_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}")
+set(CPACK_PACKAGE_VERSION "${ULTRAINFER_VERSION}")
+set(CPACK_PACKAGE_FILE_NAME "${PROJECT_NAME}-${PACKAGE_SYS_VERSION}-${ULTRAINFER_VERSION}")
+set(CPACK_PACKAGE_NAME "${PROJECT_NAME}")
+
+set(CPACK_DEBIAN_PACKAGE_CONTROL_STRICT_PERMISSION TRUE)
+configure_file(cpack/debian_postinst.in cpack/postinst @ONLY)
+configure_file(cpack/debian_prerm.in cpack/prerm @ONLY)
+set(CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA
+    "${CMAKE_CURRENT_BINARY_DIR}/cpack/postinst"
+    "${CMAKE_CURRENT_BINARY_DIR}/cpack/prerm")
+
+set(CPACK_RPM_PACKAGE_AUTOREQ FALSE)
+configure_file(cpack/rpm_postinst.in cpack/rpm_postinst @ONLY)
+configure_file(cpack/rpm_postrm.in cpack/rpm_postrm @ONLY)
+set(CPACK_RPM_POST_INSTALL_SCRIPT_FILE "${CMAKE_CURRENT_BINARY_DIR}/cpack/rpm_postinst")
+set(CPACK_RPM_POST_UNINSTALL_SCRIPT_FILE "${CMAKE_CURRENT_BINARY_DIR}/cpack/rpm_postrm")
+
+include(CPack)

+ 0 - 0
libs/ultrainfer/cmake/cuda.cmake → libs/ultra_infer/cmake/cuda.cmake


+ 0 - 0
libs/ultrainfer/cmake/cvcuda.cmake → libs/ultra_infer/cmake/cvcuda.cmake


+ 0 - 0
libs/ultrainfer/cmake/faiss.cmake → libs/ultra_infer/cmake/faiss.cmake


+ 0 - 0
libs/ultrainfer/cmake/fast_tokenizer.cmake → libs/ultra_infer/cmake/fast_tokenizer.cmake


+ 0 - 0
libs/ultrainfer/cmake/flycv.cmake → libs/ultra_infer/cmake/flycv.cmake


+ 89 - 0
libs/ultra_infer/cmake/gflags.cmake

@@ -0,0 +1,89 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+INCLUDE(ExternalProject)
+
+if(THIRD_PARTY_PATH)
+  SET(GFLAGS_PREFIX_DIR  ${THIRD_PARTY_PATH}/gflags)
+  SET(GFLAGS_INSTALL_DIR ${THIRD_PARTY_PATH}/install/gflags)
+else()
+  # For the examples' CMake builds (no THIRD_PARTY_PATH set)
+  SET(GFLAGS_PREFIX_DIR  ${ULTRAINFER_INSTALL_DIR}/installed_ultra_infer/cmake)
+  SET(GFLAGS_INSTALL_DIR ${ULTRAINFER_INSTALL_DIR}/installed_ultra_infer/cmake/gflags)
+endif()
+SET(GFLAGS_INCLUDE_DIR "${GFLAGS_INSTALL_DIR}/include" CACHE PATH "gflags include directory." FORCE)
+set(GFLAGS_SOURCE_FILE ${GFLAGS_PREFIX_DIR}/src/gflags.tgz CACHE PATH "gflags source file." FORCE)
+
+set(GFLAGS_URL_PREFIX "https://bj.bcebos.com/fastdeploy/third_libs")
+set(GFLAGS_URL ${GFLAGS_URL_PREFIX}/gflags.tgz)
+set(GFLAGS_CACHE_FILE ${CMAKE_CURRENT_LIST_DIR}/gflags.tgz)
+if(EXISTS ${GFLAGS_CACHE_FILE})
+  set(GFLAGS_URL ${GFLAGS_CACHE_FILE} CACHE PATH "gflags cache file." FORCE)
+  set(GFLAGS_SOURCE_FILE ${GFLAGS_CACHE_FILE} CACHE PATH "gflags source file." FORCE)
+endif()
+
+IF(WIN32)
+  set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/gflags_static.lib" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
+ELSE(WIN32)
+  set(GFLAGS_LIBRARIES "${GFLAGS_INSTALL_DIR}/lib/libgflags.a" CACHE FILEPATH "GFLAGS_LIBRARIES" FORCE)
+  set(BUILD_COMMAND $(MAKE) --silent)
+  set(INSTALL_COMMAND $(MAKE) install)
+ENDIF(WIN32)
+
+INCLUDE_DIRECTORIES(${GFLAGS_INCLUDE_DIR})
+
+ExternalProject_Add(
+    extern_gflags
+    ${EXTERNAL_PROJECT_LOG_ARGS}
+    URL ${GFLAGS_URL}
+    PREFIX          ${GFLAGS_PREFIX_DIR}
+    UPDATE_COMMAND  ""
+    BUILD_COMMAND   ${BUILD_COMMAND}
+    INSTALL_COMMAND ${INSTALL_COMMAND}
+    CMAKE_ARGS      -DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}
+                    -DCMAKE_C_COMPILER=${CMAKE_C_COMPILER}
+                    -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
+                    -DCMAKE_CXX_FLAGS_RELEASE=${CMAKE_CXX_FLAGS_RELEASE}
+                    -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}
+                    -DCMAKE_C_FLAGS=${CMAKE_C_FLAGS}
+                    -DCMAKE_C_FLAGS_DEBUG=${CMAKE_C_FLAGS_DEBUG}
+                    -DCMAKE_C_FLAGS_RELEASE=${CMAKE_C_FLAGS_RELEASE}
+                    -DBUILD_STATIC_LIBS=ON
+                    -DCMAKE_INSTALL_PREFIX=${GFLAGS_INSTALL_DIR}
+                    -DCMAKE_POSITION_INDEPENDENT_CODE=ON
+                    -DBUILD_TESTING=OFF
+                    -DCMAKE_BUILD_TYPE=${THIRD_PARTY_BUILD_TYPE}
+                    ${EXTERNAL_OPTIONAL_ARGS}
+    CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${GFLAGS_INSTALL_DIR}
+                    -DCMAKE_POSITION_INDEPENDENT_CODE:BOOL=ON
+                    -DCMAKE_BUILD_TYPE:STRING=${THIRD_PARTY_BUILD_TYPE}
+    BUILD_BYPRODUCTS ${GFLAGS_LIBRARIES}
+)
+ADD_LIBRARY(gflags STATIC IMPORTED GLOBAL)
+SET_PROPERTY(TARGET gflags PROPERTY IMPORTED_LOCATION ${GFLAGS_LIBRARIES})
+ADD_DEPENDENCIES(gflags extern_gflags)
+
+if(UNIX AND (NOT APPLE))
+  list(APPEND GFLAGS_LIBRARIES pthread)
+endif()
+
+# On Windows (including MinGW), the Shlwapi library is used by gflags if available.
+if (WIN32)
+  include(CheckIncludeFileCXX)
+  check_include_file_cxx("shlwapi.h" HAVE_SHLWAPI)
+  if (HAVE_SHLWAPI)
+    set_property(GLOBAL PROPERTY OS_DEPENDENCY_MODULES shlwapi.lib)
+    list(APPEND GFLAGS_LIBRARIES shlwapi.lib)
+  endif(HAVE_SHLWAPI)
+endif (WIN32)

+ 0 - 0
libs/ultrainfer/cmake/glog.cmake → libs/ultra_infer/cmake/glog.cmake


+ 0 - 0
libs/ultrainfer/cmake/gtest.cmake → libs/ultra_infer/cmake/gtest.cmake


+ 0 - 0
libs/ultrainfer/cmake/horizon.cmake → libs/ultra_infer/cmake/horizon.cmake


+ 0 - 0
libs/ultrainfer/cmake/kunlunxin.cmake → libs/ultra_infer/cmake/kunlunxin.cmake


+ 0 - 0
libs/ultrainfer/cmake/onnxruntime.cmake → libs/ultra_infer/cmake/onnxruntime.cmake


+ 90 - 0
libs/ultra_infer/cmake/opencv.cmake

@@ -0,0 +1,90 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set(COMPRESSED_SUFFIX ".tgz")
+
+if(WIN32)
+  if(NOT CMAKE_CL_64)
+    set(OPENCV_FILENAME "opencv-win-x86-3.4.16")
+  else()
+    set(OPENCV_FILENAME "opencv-win-x64-3.4.16")
+  endif()
+  set(COMPRESSED_SUFFIX ".zip")
+elseif(APPLE)
+  if(CURRENT_OSX_ARCH MATCHES "arm64")
+    set(OPENCV_FILENAME "opencv-osx-arm64-3.4.16")
+  else()
+    set(OPENCV_FILENAME "opencv-osx-x86_64-3.4.16")
+  endif()
+elseif(IOS)
+  message(FATAL_ERROR "Cross compiling for iOS is not supported yet!")
+# Linux
+else()
+  if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
+    set(OPENCV_FILENAME "opencv-linux-aarch64-3.4.14")
+  endif()
+endif()
+
+if(NOT OPENCV_FILENAME)
+  set(OPENCV_FILENAME "opencv-linux-x64-3.4.16")
+endif()
+
+set(OPENCV_INSTALL_DIR ${THIRD_PARTY_PATH}/install/)
+if(WIN32)
+  if(NOT CMAKE_CL_64)
+    set(OPENCV_URL_PREFIX "https://bj.bcebos.com/fastdeploy/third_libs")
+  else()
+    set(OPENCV_URL_PREFIX "https://bj.bcebos.com/paddle2onnx/libs")
+  endif()
+else() # TODO: use ultra_infer/third_libs instead.
+  set(OPENCV_URL_PREFIX "https://bj.bcebos.com/paddle2onnx/libs")
+endif()
+if(NOT OPENCV_URL)
+  set(OPENCV_URL ${OPENCV_URL_PREFIX}/${OPENCV_FILENAME}${COMPRESSED_SUFFIX})
+endif()
+
+
+if(BUILD_ON_JETSON)
+  if(EXISTS /usr/lib/aarch64-linux-gnu/cmake/opencv4/)
+    set(OPENCV_DIRECTORY /usr/lib/aarch64-linux-gnu/cmake/opencv4/)
+  endif()
+endif()
+
+if(OPENCV_DIRECTORY)
+  message(STATUS "Use the opencv lib specified by user. The OpenCV path: ${OPENCV_DIRECTORY}")
+  STRING(REGEX REPLACE "\\\\" "/" OPENCV_DIRECTORY ${OPENCV_DIRECTORY})
+  # Win/Linux/Mac
+  set(OpenCV_DIR ${OPENCV_DIRECTORY})
+  find_package(OpenCV REQUIRED PATHS ${OpenCV_DIR})
+  include_directories(${OpenCV_INCLUDE_DIRS})
+  list(APPEND DEPEND_LIBS ${OpenCV_LIBS})
+else()
+  message(STATUS "Use the default OpenCV lib from: ${OPENCV_URL}")
+  # Win/Linux/Mac
+  download_and_decompress(${OPENCV_URL} ${CMAKE_CURRENT_BINARY_DIR}/${OPENCV_FILENAME}${COMPRESSED_SUFFIX} ${THIRD_PARTY_PATH}/install/)
+  if(EXISTS ${THIRD_PARTY_PATH}/install/opencv)
+    file(REMOVE_RECURSE ${THIRD_PARTY_PATH}/install/opencv) 
+  endif()
+  file(RENAME ${THIRD_PARTY_PATH}/install/${OPENCV_FILENAME}/ ${THIRD_PARTY_PATH}/install/opencv)
+  set(OPENCV_FILENAME opencv)
+  if(NOT OpenCV_DIR)
+    set(OpenCV_DIR ${THIRD_PARTY_PATH}/install/${OPENCV_FILENAME})
+  endif()
+  if (WIN32)
+    set(OpenCV_DIR ${OpenCV_DIR}/build)
+  endif()
+  find_package(OpenCV REQUIRED PATHS ${OpenCV_DIR} NO_DEFAULT_PATH)
+  include_directories(${OpenCV_INCLUDE_DIRS})
+  list(APPEND DEPEND_LIBS opencv_core opencv_video opencv_highgui opencv_imgproc opencv_imgcodecs opencv_calib3d opencv_features2d opencv_flann)
+endif()

+ 0 - 0
libs/ultrainfer/cmake/openvino.cmake → libs/ultra_infer/cmake/openvino.cmake


+ 0 - 0
libs/ultrainfer/cmake/paddle2onnx.cmake → libs/ultra_infer/cmake/paddle2onnx.cmake


+ 0 - 0
libs/ultrainfer/cmake/paddle_inference.cmake → libs/ultra_infer/cmake/paddle_inference.cmake


+ 0 - 0
libs/ultrainfer/cmake/paddlelite.cmake → libs/ultra_infer/cmake/paddlelite.cmake


+ 0 - 0
libs/ultrainfer/cmake/poros.cmake → libs/ultra_infer/cmake/poros.cmake


+ 0 - 0
libs/ultrainfer/cmake/rknpu2.cmake → libs/ultra_infer/cmake/rknpu2.cmake


+ 0 - 0
libs/ultrainfer/cmake/sophgo.cmake → libs/ultra_infer/cmake/sophgo.cmake


+ 84 - 0
libs/ultra_infer/cmake/summary.cmake

@@ -0,0 +1,84 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+function(ultra_infer_summary)
+  message(STATUS "")
+  message(STATUS "*************UltraInfer Building Summary**********")
+  message(STATUS "  CMake version             : ${CMAKE_VERSION}")
+  message(STATUS "  CMake command             : ${CMAKE_COMMAND}")
+  message(STATUS "  System                    : ${CMAKE_SYSTEM_NAME}")
+  message(STATUS "  C++ compiler              : ${CMAKE_CXX_COMPILER}")
+  message(STATUS "  C++ standard              : ${CMAKE_CXX_STANDARD}")
+  message(STATUS "  C++ cuda standard         : ${CMAKE_CUDA_STANDARD}")
+  message(STATUS "  C++ compiler version      : ${CMAKE_CXX_COMPILER_VERSION}")
+  message(STATUS "  CXX flags                 : ${CMAKE_CXX_FLAGS}")
+  message(STATUS "  EXE linker flags          : ${CMAKE_EXE_LINKER_FLAGS}")
+  message(STATUS "  Shared linker flags       : ${CMAKE_SHARED_LINKER_FLAGS}")
+  message(STATUS "  Build type                : ${CMAKE_BUILD_TYPE}")
+  get_directory_property(tmp DIRECTORY ${PROJECT_SOURCE_DIR} COMPILE_DEFINITIONS)
+  message(STATUS "  Compile definitions       : ${tmp}")
+  message(STATUS "  CMAKE_PREFIX_PATH         : ${CMAKE_PREFIX_PATH}")
+  message(STATUS "  CMAKE_INSTALL_PREFIX      : ${CMAKE_INSTALL_PREFIX}")
+  message(STATUS "  CMAKE_MODULE_PATH         : ${CMAKE_MODULE_PATH}")
+  message(STATUS "")
+  message(STATUS "  UltraInfer version        : ${ULTRAINFER_VERSION}")
+  message(STATUS "  ENABLE_ORT_BACKEND        : ${ENABLE_ORT_BACKEND}")
+  message(STATUS "  ENABLE_RKNPU2_BACKEND     : ${ENABLE_RKNPU2_BACKEND}")
+  message(STATUS "  ENABLE_HORIZON_BACKEND    : ${ENABLE_HORIZON_BACKEND}")
+  message(STATUS "  ENABLE_SOPHGO_BACKEND     : ${ENABLE_SOPHGO_BACKEND}")
+  message(STATUS "  ENABLE_PADDLE_BACKEND     : ${ENABLE_PADDLE_BACKEND}")
+  message(STATUS "  ENABLE_LITE_BACKEND       : ${ENABLE_LITE_BACKEND}")
+  message(STATUS "  ENABLE_POROS_BACKEND      : ${ENABLE_POROS_BACKEND}")
+  message(STATUS "  ENABLE_TRT_BACKEND        : ${ENABLE_TRT_BACKEND}")
+  message(STATUS "  ENABLE_OPENVINO_BACKEND   : ${ENABLE_OPENVINO_BACKEND}")
+  message(STATUS "  ENABLE_TVM_BACKEND        : ${ENABLE_TVM_BACKEND}")
+  message(STATUS "  ENABLE_BENCHMARK          : ${ENABLE_BENCHMARK}")
+  message(STATUS "  ENABLE_VISION             : ${ENABLE_VISION}")
+  message(STATUS "  ENABLE_TEXT               : ${ENABLE_TEXT}")
+  message(STATUS "  ENABLE_FLYCV              : ${ENABLE_FLYCV}")
+  message(STATUS "  ENABLE_CVCUDA             : ${ENABLE_CVCUDA}")
+  message(STATUS "  WITH_GPU                  : ${WITH_GPU}")
+  message(STATUS "  WITH_IPU                  : ${WITH_IPU}")
+  message(STATUS "  WITH_OPENCL               : ${WITH_OPENCL}")
+  message(STATUS "  WITH_TESTING              : ${WITH_TESTING}")
+  message(STATUS "  WITH_ASCEND               : ${WITH_ASCEND}")
+  message(STATUS "  WITH_DIRECTML             : ${WITH_DIRECTML}")
+  message(STATUS "  WITH_TIMVX                : ${WITH_TIMVX}")
+  message(STATUS "  WITH_KUNLUNXIN            : ${WITH_KUNLUNXIN}")
+  message(STATUS "  WITH_CAPI                 : ${WITH_CAPI}")
+  message(STATUS "  WITH_CSHARPAPI            : ${WITH_CSHARPAPI}") 
+  if(ENABLE_ORT_BACKEND)
+    message(STATUS "  ONNXRuntime version       : ${ONNXRUNTIME_VERSION}")
+  endif()
+  if(ENABLE_PADDLE_BACKEND)
+    message(STATUS "  Paddle Inference version  : ${PADDLEINFERENCE_VERSION}")
+  endif()
+  if(ENABLE_POROS_BACKEND)
+    message(STATUS "  Poros version  : ${POROS_VERSION}")
+  endif()
+  if(ENABLE_OPENVINO_BACKEND)
+    message(STATUS "  OpenVINO version          : ${OPENVINO_VERSION}")
+  endif()
+  if(WITH_GPU)
+    message(STATUS "  CUDA_DIRECTORY            : ${CUDA_DIRECTORY}")
+    message(STATUS "  TRT_DRECTORY              : ${TRT_DIRECTORY}")
+  endif()
+  if (${BUILD_ULTRAINFER_PYTHON})
+    message(STATUS "  Python executable         : ${PYTHON_EXECUTABLE}")
+    message(STATUS "  Python includes           : ${PYTHON_INCLUDE_DIR}")
+  endif()
+  if(ENABLE_LITE_BACKEND)
+    message(STATUS "  Paddle Lite version       : ${PADDLELITE_VERSION}")
+  endif()
+endfunction()

+ 0 - 0
libs/ultrainfer/cmake/timvx.cmake → libs/ultra_infer/cmake/timvx.cmake


+ 0 - 0
libs/ultrainfer/cmake/toolchain.cmake → libs/ultra_infer/cmake/toolchain.cmake


+ 0 - 0
libs/ultrainfer/cmake/tvm.cmake → libs/ultra_infer/cmake/tvm.cmake


+ 223 - 0
libs/ultra_infer/cmake/utils.cmake

@@ -0,0 +1,223 @@
+# This function comes from https://blog.csdn.net/yindongjie1221/article/details/90614261
+function(redefine_file_macro targetname)
+    get_target_property(source_files "${targetname}" SOURCES)
+    foreach(sourcefile ${source_files})
+        get_property(defs SOURCE "${sourcefile}"
+            PROPERTY COMPILE_DEFINITIONS)
+        get_filename_component(filepath "${sourcefile}" ABSOLUTE)
+        string(REPLACE ${PROJECT_SOURCE_DIR}/ "" relpath ${filepath})
+        list(APPEND defs "__REL_FILE__=\"${relpath}\"")
+        set_property(
+            SOURCE "${sourcefile}"
+            PROPERTY COMPILE_DEFINITIONS ${defs}
+            )
+    endforeach()
+endfunction()
+
+function(download_and_decompress url filename decompress_dir)
+  if(NOT EXISTS ${filename})
+    message("Downloading file from ${url} to ${filename} ...")
+    file(DOWNLOAD ${url} "${filename}.tmp" SHOW_PROGRESS)
+    file(RENAME "${filename}.tmp" ${filename})
+  endif()
+  if(NOT EXISTS ${decompress_dir})
+    file(MAKE_DIRECTORY ${decompress_dir})
+  endif()
+  message("Decompress file ${filename} ...")
+  execute_process(COMMAND ${CMAKE_COMMAND} -E tar -xf ${filename} WORKING_DIRECTORY ${decompress_dir})
+endfunction()
+
+function(get_openvino_libs OPENVINO_RUNTIME_DIR)
+  set(LIB_LIST "")
+  find_library(OPENVINO_LIB openvino PATHS ${OPENVINO_RUNTIME_DIR}/lib/ ${OPENVINO_RUNTIME_DIR}/lib/intel64 NO_DEFAULT_PATH)
+  list(APPEND LIB_LIST ${OPENVINO_LIB})
+
+  set(TBB_DIR ${OPENVINO_RUNTIME_DIR}/3rdparty/tbb/lib/cmake)
+  message(STATUS "TBB_DIR: ${TBB_DIR}")
+  find_package(TBB PATHS ${TBB_DIR})
+  if (TBB_FOUND)
+    # 2024.10.22(zhangyue): Use openvino with tbb on linux
+    set(TBB_LIB "${OPENVINO_RUNTIME_DIR}/3rdparty/tbb/lib/libtbb.so.12")
+    list(APPEND LIB_LIST ${TBB_LIB})
+  else()
+    # TODO(zhoushunjie): Use openvino with tbb on linux in future.
+    set(OMP_LIB "${OPENVINO_RUNTIME_DIR}/3rdparty/omp/lib/libiomp5.so")
+    list(APPEND LIB_LIST ${OMP_LIB})
+  endif()
+  set(OPENVINO_LIBS ${LIB_LIST} PARENT_SCOPE)
+endfunction()
+
+function(remove_duplicate_libraries libraries)
+  list(LENGTH ${libraries} lib_length)
+  set(libraries_temp "")
+  set(full_libraries "")
+  foreach(lib_path ${${libraries}})
+    get_filename_component(lib_name ${lib_path} NAME)
+    list(FIND libraries_temp ${lib_name} lib_idx)
+    if (${lib_idx} EQUAL -1)
+      list(APPEND libraries_temp ${lib_name})
+      list(APPEND full_libraries ${lib_path})
+    endif()
+  endforeach()
+  set(${libraries} ${full_libraries} PARENT_SCOPE)
+endfunction()
+
+function(get_windows_path win_path origin_path)
+  STRING(REGEX REPLACE "/" "\\\\" _win_path ${origin_path})
+  set(${win_path} ${_win_path} PARENT_SCOPE)
+endfunction()
+
+function(get_osx_architecture)
+  if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64")
+    set(CURRENT_OSX_ARCH "arm64" PARENT_SCOPE)
+  elseif(CMAKE_OSX_ARCHITECTURES STREQUAL "x86_64")
+    set(CURRENT_OSX_ARCH "x86_64" PARENT_SCOPE)
+  else()
+    set(CURRENT_OSX_ARCH ${CMAKE_HOST_SYSTEM_PROCESSOR} PARENT_SCOPE)
+  endif()
+endfunction()
+
+
+# A fake target to include all the libraries and tests that the ultra_infer module depends on.
+add_custom_target(fd_compile_deps COMMAND echo 1)
+
+# A function to grep LINK_ONLY dependencies from INTERFACE_LINK_LIBRARIES
+function(regrex_link_only_libraries OUTPUT_DEPS PUBLIC_DEPS)
+  string(JOIN "#" _public_deps ${PUBLIC_DEPS})
+  string(REPLACE "$<LINK_ONLY:" "" _public_deps ${_public_deps})
+  string(REPLACE ">" "" _public_deps ${_public_deps})
+  string(REPLACE "#" ";" _public_deps ${_public_deps})
+  set(${OUTPUT_DEPS} ${_public_deps} PARENT_SCOPE)
+endfunction()
+
+# Bundle several static libraries into one. This function is modified from Paddle Lite. 
+# reference: https://github.com/PaddlePaddle/Paddle-Lite/blob/develop/cmake/lite.cmake#L252
+function(bundle_static_library tgt_name bundled_tgt_name fake_target)
+  list(APPEND static_libs ${tgt_name})
+  add_dependencies(fd_compile_deps ${fake_target})
+  # List the redundant static libs here: protobuf is already available
+  # in the Paddle Lite static library, so we don't need the protobuf
+  # bundled with OpenCV. Likewise there is no need for opencv_dnn,
+  # opencv_ml, opencv_flann and some other modules, so these
+  # redundant modules are discarded.
+  set(REDUNDANT_STATIC_LIBS opencv_dnn opencv_calib3d opencv_photo 
+      opencv_flann opencv_objdetect opencv_stitching opencv_gapi 
+      opencv_ml libprotobuf)
+
+  function(_recursively_collect_dependencies input_target)
+    list(FIND REDUNDANT_STATIC_LIBS ${input_target} _input_redundant_id)
+    # list(FIND) returns -1 when the item is not found, so any index > -1 marks a redundant lib.
+    if(${_input_redundant_id} GREATER -1)
+      return()
+    endif()
+    set(_input_link_libraries LINK_LIBRARIES)
+    # https://cmake.org/cmake/help/latest/prop_tgt/TYPE.html
+    get_target_property(_input_type ${input_target} TYPE)
+    # In OpenCVModules.cmake, the deps of modules (opencv_core, ...)
+    # are set as INTERFACE_LINK_LIBRARIES, and the TYPE of an OpenCV
+    # static lib is 'STATIC_LIBRARY'.
+    if ((${_input_type} STREQUAL "INTERFACE_LIBRARY")
+         OR (${_input_type} STREQUAL "STATIC_LIBRARY"))
+      set(_input_link_libraries INTERFACE_LINK_LIBRARIES)
+    endif()
+    get_target_property(_public_dependencies ${input_target} ${_input_link_libraries})
+    regrex_link_only_libraries(public_dependencies "${_public_dependencies}")
+    
+    foreach(dependency IN LISTS public_dependencies)
+      if(TARGET ${dependency})
+        get_target_property(alias ${dependency} ALIASED_TARGET)
+        if (TARGET ${alias})
+          set(dependency ${alias})
+        endif()
+        get_target_property(_type ${dependency} TYPE)
+        list(FIND REDUNDANT_STATIC_LIBS ${dependency} _deps_redundant_id)
+        if (${_type} STREQUAL "STATIC_LIBRARY" AND
+            (NOT (${_deps_redundant_id} GREATER -1)))
+          list(APPEND static_libs ${dependency})
+        endif()
+
+        get_property(library_already_added
+          GLOBAL PROPERTY _${tgt_name}_static_bundle_${dependency})
+        if (NOT library_already_added)
+          set_property(GLOBAL PROPERTY _${tgt_name}_static_bundle_${dependency} ON)
+          if(NOT (${_deps_redundant_id} GREATER -1))
+            _recursively_collect_dependencies(${dependency})
+          endif()
+        endif()
+      endif()
+    endforeach()
+    set(static_libs ${static_libs} PARENT_SCOPE)
+  endfunction()
+
+  _recursively_collect_dependencies(${tgt_name})
+
+  list(REMOVE_DUPLICATES static_libs)
+  list(REMOVE_ITEM static_libs ${REDUNDANT_STATIC_LIBS})
+  message(STATUS "WITH_STATIC_LIB=${WITH_STATIC_LIB}, Found all needed static libs from dependecy tree: ${static_libs}")
+  message(STATUS "Exclude some redundant static libs: ${REDUNDANT_STATIC_LIBS}")
+
+  set(bundled_tgt_full_name
+    ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${bundled_tgt_name}${CMAKE_STATIC_LIBRARY_SUFFIX})
+
+  message(STATUS "Use bundled_tgt_full_name:  ${CMAKE_CURRENT_BINARY_DIR}/${CMAKE_STATIC_LIBRARY_PREFIX}${bundled_tgt_name}${CMAKE_STATIC_LIBRARY_SUFFIX}")
+
+  if(WIN32)
+    message(FATAL_ERROR "Not support UltraInfer static lib for windows now.")
+  endif()
+
+  add_custom_target(${fake_target} ALL COMMAND ${CMAKE_COMMAND} -E echo "Building fake_target ${fake_target}")
+  add_dependencies(${fake_target} ${tgt_name})
+
+  if(NOT IOS AND NOT APPLE)
+    file(WRITE ${CMAKE_CURRENT_BINARY_DIR}/${bundled_tgt_name}.ar.in
+      "CREATE ${bundled_tgt_full_name}\n" )
+
+    foreach(tgt IN LISTS static_libs)
+      file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/${bundled_tgt_name}.ar.in
+        "ADDLIB $<TARGET_FILE:${tgt}>\n")
+    endforeach()
+
+    file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/${bundled_tgt_name}.ar.in "SAVE\n")
+    file(APPEND ${CMAKE_CURRENT_BINARY_DIR}/${bundled_tgt_name}.ar.in "END\n")
+
+    file(GENERATE
+      OUTPUT ${CMAKE_CURRENT_BINARY_DIR}/${bundled_tgt_name}.ar
+      INPUT ${CMAKE_CURRENT_BINARY_DIR}/${bundled_tgt_name}.ar.in)
+
+    set(ar_tool ${CMAKE_AR})
+    if (CMAKE_INTERPROCEDURAL_OPTIMIZATION)
+      set(ar_tool ${CMAKE_CXX_COMPILER_AR})
+    endif()
+    message(STATUS "Found ar_tool: ${ar_tool}")
+
+    add_custom_command(
+      TARGET ${fake_target} PRE_BUILD
+      COMMAND rm -f ${bundled_tgt_full_name}
+      COMMAND ${ar_tool} -M < ${CMAKE_CURRENT_BINARY_DIR}/${bundled_tgt_name}.ar
+      COMMENT "Bundling ${bundled_tgt_name}"
+      COMMAND ${CMAKE_STRIP} --strip-unneeded ${CMAKE_CURRENT_BINARY_DIR}/lib${bundled_tgt_name}.a
+      COMMENT "Stripped unneeded debug symbols in ${bundled_tgt_name}"
+      DEPENDS ${tgt_name}
+      VERBATIM)
+  else()
+    foreach(lib ${static_libs})
+      set(libfiles ${libfiles} $<TARGET_FILE:${lib}>)
+    endforeach()
+    add_custom_command(
+      TARGET ${fake_target} PRE_BUILD
+      COMMAND rm -f ${bundled_tgt_full_name}
+      COMMAND /usr/bin/libtool -static -o ${bundled_tgt_full_name} ${libfiles}
+      COMMENT "Bundling ${bundled_tgt_name}"
+      COMMAND ${CMAKE_STRIP} -S ${CMAKE_CURRENT_BINARY_DIR}/lib${bundled_tgt_name}.a
+      COMMENT "Stripped unneeded debug symbols in ${bundled_tgt_name}"
+      DEPENDS ${tgt_name}
+    )
+  endif()
+
+  add_library(${bundled_tgt_name} STATIC IMPORTED GLOBAL)
+  set_property(TARGET ${bundled_tgt_name} PROPERTY IMPORTED_LOCATION
+                                         ${bundled_tgt_full_name})          
+  add_dependencies(${bundled_tgt_name} ${fake_target})
+  add_dependencies(${bundled_tgt_name} ${tgt_name})
+
+endfunction()
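
For reference, the `remove_duplicate_libraries` helper above keeps only the first path seen for each library basename. A minimal Python rendering of the same rule, for illustration only (the sample paths are made up):

import os

def remove_duplicate_libraries(paths):
    # Keep the first path seen per library basename, mirroring the
    # CMake helper of the same name above.
    seen, result = set(), []
    for path in paths:
        name = os.path.basename(path)
        if name not in seen:
            seen.add(name)
            result.append(path)
    return result

print(remove_duplicate_libraries(
    ["/a/libfoo.so", "/b/libfoo.so", "/a/libbar.so"]))
# -> ['/a/libfoo.so', '/a/libbar.so']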

+ 0 - 0
libs/ultrainfer/cpack/debian_postinst.in → libs/ultra_infer/cpack/debian_postinst.in


+ 0 - 0
libs/ultrainfer/cpack/debian_prerm.in → libs/ultra_infer/cpack/debian_prerm.in


+ 0 - 0
libs/ultrainfer/cpack/rpm_postinst.in → libs/ultra_infer/cpack/rpm_postinst.in


+ 0 - 0
libs/ultrainfer/cpack/rpm_postrm.in → libs/ultra_infer/cpack/rpm_postrm.in


+ 0 - 0
libs/ultrainfer/python/__init__.py → libs/ultra_infer/python/__init__.py


+ 0 - 0
libs/ultrainfer/python/requirements.txt → libs/ultra_infer/python/requirements.txt


+ 0 - 0
libs/ultrainfer/python/scripts/__init__.py → libs/ultra_infer/python/scripts/__init__.py


+ 12 - 0
libs/ultra_infer/python/scripts/build_gpu.sh

@@ -0,0 +1,12 @@
+export ENABLE_ORT_BACKEND=ON
+export ENABLE_OPENVINO_BACKEND=ON
+export ENABLE_PADDLE_BACKEND=ON
+export ENABLE_TRT_BACKEND=ON
+export TRT_DIRECTORY=/ultra_infer/libs/TensorRT-8.4.1.5
+export CUDA_DIRECTORY=/usr/local/cuda
+export ENABLE_VISION=ON
+export WITH_GPU=ON
+export CMAKE_CXX_COMPILER=/usr/local/gcc-8.2/bin/g++
+
+python setup.py build
+python setup.py bdist_wheel
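
The same environment-driven GPU build can also be scripted from Python. A minimal sketch, assuming it runs from the python/ directory next to setup.py; the TensorRT path is a placeholder to adjust:

import os
import subprocess
import sys

env = dict(os.environ)
env.update({
    "ENABLE_ORT_BACKEND": "ON",
    "ENABLE_OPENVINO_BACKEND": "ON",
    "ENABLE_PADDLE_BACKEND": "ON",
    "ENABLE_TRT_BACKEND": "ON",
    "TRT_DIRECTORY": "/path/to/TensorRT-8.4.1.5",  # placeholder path
    "CUDA_DIRECTORY": "/usr/local/cuda",
    "ENABLE_VISION": "ON",
    "WITH_GPU": "ON",
})

# setup.py reads these variables via os.getenv() when configuring CMake.
subprocess.check_call([sys.executable, "setup.py", "build"], env=env)
subprocess.check_call([sys.executable, "setup.py", "bdist_wheel"], env=env)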

+ 207 - 0
libs/ultra_infer/python/scripts/process_libraries.py.in

@@ -0,0 +1,207 @@
+
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import sys
+import shutil
+import subprocess
+import platform
+
+user_specified_dirs = ['@OPENCV_DIRECTORY@', '@ORT_DIRECTORY@', ]
+PACKAGE_NAME = os.getenv("PACKAGE_NAME", "ultra_infer")
+PY_PACKAGE_NAME = PACKAGE_NAME + "_main"
+
+
+def process_on_linux(current_dir):
+    rpaths = ["$ORIGIN:$ORIGIN/libs"]
+    fd_libs = list()
+    libs_path = os.path.join(current_dir, PACKAGE_NAME, "libs")
+    for f in os.listdir(libs_path):
+        filename = os.path.join(libs_path, f)
+        if not os.path.isfile(filename):
+            continue
+        if f.count(PACKAGE_NAME) > 0 and f.count(".so") > 0:
+            fd_libs.append(filename)
+
+    cmake_build_dir = os.path.join(current_dir, ".setuptools-cmake-build")
+    patchelf_bin_path = os.path.join(cmake_build_dir, "third_libs/patchelf/bin/patchelf")
+    if not os.path.exists(patchelf_bin_path):
+        patchelf_bin_path = "patchelf"
+
+    third_libs_path = os.path.join(libs_path, "third_libs")
+
+    # Remove unused OpenCV files from the Python wheel to reduce the package size
+    if os.path.exists(os.path.join(third_libs_path, "opencv")):
+        for root, dirs, files in os.walk(os.path.join(third_libs_path, "opencv")):
+            for f in files:
+                items = f.strip().split('.')
+                if len(items) != 4:
+                    os.remove(os.path.join(root, f))
+                    continue
+                if items[0].strip() not in ["libopencv_highgui", "libopencv_video", "libopencv_videoio", "libopencv_imgcodecs", "libopencv_imgproc", "libopencv_core",  "libopencv_calib3d", "libopencv_features2d", "libopencv_flann"]:
+                    os.remove(os.path.join(root, f))
+
+    all_libs_paths = [third_libs_path] + user_specified_dirs
+    for path in all_libs_paths:
+        for root, dirs, files in os.walk(path):
+            for d in dirs:
+                if d not in ["lib", "lib64"]:
+                    continue
+                rel_path = os.path.relpath(os.path.join(root, d), libs_path)
+                if path in user_specified_dirs:
+                    # Note(zhoushunjie): Use the absolute path for user_specified_dirs
+                    rpath = os.path.join(root, d)
+                else:
+                    rpath = "$ORIGIN/" + rel_path
+                rpaths.append(rpath)
+    for lib in fd_libs:
+        command = "{} --set-rpath '{}' {}".format(patchelf_bin_path, ":".join(rpaths), lib)
+        if platform.machine() != 'sw_64' and platform.machine() != 'mips64':
+            # subprocess.call waits for patchelf and returns its exit code;
+            # a non-zero status means the rpath update failed.
+            assert subprocess.call(
+                command,
+                shell=True) == 0, "patchelf {} failed, the command: {}".format(
+                    lib, command)
+
+
+def process_on_mac(current_dir):
+    fd_libs = list()
+    libs_path = os.path.join(current_dir, PACKAGE_NAME, "libs")
+    cmake_build_dir = os.path.join(current_dir, ".setuptools-cmake-build")
+    for f in os.listdir(libs_path):
+        filename = os.path.join(libs_path, f)
+        if not os.path.isfile(filename):
+            continue
+        if f.count(PACKAGE_NAME) > 0 and (f.count(".dylib") > 0 or
+                                          f.count(".so") > 0):
+            fd_libs.append(filename)
+
+    commands = list()
+    pre_commands = list()
+    for lib in fd_libs:
+        if lib.count(PY_PACKAGE_NAME) > 0:
+            pre_commands.append(
+                "install_name_tool -delete_rpath {} ".format(cmake_build_dir) + lib)
+            commands.append("install_name_tool -id @loader_path " + lib)
+            commands.append("install_name_tool -add_rpath @loader_path " + lib)
+
+    third_libs_path = os.path.join(libs_path, "third_libs")
+    cmake_third_libs_path = os.path.join(cmake_build_dir, "third_libs", "install")
+    all_libs_paths = [cmake_third_libs_path] + user_specified_dirs
+    for path in all_libs_paths:
+        for root, dirs, files in os.walk(path):
+            for d in dirs:
+                if d not in ["lib", "lib64"]:
+                    continue
+                rel_path = os.path.relpath(os.path.join(root, d), cmake_third_libs_path)
+                if path in user_specified_dirs:
+                    # Note(zhoushunjie): Use the absolute path for user_specified_dirs
+                    need_delete_rpath = os.path.join(root, d)
+                    need_add_rpath = os.path.join(root, d)
+                else:
+                    need_delete_rpath = os.path.join(root, d)
+                    need_add_rpath = "@loader_path/third_libs/" + rel_path
+                for lib in fd_libs:
+                    if lib.count(PY_PACKAGE_NAME) > 0:
+                        pre_commands.append(
+                            "install_name_tool -delete_rpath {} {}".format(need_delete_rpath, lib))
+                        commands.append(
+                            "install_name_tool -add_rpath {} {}".format(need_add_rpath, lib))
+
+    for command in pre_commands:
+        try:
+            os.system(command)
+        except Exception:
+            print("Skip executing command: " + command)
+
+    for command in commands:
+        assert os.system(
+            command) == 0, "command execute failed! command: {}".format(
+            command)
+
+def process_on_windows(current_dir):
+    libs_path = os.path.join(current_dir, PACKAGE_NAME, "libs")
+    third_libs_path = os.path.join(libs_path, "third_libs")
+    for root, dirs, files in os.walk(third_libs_path):
+        for f in files:
+            file_path = os.path.join(root, f)
+            if f.count('onnxruntime') > 0 and f.endswith('.dll'):
+                shutil.copy(file_path, libs_path)
+
+
+def get_all_files(dirname):
+    files = list()
+    for root, dirs, filenames in os.walk(dirname):
+        for f in filenames:
+            fullname = os.path.join(root, f)
+            files.append(fullname)
+    return files
+
+
+def process_libraries(current_dir):
+    if platform.system().lower() == "linux":
+        process_on_linux(current_dir)
+    elif platform.system().lower() == "darwin":
+        process_on_mac(current_dir)
+    elif platform.system().lower() == "windows":
+        process_on_windows(current_dir)
+
+    all_files = get_all_files(os.path.join(current_dir, PACKAGE_NAME, "libs"))
+    package_data = list()
+
+    if platform.system().lower() == "windows":
+
+        def check_windows_legal_file(f):
+            # Note(zhoushunjie): Special cases for some libraries;
+            # the file 'plugins.xml' is required by OpenVINO.
+            for special_file in ['plugins.xml']:
+                if special_file in f:
+                    return True
+            return False
+
+        for f in all_files:
+            if f.endswith(".pyd") or f.endswith("lib") or f.endswith(
+                    "dll") or check_windows_legal_file(f):
+                package_data.append(
+                    os.path.relpath(f, os.path.join(current_dir,
+                                                    PACKAGE_NAME)))
+
+        return package_data
+
+    filters = [".vcxproj", ".png", ".java", ".h", ".cc", ".cpp", ".hpp"]
+    for f in all_files:
+        remain = True
+        for flt in filters:
+            if f.count(flt) > 0:
+                remain = False
+        filename = os.path.split(f)[-1]
+        # Note(zhoushunjie): Adding the TRT libs below would increase the size of the wheel package by ~450M.
+        if filename in [
+                "libnvinfer_plugin.so",
+                "libnvinfer.so", "libnvonnxparser.so",
+                "libnvparsers.so", "libnvcaffe_parser.so"
+        ]:
+            continue
+
+        for lib_prefix in ["libnvinfer_plugin.so.8.",
+            "libnvinfer.so.8.", "libnvonnxparser.so.8.",
+            "libnvparsers.so.8.", "libnvcaffe_parser.so.8."]:
+            if filename.startswith(lib_prefix):
+                remain = False
+                break
+
+        if remain:
+            package_data.append(
+                os.path.relpath(f, os.path.join(current_dir, PACKAGE_NAME)))
+    return package_data
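
To make the Linux branch above concrete: for each bundled *.so, process_on_linux rewrites the ELF rpath so the wheel resolves its own third-party libraries first. A hedged sketch of the resulting patchelf invocation; the library name and the second rpath entry are illustrative only:

import subprocess

lib = "ultra_infer/libs/ultra_infer_main.so"   # hypothetical bundled lib
rpaths = [
    "$ORIGIN:$ORIGIN/libs",                    # wheel-relative lookup
    "$ORIGIN/third_libs/onnxruntime/lib",      # illustrative third-party dir
]
command = "patchelf --set-rpath '{}' {}".format(":".join(rpaths), lib)
subprocess.call(command, shell=True)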

+ 485 - 0
libs/ultra_infer/python/setup.py

@@ -0,0 +1,485 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# This file is adapted from github.com/onnx/onnx.git
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+import shutil
+import os
+
+TOP_DIR = os.path.realpath(os.path.dirname(__file__))
+TOP_DIR = os.path.split(TOP_DIR)[0]
+PACKAGE_NAME = os.getenv("PACKAGE_NAME", "ultra_infer")
+wheel_name = os.getenv("WHEEL_NAME", "ultra_infer-python")
+
+if not os.path.exists(PACKAGE_NAME):
+    shutil.copytree("ultra_infer", PACKAGE_NAME)
+
+from distutils.spawn import find_executable
+from distutils import sysconfig, log
+import setuptools
+import setuptools.command.build_py
+import setuptools.command.develop
+import setuptools.command.build_ext
+
+from collections import namedtuple
+from contextlib import contextmanager
+import glob
+import shlex
+import subprocess
+import sys
+import platform
+from textwrap import dedent
+import multiprocessing
+
+with open(os.path.join(TOP_DIR, "python", "requirements.txt")) as fin:
+    REQUIRED_PACKAGES = fin.read()
+
+if os.getenv("BUILD_ON_CPU", "OFF") == "ON":
+    os.environ["ENABLE_PADDLE_BACKEND"] = "ON"
+    os.environ["ENABLE_ORT_BACKEND"] = "ON"
+    os.environ["ENABLE_OPENVINO_BACKEND"] = "ON"
+    os.environ["ENABLE_VISION"] = "ON"
+    os.environ["ENABLE_TEXT"] = "ON"
+    os.environ["WITH_GPU"] = "OFF"
+
+setup_configs = dict()
+setup_configs["LIBRARY_NAME"] = PACKAGE_NAME
+setup_configs["PY_LIBRARY_NAME"] = PACKAGE_NAME + "_main"
+# Backend options
+setup_configs["ENABLE_TVM_BACKEND"] = os.getenv("ENABLE_TVM_BACKEND", "OFF")
+setup_configs["ENABLE_RKNPU2_BACKEND"] = os.getenv("ENABLE_RKNPU2_BACKEND", "OFF")
+setup_configs["ENABLE_SOPHGO_BACKEND"] = os.getenv("ENABLE_SOPHGO_BACKEND", "OFF")
+setup_configs["ENABLE_ORT_BACKEND"] = os.getenv("ENABLE_ORT_BACKEND", "OFF")
+setup_configs["ENABLE_OPENVINO_BACKEND"] = os.getenv("ENABLE_OPENVINO_BACKEND", "OFF")
+setup_configs["ENABLE_PADDLE_BACKEND"] = os.getenv("ENABLE_PADDLE_BACKEND", "OFF")
+setup_configs["ENABLE_POROS_BACKEND"] = os.getenv("ENABLE_POROS_BACKEND", "OFF")
+setup_configs["ENABLE_TRT_BACKEND"] = os.getenv("ENABLE_TRT_BACKEND", "OFF")
+setup_configs["ENABLE_LITE_BACKEND"] = os.getenv("ENABLE_LITE_BACKEND", "OFF")
+setup_configs["ENABLE_PADDLE2ONNX"] = os.getenv("ENABLE_PADDLE2ONNX", "OFF")
+setup_configs["ENABLE_VISION"] = os.getenv("ENABLE_VISION", "OFF")
+setup_configs["ENABLE_FLYCV"] = os.getenv("ENABLE_FLYCV", "OFF")
+setup_configs["ENABLE_CVCUDA"] = os.getenv("ENABLE_CVCUDA", "OFF")
+setup_configs["ENABLE_TEXT"] = os.getenv("ENABLE_TEXT", "OFF")
+setup_configs["ENABLE_BENCHMARK"] = os.getenv("ENABLE_BENCHMARK", "OFF")
+# Hardware options
+setup_configs["WITH_GPU"] = os.getenv("WITH_GPU", "OFF")
+setup_configs["WITH_IPU"] = os.getenv("WITH_IPU", "OFF")
+setup_configs["WITH_OPENCL"] = os.getenv("WITH_OPENCL", "OFF")
+setup_configs["WITH_TIMVX"] = os.getenv("WITH_TIMVX", "OFF")
+setup_configs["WITH_DIRECTML"] = os.getenv("WITH_DIRECTML", "OFF")
+setup_configs["WITH_ASCEND"] = os.getenv("WITH_ASCEND", "OFF")
+setup_configs["WITH_KUNLUNXIN"] = os.getenv("WITH_KUNLUNXIN", "OFF")
+setup_configs["RKNN2_TARGET_SOC"] = os.getenv("RKNN2_TARGET_SOC", "")
+# Custom deps settings
+setup_configs["TRT_DIRECTORY"] = os.getenv("TRT_DIRECTORY", "UNDEFINED")
+setup_configs["CUDA_DIRECTORY"] = os.getenv("CUDA_DIRECTORY", "/usr/local/cuda")
+setup_configs["OPENCV_DIRECTORY"] = os.getenv("OPENCV_DIRECTORY", "")
+setup_configs["ORT_DIRECTORY"] = os.getenv("ORT_DIRECTORY", "")
+setup_configs["OPENVINO_DIRECTORY"] = os.getenv("OPENVINO_DIRECTORY", "")
+setup_configs["PADDLEINFERENCE_DIRECTORY"] = os.getenv("PADDLEINFERENCE_DIRECTORY", "")
+setup_configs["PADDLEINFERENCE_VERSION"] = os.getenv("PADDLEINFERENCE_VERSION", "")
+setup_configs["PADDLEINFERENCE_URL"] = os.getenv("PADDLEINFERENCE_URL", "")
+setup_configs["PADDLEINFERENCE_API_COMPAT_2_4_x"] = os.getenv(
+    "PADDLEINFERENCE_API_COMPAT_2_4_x", "OFF"
+)
+setup_configs["PADDLEINFERENCE_API_COMPAT_2_5_x"] = os.getenv(
+    "PADDLEINFERENCE_API_COMPAT_2_5_x", "OFF"
+)
+setup_configs["PADDLEINFERENCE_API_COMPAT_2_6_x"] = os.getenv(
+    "PADDLEINFERENCE_API_COMPAT_2_6_x", "OFF"
+)
+setup_configs["PADDLEINFERENCE_API_COMPAT_DEV"] = os.getenv(
+    "PADDLEINFERENCE_API_COMPAT_DEV", "OFF"
+)
+setup_configs["PADDLEINFERENCE_API_CUSTOM_OP"] = os.getenv(
+    "PADDLEINFERENCE_API_CUSTOM_OP", "OFF"
+)
+setup_configs["PADDLE2ONNX_URL"] = os.getenv("PADDLE2ONNX_URL", "")
+setup_configs["PADDLELITE_URL"] = os.getenv("PADDLELITE_URL", "")
+
+# Other settings
+setup_configs["BUILD_ON_JETSON"] = os.getenv("BUILD_ON_JETSON", "OFF")
+setup_configs["BUILD_PADDLE2ONNX"] = os.getenv("BUILD_PADDLE2ONNX", "OFF")
+
+if setup_configs["RKNN2_TARGET_SOC"] != "" or setup_configs["BUILD_ON_JETSON"] != "OFF":
+    REQUIRED_PACKAGES = REQUIRED_PACKAGES.replace("opencv-python", "")
+
+if wheel_name == "ultra_infer-python":
+    if setup_configs["WITH_GPU"] == "ON" or setup_configs["BUILD_ON_JETSON"] == "ON":
+        wheel_name = "ultra_infer-gpu-python"
+    elif setup_configs["WITH_IPU"] == "ON":
+        wheel_name = "ultra_infer-ipu-python"
+
+if os.getenv("CMAKE_CXX_COMPILER", None) is not None:
+    setup_configs["CMAKE_CXX_COMPILER"] = os.getenv("CMAKE_CXX_COMPILER")
+
+SRC_DIR = os.path.join(TOP_DIR, PACKAGE_NAME)
+PYTHON_SRC_DIR = os.path.join(TOP_DIR, "python", PACKAGE_NAME)
+CMAKE_BUILD_DIR = os.path.join(TOP_DIR, "python", ".setuptools-cmake-build")
+
+WINDOWS = os.name == "nt"
+
+CMAKE = find_executable("cmake3") or find_executable("cmake")
+MAKE = find_executable("make")
+
+setup_requires = []
+extras_require = {}
+
+################################################################################
+# Global variables for controlling the build variant
+################################################################################
+
+# The default value is set to TRUE/1 to keep the settings the same as the current ones.
+# However, going forward, the recommended way is to set this to FALSE/0.
+USE_MSVC_STATIC_RUNTIME = bool(os.getenv("USE_MSVC_STATIC_RUNTIME", "1") == "1")
+ONNX_NAMESPACE = os.getenv("ONNX_NAMESPACE", "paddle2onnx")
+################################################################################
+# Version
+################################################################################
+
+try:
+    git_version = (
+        subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=TOP_DIR)
+        .decode("ascii")
+        .strip()
+    )
+except (OSError, subprocess.CalledProcessError):
+    git_version = None
+
+extra_version_info = ""
+if setup_configs["PADDLEINFERENCE_VERSION"] != "":
+    extra_version_info += "." + setup_configs["PADDLEINFERENCE_VERSION"]
+
+with open(os.path.join(TOP_DIR, "VERSION_NUMBER")) as version_file:
+    VersionInfo = namedtuple(
+        "VersionInfo",
+        [
+            "version",
+            "git_version",
+            "extra_version_info",
+            "enable_trt_backend",
+            "enable_paddle_backend",
+            "with_gpu",
+        ],
+    )(
+        version=version_file.read().strip(),
+        git_version=git_version,
+        extra_version_info=extra_version_info.strip("."),
+        enable_trt_backend=setup_configs["ENABLE_TRT_BACKEND"],
+        enable_paddle_backend=setup_configs["ENABLE_PADDLE_BACKEND"],
+        with_gpu=setup_configs["WITH_GPU"],
+    )
+
+################################################################################
+# Pre Check
+################################################################################
+
+assert CMAKE, 'Could not find "cmake" executable!'
+
+################################################################################
+# Utilities
+################################################################################
+
+
+@contextmanager
+def cd(path):
+    if not os.path.isabs(path):
+        raise RuntimeError("Can only cd to absolute path, got: {}".format(path))
+    orig_path = os.getcwd()
+    os.chdir(path)
+    try:
+        yield
+    finally:
+        os.chdir(orig_path)
+
+
+################################################################################
+# Customized commands
+################################################################################
+
+
+class NoOptionCommand(setuptools.Command):
+    user_options = []
+
+    def initialize_options(self):
+        pass
+
+    def finalize_options(self):
+        pass
+
+
+def get_all_files(dirname):
+    files = list()
+    for root, dirs, filenames in os.walk(dirname):
+        for f in filenames:
+            fullname = os.path.join(root, f)
+            files.append(fullname)
+    return files
+
+
+class create_version(NoOptionCommand):
+    def run(self):
+        with open(os.path.join(PYTHON_SRC_DIR, "code_version.py"), "w") as f:
+            f.write(
+                dedent(
+                    """\
+            # This file is generated by setup.py. DO NOT EDIT!
+            from __future__ import absolute_import
+            from __future__ import division
+            from __future__ import print_function
+            from __future__ import unicode_literals
+            version = '{version}'
+            git_version = '{git_version}'
+            extra_version_info = '{extra_version_info}'
+            enable_trt_backend = '{enable_trt_backend}'
+            enable_paddle_backend = '{enable_paddle_backend}'
+            with_gpu = '{with_gpu}'
+            """.format(
+                        **dict(VersionInfo._asdict())
+                    )
+                )
+            )
+
+
+class cmake_build(setuptools.Command):
+    """
+    Compiles everything when `python setup.py build` is run, using cmake.
+    Custom args can be passed to cmake by specifying the `CMAKE_ARGS`
+    environment variable.
+    The number of CPUs used by `make` can be specified by passing `-j<ncpus>`
+    to `setup.py build`.  By default all CPUs are used.
+    """
+
+    user_options = [
+        (str("jobs="), str("j"), str("Specifies the number of jobs to use with make"))
+    ]
+
+    built = False
+
+    def initialize_options(self):
+        self.jobs = None
+
+    def finalize_options(self):
+        if sys.version_info[0] >= 3:
+            self.set_undefined_options("build", ("parallel", "jobs"))
+        if self.jobs is None and os.getenv("MAX_JOBS") is not None:
+            self.jobs = os.getenv("MAX_JOBS")
+        self.jobs = multiprocessing.cpu_count() if self.jobs is None else int(self.jobs)
+
+    def run(self):
+        if cmake_build.built:
+            return
+        cmake_build.built = True
+        if not os.path.exists(CMAKE_BUILD_DIR):
+            os.makedirs(CMAKE_BUILD_DIR)
+
+        with cd(CMAKE_BUILD_DIR):
+            build_type = "Release"
+            # configure
+            cmake_args = [
+                CMAKE,
+                "-DPYTHON_INCLUDE_DIR={}".format(sysconfig.get_python_inc()),
+                "-DPYTHON_EXECUTABLE={}".format(sys.executable),
+                "-DBUILD_ULTRAINFER_PYTHON=ON",
+                "-DCMAKE_EXPORT_COMPILE_COMMANDS=ON",
+                "-DONNX_NAMESPACE={}".format(ONNX_NAMESPACE),
+                "-DPY_EXT_SUFFIX={}".format(
+                    sysconfig.get_config_var("EXT_SUFFIX") or ""
+                ),
+            ]
+            cmake_args.append("-DCMAKE_BUILD_TYPE=%s" % build_type)
+            for k, v in setup_configs.items():
+                cmake_args.append("-D{}={}".format(k, v))
+            if WINDOWS:
+                cmake_args.extend(
+                    [
+                        # We need to link against libpython on Windows, so
+                        # pass the Python version to CMake so that it can
+                        # find the right Python.
+                        "-DPY_VERSION={}".format(
+                            "{0}.{1}".format(*sys.version_info[:2])
+                        ),
+                    ]
+                )
+                if platform.architecture()[0] == "64bit":
+                    cmake_args.extend(["-A", "x64", "-T", "host=x64"])
+                else:
+                    cmake_args.extend(["-A", "Win32", "-T", "host=x86"])
+            if "CMAKE_ARGS" in os.environ:
+                extra_cmake_args = shlex.split(os.environ["CMAKE_ARGS"])
+                # prevent the extra args from leaking into downstream scripts
+                del os.environ["CMAKE_ARGS"]
+                log.info("Extra cmake args: {}".format(extra_cmake_args))
+                cmake_args.extend(extra_cmake_args)
+            cmake_args.append(TOP_DIR)
+            subprocess.check_call(cmake_args)
+
+            build_args = [CMAKE, "--build", os.curdir]
+            if WINDOWS:
+                build_args.extend(["--config", build_type])
+                build_args.extend(["--", "/maxcpucount:{}".format(self.jobs)])
+            else:
+                build_args.extend(["--", "-j", str(self.jobs)])
+            subprocess.check_call(build_args)
+
+
+class build_py(setuptools.command.build_py.build_py):
+    def run(self):
+        self.run_command("create_version")
+        self.run_command("cmake_build")
+
+        generated_python_files = glob.glob(
+            os.path.join(CMAKE_BUILD_DIR, PACKAGE_NAME, "*.py")
+        ) + glob.glob(os.path.join(CMAKE_BUILD_DIR, PACKAGE_NAME, "*.pyi"))
+
+        for src in generated_python_files:
+            dst = os.path.join(TOP_DIR, os.path.relpath(src, CMAKE_BUILD_DIR))
+            self.copy_file(src, dst)
+
+        return setuptools.command.build_py.build_py.run(self)
+
+
+class develop(setuptools.command.develop.develop):
+    def run(self):
+        self.run_command("build_py")
+        setuptools.command.develop.develop.run(self)
+
+
+class build_ext(setuptools.command.build_ext.build_ext):
+    def run(self):
+        self.run_command("cmake_build")
+        setuptools.command.build_ext.build_ext.run(self)
+
+    def build_extensions(self):
+        for ext in self.extensions:
+            fullname = self.get_ext_fullname(ext.name)
+            filename = os.path.basename(self.get_ext_filename(fullname))
+
+            lib_path = CMAKE_BUILD_DIR
+            if os.name == "nt":
+                debug_lib_dir = os.path.join(lib_path, "Debug")
+                release_lib_dir = os.path.join(lib_path, "Release")
+                if os.path.exists(debug_lib_dir):
+                    lib_path = debug_lib_dir
+                elif os.path.exists(release_lib_dir):
+                    lib_path = release_lib_dir
+            src = os.path.join(lib_path, filename)
+            dst = os.path.join(os.path.realpath(self.build_lib), PACKAGE_NAME, filename)
+            self.copy_file(src, dst)
+
+
+cmdclass = {
+    "create_version": create_version,
+    "cmake_build": cmake_build,
+    "build_py": build_py,
+    "develop": develop,
+    "build_ext": build_ext,
+}
+
+################################################################################
+# Extensions
+################################################################################
+
+ext_modules = [
+    setuptools.Extension(
+        name=str(PACKAGE_NAME + "." + setup_configs["PY_LIBRARY_NAME"]), sources=[]
+    ),
+]
+
+################################################################################
+# Packages
+################################################################################
+
+# no need to do fancy stuff so far
+if PACKAGE_NAME != "ultra_infer":
+    packages = setuptools.find_packages(exclude=["ultra_infer*", "scripts"])
+else:
+    packages = setuptools.find_packages(exclude=["xencrypt*", "scripts"])
+
+################################################################################
+# Test
+################################################################################
+
+if sys.version_info[0] == 3:
+    # Mypy doesn't work with Python 2
+    extras_require["mypy"] = ["mypy==0.600"]
+
+################################################################################
+# Final
+################################################################################
+
+package_data = {PACKAGE_NAME: ["LICENSE", "ThirdPartyNotices.txt"]}
+
+if sys.argv[1] == "install" or sys.argv[1] == "bdist_wheel":
+    shutil.copy(
+        os.path.join(TOP_DIR, "ThirdPartyNotices.txt"),
+        os.path.join(TOP_DIR, "python", PACKAGE_NAME),
+    )
+    shutil.copy(
+        os.path.join(TOP_DIR, "LICENSE"), os.path.join(TOP_DIR, "python", PACKAGE_NAME)
+    )
+    if not os.path.exists(
+        os.path.join(TOP_DIR, "python", PACKAGE_NAME, "libs", "third_libs")
+    ):
+        print(
+            f"Didn't detect path: {PACKAGE_NAME}/libs/third_libs exist, please execute `python setup.py build` first"
+        )
+        sys.exit(0)
+    from scripts.process_libraries import process_libraries
+
+    all_lib_data = process_libraries(os.path.split(os.path.abspath(__file__))[0])
+    package_data[PACKAGE_NAME].extend(all_lib_data)
+    setuptools.setup(
+        name=wheel_name,
+        version=VersionInfo.version + extra_version_info,
+        ext_modules=ext_modules,
+        description="Deploy Kit Tool For Deeplearning models.",
+        packages=packages,
+        package_data=package_data,
+        include_package_data=True,
+        setup_requires=setup_requires,
+        extras_require=extras_require,
+        author="ultra_infer",
+        install_requires=REQUIRED_PACKAGES,
+        classifiers=[
+            "Programming Language :: Python :: 3",
+            "License :: OSI Approved :: Apache Software License",
+            "Operating System :: OS Independent",
+        ],
+        license="Apache 2.0",
+    )
+else:
+    setuptools.setup(
+        name=wheel_name,
+        version=VersionInfo.version + extra_version_info,
+        description="Deploy Kit Tool For Deeplearning models.",
+        ext_modules=ext_modules,
+        cmdclass=cmdclass,
+        packages=packages,
+        package_data=package_data,
+        include_package_data=False,
+        setup_requires=setup_requires,
+        extras_require=extras_require,
+        author="ultra_infer",
+        install_requires=REQUIRED_PACKAGES,
+        classifiers=[
+            "Programming Language :: Python :: 3",
+            "License :: OSI Approved :: Apache Software License",
+            "Operating System :: OS Independent",
+        ],
+        license="Apache 2.0",
+    )
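
After `python setup.py build`, the create_version command above has written code_version.py into the package. A quick sanity check of the recorded build metadata, assuming the built package directory is importable:

# Assumes `python setup.py build` has generated code_version.py.
from ultra_infer.code_version import version, git_version, with_gpu

print("version:", version)
print("git_version:", git_version)
print("with_gpu:", with_gpu)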

+ 186 - 0
libs/ultra_infer/python/ultra_infer/__init__.py

@@ -0,0 +1,186 @@
+# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+import os
+import sys
+import platform
+
+# Create symbolic links to the TensorRT libraries.
+trt_directory = os.path.join(
+    os.path.dirname(os.path.abspath(__file__)), "libs/third_libs/tensorrt/lib/"
+)
+if os.name != "nt" and os.path.exists(trt_directory):
+    logging.basicConfig(level=logging.INFO)
+    for trt_lib in [
+        "libnvcaffe_parser.so",
+        "libnvinfer_plugin.so",
+        "libnvinfer.so",
+        "libnvonnxparser.so",
+        "libnvparsers.so",
+    ]:
+        dst = os.path.join(trt_directory, trt_lib)
+        src = os.path.join(trt_directory, trt_lib + ".8")
+        if not os.path.exists(dst):
+            try:
+                os.symlink(src, dst)
+                logging.info(f"Create a symbolic link pointing to {src} named {dst}.")
+            except OSError as e:
+                logging.warning(
+                    f"Failed to create a symbolic link pointing to {src} by an unprivileged user. "
+                    "It may failed when you use Paddle TensorRT backend. "
+                    "Please use administator privilege to import ultra_infer at first time."
+                )
+                break
+
+    # HACK: Reset the root logger config that the logging.basicConfig call above messed up.
+    root_logger = logging.getLogger()
+    root_logger.level = logging.WARNING
+    for handler in root_logger.handlers[:]:
+        root_logger.removeHandler(handler)
+
+from .code_version import version, git_version, extra_version_info
+from .code_version import enable_trt_backend, enable_paddle_backend, with_gpu
+
+# Note(zhoushunjie): Fix the import order of the paddle and ultra_infer libraries.
+# This workaround will be removed when the conflict between paddle and
+# ultra_infer is fixed.
+
+# Note(qiuyanjun): Add backward compatibility for paddle 2.4.x
+sys_platform = platform.platform().lower()
+
+
+def get_paddle_version():
+    paddle_version = ""
+    try:
+        import pkg_resources
+
+        paddle_version = pkg_resources.require("paddlepaddle-gpu")[0].version.split(
+            ".post"
+        )[0]
+    except Exception:
+        try:
+            paddle_version = pkg_resources.require("paddlepaddle")[0].version.split(
+                ".post"
+            )[0]
+        except Exception:
+            pass
+    return paddle_version
+
+
+def should_import_paddle():
+    if ("paddle2.4" in extra_version_info) or ("post24" in extra_version_info):
+        paddle_version = get_paddle_version()
+        if (
+            paddle_version != ""
+            and paddle_version <= "2.4.2"
+            and paddle_version != "0.0.0"
+        ):
+            return True
+    return False
+
+
+def should_set_tensorrt():
+    if (
+        with_gpu == "ON"
+        and enable_paddle_backend == "ON"
+        and enable_trt_backend == "ON"
+    ):
+        return True
+    return False
+
+
+def tensorrt_is_avaliable():
+    # Note(qiuyanjun): Only Linux is supported for now.
+    found_trt_lib = False
+    if ("linux" in sys_platform) and ("LD_LIBRARY_PATH" in os.environ.keys()):
+        for lib_path in os.environ["LD_LIBRARY_PATH"].split(":"):
+            if os.path.exists(os.path.join(lib_path, "libnvinfer.so")):
+                found_trt_lib = True
+                break
+    return found_trt_lib
+
+
+try:
+    # windows: no conflict between ultra_infer and paddle.
+    # linux: must import paddle first to solve the conflict.
+    # macos: the conflict between ultra_infer and paddle can not be solved yet,
+    #        due to the global flags redefined in the paddle/paddle_inference .so;
+    #        we get the error (ERROR: flag 'xxx' was defined more than once).
+    if "linux" in sys_platform:
+        if should_import_paddle():
+            import paddle  # need to import paddle first for paddle 2.4.x
+
+            # check whether tensorrt is in LD_LIBRARY_PATH for ultra_infer
+            if should_set_tensorrt() and (not tensorrt_is_avaliable()):
+                if os.path.exists(trt_directory):
+                    logging.info(
+                        "\n[WARNING] Can not find TensorRT lib in LD_LIBRARY_PATH for UltraInfer! \
+            \n[WARNING] Please export [ YOUR CUSTOM TensorRT ] lib path to LD_LIBRARY_PATH first, or run the command: \
+            \n[WARNING] Linux: 'export LD_LIBRARY_PATH=$(python -c 'from ultra_infer import trt_directory; print(trt_directory)'):$LD_LIBRARY_PATH'"
+                    )
+                else:
+                    logging.info(
+                        "\n[WARNING] Can not find TensorRT lib in LD_LIBRARY_PATH for UltraInfer! \
+            \n[WARNING] Please export [YOUR CUSTOM TensorRT] lib path to LD_LIBRARY_PATH first."
+                    )
+except Exception:
+    pass
+
+
+os.environ["FLAGS_enable_pir_api"] = "0"
+logging.warning(
+    "Please note that we have set the environment variable \
+'FLAGS_enable_pir_api' to '0' to ensure the correct operation of the Paddle backend."
+)
+
+
+from .c_lib_wrap import (
+    ModelFormat,
+    Backend,
+    FDDataType,
+    TensorInfo,
+    Device,
+    is_built_with_gpu,
+    is_built_with_ort,
+    is_built_with_paddle,
+    is_built_with_trt,
+    get_default_cuda_directory,
+)
+
+
+def set_logger(enable_info=True, enable_warning=True):
+    """Set behaviour of logger while using UltraInfer
+
+    :param enable_info: (boolean)Whether to print out log level of INFO
+    :param enable_warning: (boolean)Whether to print out log level of WARNING, recommended to keep it True
+    """
+    from .c_lib_wrap import set_logger
+
+    set_logger(enable_info, enable_warning)
+
+
+from .runtime import Runtime, RuntimeOption
+from .model import UltraInferModel
+from . import c_lib_wrap as C
+from . import vision
+from . import pipeline
+from . import text
+from . import ts
+from .download import download, download_and_decompress, download_model, get_model_list
+
+
+__version__ = version
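
For reference, a minimal sketch of how this package-level setup is exercised from user code; it only touches names defined in this file (set_logger, __version__, is_built_with_gpu):

    import ultra_infer as ui

    # Silence INFO-level logs from the native library; keep warnings on.
    ui.set_logger(enable_info=False, enable_warning=True)

    print(ui.__version__)           # re-exported from code_version
    print(ui.is_built_with_gpu())   # build flag re-exported from c_lib_wrap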

+ 0 - 0
libs/ultrainfer/python/ultrainfer/c_lib_wrap.py.in → libs/ultra_infer/python/ultra_infer/c_lib_wrap.py.in


+ 0 - 0
libs/ultrainfer/python/ultrainfer/download.py → libs/ultra_infer/python/ultra_infer/download.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/model.py → libs/ultra_infer/python/ultra_infer/model.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/pipeline/__init__.py → libs/ultra_infer/python/ultra_infer/pipeline/__init__.py


+ 58 - 0
libs/ultra_infer/python/ultra_infer/pipeline/pptinypose/__init__.py

@@ -0,0 +1,58 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+from ... import c_lib_wrap as C
+
+
+class PPTinyPose(object):
+    def __init__(self, det_model=None, pptinypose_model=None):
+        """Set initialized detection model object and pptinypose model object
+
+        :param det_model: (ultra_infer.vision.detection.PicoDet)Initialized detection model object
+        :param pptinypose_model: (ultra_infer.vision.keypointdetection.PPTinyPose)Initialized pptinypose model object
+        """
+        assert (
+            det_model is not None and pptinypose_model is not None
+        ), "The det_model and pptinypose_model cannot be None."
+        self._pipeline = C.pipeline.PPTinyPose(
+            det_model._model, pptinypose_model._model
+        )
+
+    def predict(self, input_image):
+        """Predict the keypoint detection result for an input image
+
+        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+        :return: KeyPointDetectionResult
+        """
+        return self._pipeline.predict(input_image)
+
+    @property
+    def detection_model_score_threshold(self):
+        """Atrribute of PPTinyPose pipeline model. Stating the score threshold for detectin model to filter bbox before inputting pptinypose model
+
+        :return: value of detection_model_score_threshold(float)
+        """
+        return self._pipeline.detection_model_score_threshold
+
+    @detection_model_score_threshold.setter
+    def detection_model_score_threshold(self, value):
+        """Set attribute detection_model_score_threshold of PPTinyPose pipeline model.
+
+        :param value: (float)The value to set detection_model_score_threshold
+        """
+        assert isinstance(
+            value, float
+        ), "The value to set `detection_model_score_threshold` must be type of float."
+        self._pipeline.detection_model_score_threshold = value
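
A minimal usage sketch for this pipeline follows; the constructor signatures of the underlying PicoDet and PPTinyPose models, as well as all file paths, are illustrative assumptions rather than part of this diff.

    import cv2
    import ultra_infer as ui

    # Hypothetical model files; the pipeline expects the model types named in the docstring above.
    det_model = ui.vision.detection.PicoDet(
        "picodet.pdmodel", "picodet.pdiparams", "picodet_infer_cfg.yml"
    )
    pose_model = ui.vision.keypointdetection.PPTinyPose(
        "tinypose.pdmodel", "tinypose.pdiparams", "tinypose_infer_cfg.yml"
    )

    pipeline = ui.pipeline.PPTinyPose(det_model, pose_model)
    pipeline.detection_model_score_threshold = 0.5  # drop low-score bboxes before pose estimation
    result = pipeline.predict(cv2.imread("person.jpg"))  # KeyPointDetectionResult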

+ 0 - 0
libs/ultrainfer/python/ultrainfer/py_only/__init__.py → libs/ultra_infer/python/ultra_infer/py_only/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/py_only/base.py → libs/ultra_infer/python/ultra_infer/py_only/base.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/py_only/ts/__init__.py → libs/ultra_infer/python/ultra_infer/py_only/ts/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/py_only/ts/model.py → libs/ultra_infer/python/ultra_infer/py_only/ts/model.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/py_only/ts/processors.py → libs/ultra_infer/python/ultra_infer/py_only/ts/processors.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/py_only/vision/__init__.py → libs/ultra_infer/python/ultra_infer/py_only/vision/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/py_only/vision/model.py → libs/ultra_infer/python/ultra_infer/py_only/vision/model.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/py_only/vision/processors.py → libs/ultra_infer/python/ultra_infer/py_only/vision/processors.py


+ 706 - 0
libs/ultra_infer/python/ultra_infer/runtime.py

@@ -0,0 +1,706 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import absolute_import
+import logging
+import numpy as np
+from . import ModelFormat
+from . import c_lib_wrap as C
+
+
+class Runtime:
+    """UltraInfer Runtime object."""
+
+    def __init__(self, runtime_option):
+        """Initialize a UltraInfer Runtime object.
+
+        :param runtime_option: (ultra_infer.RuntimeOption)Options for UltraInfer Runtime
+        """
+
+        self._runtime = C.Runtime()
+        self.runtime_option = runtime_option
+        assert self._runtime.init(
+            self.runtime_option._option
+        ), "Initialize Runtime Failed!"
+
+    def forward(self, *inputs):
+        """[Only for Poros backend] Inference with input data for poros
+
+        :param data: (list[str : numpy.ndarray])The input data list
+        :return list of numpy.ndarray
+        """
+        if self.runtime_option._option.model_format != ModelFormat.TORCHSCRIPT:
+            raise Exception(
+                "The forward function is only used for Poros backend, please call infer function"
+            )
+        inputs_dict = dict()
+        for i in range(len(inputs)):
+            inputs_dict["x" + str(i)] = inputs[i]
+        return self.infer(inputs_dict)
+
+    def infer(self, data):
+        """Inference with input data.
+
+        :param data: (dict[str : numpy.ndarray])The input data dict, whose keys must match the input names of the loaded model
+        :return list of numpy.ndarray
+        """
+        assert isinstance(data, dict) or isinstance(
+            data, list
+        ), "The input data should be type of dict or list."
+        if isinstance(data, dict):
+            for k, v in data.items():
+                if isinstance(v, np.ndarray) and not v.data.contiguous:
+                    data[k] = np.ascontiguousarray(data[k])
+
+        return self._runtime.infer(data)
+
+    def bind_input_tensor(self, name, fdtensor):
+        """Bind FDTensor by name, no copy and share input memory
+
+        :param name: (str)The name of input data.
+        :param fdtensor: (ultra_infer.FDTensor)The input FDTensor.
+        """
+        self._runtime.bind_input_tensor(name, fdtensor)
+
+    def bind_output_tensor(self, name, fdtensor):
+        """Bind FDTensor by name, no copy and share output memory
+
+        :param name: (str)The name of output data.
+        :param fdtensor: (ultra_infer.FDTensor)The output FDTensor.
+        """
+        self._runtime.bind_output_tensor(name, fdtensor)
+
+    def zero_copy_infer(self):
+        """No params inference the model.
+
+        the input and output data need to pass through the bind_input_tensor and get_output_tensor interfaces.
+        """
+        self._runtime.infer()
+
+    def get_output_tensor(self, name):
+        """Get output FDTensor by name, no copy and share backend output memory
+
+        :param name: (str)The name of output data.
+        :return ultra_infer.FDTensor
+        """
+        return self._runtime.get_output_tensor(name)
+
+    def compile(self, warm_datas):
+        """[Only for Poros backend] compile with prewarm data for poros
+
+        :param warm_datas: (list of list of numpy.ndarray)The prewarm data list
+        :return TorchScript Model
+        """
+        if self.runtime_option._option.model_format != ModelFormat.TORCHSCRIPT:
+            raise Exception(
+                "The compile function is only used for Poros backend, please call infer function"
+            )
+        assert isinstance(warm_datas, list), "The prewarm data should be type of list."
+        for i in range(len(warm_datas)):
+            warm_data = warm_datas[i]
+            if isinstance(warm_data[0], np.ndarray):
+                warm_data = list(warm_data)
+            else:
+                warm_data = [data.numpy() for data in warm_data]
+            warm_datas[i] = warm_data
+        return self._runtime.compile(warm_datas, self.runtime_option._option)
+
+    def num_inputs(self):
+        """Get number of inputs of the loaded model."""
+        return self._runtime.num_inputs()
+
+    def num_outputs(self):
+        """Get number of outputs of the loaded model."""
+        return self._runtime.num_outputs()
+
+    def get_input_info(self, index):
+        """Get input information of the loaded model.
+
+        :param index: (int)Index of the input
+        :return ultra_infer.TensorInfo
+        """
+        assert isinstance(
+            index, int
+        ), "The input parameter index should be type of int."
+        assert (
+            index < self.num_inputs()
+        ), "The input parameter index:{} should less than number of inputs:{}.".format(
+            index, self.num_inputs
+        )
+        return self._runtime.get_input_info(index)
+
+    def get_output_info(self, index):
+        """Get output information of the loaded model.
+
+        :param index: (int)Index of the output
+        :return ultra_infer.TensorInfo
+        """
+        assert isinstance(
+            index, int
+        ), "The input parameter index should be type of int."
+        assert (
+            index < self.num_outputs()
+        ), "The input parameter index:{} should less than number of outputs:{}.".format(
+            index, self.num_outputs
+        )
+        return self._runtime.get_output_info(index)
+
+    def get_profile_time(self):
+        """Get profile time of Runtime after the profile process is done."""
+        return self._runtime.get_profile_time()
+
+
+class RuntimeOption:
+    """Options for UltraInfer Runtime."""
+
+    __slots__ = ["_option"]
+
+    def __init__(self):
+        """Initialize a UltraInfer RuntimeOption object."""
+
+        self._option = C.RuntimeOption()
+
+    def set_model_path(
+        self, model_path, params_path="", model_format=ModelFormat.PADDLE
+    ):
+        """Set path of model file and parameters file
+
+        :param model_path: (str)Path of model file
+        :param params_path: (str)Path of parameters file
+        :param model_format: (ModelFormat)Format of model, support ModelFormat.PADDLE/ModelFormat.ONNX/ModelFormat.TORCHSCRIPT
+        """
+        return self._option.set_model_path(model_path, params_path, model_format)
+
+    def set_model_buffer(
+        self, model_buffer, params_buffer="", model_format=ModelFormat.PADDLE
+    ):
+        """Specify the memory buffer of model and parameter. Used when model and params are loaded directly from memory
+        :param model_buffer: (bytes)The memory buffer of model
+        :param params_buffer: (bytes)The memory buffer of the parameters
+        :param model_format: (ModelFormat)Format of model, support ModelFormat.PADDLE/ModelFormat.ONNX/ModelFormat.TORCHSCRIPT
+        """
+        return self._option.set_model_buffer(model_buffer, params_buffer, model_format)
+
+    def use_gpu(self, device_id=0):
+        """Inference with Nvidia GPU
+
+        :param device_id: (int)The index of GPU will be used for inference, default 0
+        """
+        if not C.is_built_with_gpu():
+            logging.warning(
+                "The installed ultra_infer-python package is not built with GPU, will force to use CPU. To use GPU, following the commands to install ultra_infer-gpu-python."
+            )
+            return
+        return self._option.use_gpu(device_id)
+
+    def use_kunlunxin(
+        self,
+        device_id=0,
+        l3_workspace_size=16 * 1024 * 1024,
+        locked=False,
+        autotune=True,
+        autotune_file="",
+        precision="int16",
+        adaptive_seqlen=False,
+        enable_multi_stream=False,
+        gm_default_size=0,
+    ):
+        """Inference with KunlunXin XPU
+
+        :param device_id: (int)The index of KunlunXin XPU will be used for inference, default 0
+        :param l3_workspace_size: (int)The size of the video memory allocated by the l3 cache, the maximum is 16M, default 16M
+        :param locked: (bool)Whether the allocated L3 cache can be locked. If false, the L3 cache is not locked
+                        and can be shared by multiple models.
+        :param autotune: (bool)Whether to autotune the conv operator in the model.
+                        If true, when the conv operator of a certain dimension is executed for the first time,
+                        it will automatically search for a better algorithm to improve the performance of subsequent conv operators of the same dimension.
+        :param autotune_file: (str)Specify the path of the autotune file. If autotune_file is specified,
+                        the algorithm specified in the file will be used and autotune will not be performed again.
+        :param precision: (str)Calculation accuracy of multi_encoder
+        :param adaptive_seqlen: (bool)Whether the input of multi_encoder is variable length
+        :param enable_multi_stream: (bool)Whether to enable the multi stream of KunlunXin XPU.
+        :param gm_default_size: (int)The default size of context global memory of KunlunXin XPU.
+        """
+        return self._option.use_kunlunxin(
+            device_id,
+            l3_workspace_size,
+            locked,
+            autotune,
+            autotune_file,
+            precision,
+            adaptive_seqlen,
+            enable_multi_stream,
+            gm_default_size,
+        )
+
+    def use_cpu(self):
+        """Inference with CPU"""
+        return self._option.use_cpu()
+
+    def use_rknpu2(
+        self, rknpu2_name=C.CpuName.RK356X, rknpu2_core=C.CoreMask.RKNN_NPU_CORE_AUTO
+    ):
+        return self._option.use_rknpu2(rknpu2_name, rknpu2_core)
+
+    def use_sophgo(self):
+        """Inference with SOPHGO TPU"""
+        return self._option.use_sophgo()
+
+    def use_ascend(self):
+        """Inference with Huawei Ascend NPU"""
+        return self._option.use_ascend()
+
+    def disable_valid_backend_check(self):
+        """Disable checking validity of backend during inference"""
+        return self._option.disable_valid_backend_check()
+
+    def enable_valid_backend_check(self):
+        """Enable checking validity of backend during inference"""
+        return self._option.enable_valid_backend_check()
+
+    def set_cpu_thread_num(self, thread_num=-1):
+        """Set number of threads if inference with CPU
+
+        :param thread_num: (int)Number of threads, if not positive, means the number of threads is decided by the backend, default -1
+        """
+        return self._option.set_cpu_thread_num(thread_num)
+
+    def set_ort_graph_opt_level(self, level=-1):
+        """Set graph optimization level for ONNX Runtime backend
+
+        :param level: (int)Optimization level, -1 means the default setting
+        """
+        logging.warning(
+            "`RuntimeOption.set_ort_graph_opt_level` will be deprecated in v1.2.0, please use `RuntimeOption.graph_optimize_level = 99` instead."
+        )
+        self._option.ort_option.graph_optimize_level = level
+
+    def use_paddle_backend(self):
+        """Use Paddle Inference backend, support inference Paddle model on CPU/Nvidia GPU."""
+        return self._option.use_paddle_backend()
+
+    def use_paddle_infer_backend(self):
+        """Wrapper function of use_paddle_backend(), use Paddle Inference backend, support inference Paddle model on CPU/Nvidia GPU."""
+        return self.use_paddle_backend()
+
+    def use_poros_backend(self):
+        """Use Poros backend, support inference TorchScript model on CPU/Nvidia GPU."""
+        return self._option.use_poros_backend()
+
+    def use_ort_backend(self):
+        """Use ONNX Runtime backend, support inference Paddle/ONNX model on CPU/Nvidia GPU."""
+        return self._option.use_ort_backend()
+
+    def use_tvm_backend(self):
+        """Use TVM Runtime backend, support inference TVM model on CPU."""
+        return self._option.use_tvm_backend()
+
+    def use_trt_backend(self):
+        """Use TensorRT backend, support inference Paddle/ONNX model on Nvidia GPU."""
+        return self._option.use_trt_backend()
+
+    def use_openvino_backend(self):
+        """Use OpenVINO backend, support inference Paddle/ONNX model on CPU."""
+        return self._option.use_openvino_backend()
+
+    def use_lite_backend(self):
+        """Use Paddle Lite backend, support inference Paddle model on ARM CPU."""
+        return self._option.use_lite_backend()
+
+    def use_paddle_lite_backend(self):
+        """Wrapper function of use_lite_backend(), use Paddle Lite backend, support inference Paddle model on ARM CPU."""
+        return self.use_lite_backend()
+
+    def set_lite_context_properties(self, context_properties):
+        """Set nnadapter context properties for Paddle Lite backend."""
+        logging.warning(
+            "`RuntimeOption.set_lite_context_properties` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_lite_option.nnadapter_context_properties = ...` instead."
+        )
+        self._option.paddle_lite_option.nnadapter_context_properties = (
+            context_properties
+        )
+
+    def set_lite_model_cache_dir(self, model_cache_dir):
+        """Set nnadapter model cache dir for Paddle Lite backend."""
+        logging.warning(
+            "`RuntimeOption.set_lite_model_cache_dir` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_lite_option.nnadapter_model_cache_dir = ...` instead."
+        )
+
+        self._option.paddle_lite_option.nnadapter_model_cache_dir = model_cache_dir
+
+    def set_lite_dynamic_shape_info(self, dynamic_shape_info):
+        """Set nnadapter dynamic shape info for Paddle Lite backend."""
+        logging.warning(
+            "`RuntimeOption.set_lite_dynamic_shape_info` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_lite_option.nnadapter_dynamic_shape_info = ...` instead."
+        )
+        self._option.paddle_lite_option.nnadapter_dynamic_shape_info = (
+            dynamic_shape_info
+        )
+
+    def set_lite_subgraph_partition_path(self, subgraph_partition_path):
+        """Set nnadapter subgraph partition path for Paddle Lite backend."""
+        logging.warning(
+            "`RuntimeOption.set_lite_subgraph_partition_path` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_lite_option.nnadapter_subgraph_partition_config_path = ...` instead."
+        )
+        self._option.paddle_lite_option.nnadapter_subgraph_partition_config_path = (
+            subgraph_partition_path
+        )
+
+    def set_lite_subgraph_partition_config_buffer(self, subgraph_partition_buffer):
+        """Set nnadapter subgraph partition buffer for Paddle Lite backend."""
+        logging.warning(
+            "`RuntimeOption.set_lite_subgraph_partition_buffer` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_lite_option.nnadapter_subgraph_partition_config_buffer = ...` instead."
+        )
+        self._option.paddle_lite_option.nnadapter_subgraph_partition_config_buffer = (
+            subgraph_partition_buffer
+        )
+
+    def set_lite_mixed_precision_quantization_config_path(
+        self, mixed_precision_quantization_config_path
+    ):
+        """Set nnadapter mixed precision quantization config path for Paddle Lite backend.."""
+        logging.warning(
+            "`RuntimeOption.set_lite_mixed_precision_quantization_config_path` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_lite_option.nnadapter_mixed_precision_quantization_config_path = ...` instead."
+        )
+        self._option.paddle_lite_option.nnadapter_mixed_precision_quantization_config_path = (
+            mixed_precision_quantization_config_path
+        )
+
+    def set_paddle_mkldnn(self, use_mkldnn=True):
+        """Enable/Disable MKLDNN while using Paddle Inference backend, mkldnn is enabled by default."""
+        logging.warning(
+            "`RuntimeOption.set_paddle_mkldnn` will be derepcated in v1.2.0, please use `RuntimeOption.paddle_infer_option.enable_mkldnn = True` instead."
+        )
+        self._option.paddle_infer_option.enable_mkldnn = True
+
+    def set_openvino_device(self, name="CPU"):
+        """Set device name for OpenVINO, default 'CPU', can also be 'AUTO', 'GPU', 'GPU.1'....
+        This interface is deprecated, please use `RuntimeOption.openvino_option.set_device` instead.
+        """
+        logging.warning(
+            "`RuntimeOption.set_openvino_device` will be deprecated in v1.2.0, please use `RuntimeOption.openvino_option.set_device` instead."
+        )
+        self._option.openvino_option.set_device(name)
+
+    def set_openvino_shape_info(self, shape_info):
+        """Set shape information of the models' inputs, used for GPU to fix the shape
+           This interface is deprecated, please use `RuntimeOption.openvino_option.set_shape_info` instead.
+
+        :param shape_info: (dict{str, list of int})Shape information of model's inputs, e.g {"image": [1, 3, 640, 640], "scale_factor": [1, 2]}
+        """
+        logging.warning(
+            "`RuntimeOption.set_openvino_shape_info` will be deprecated in v1.2.0, please use `RuntimeOption.openvino_option.set_shape_info` instead."
+        )
+        self._option.openvino_option.set_shape_info(shape_info)
+
+    def set_openvino_cpu_operators(self, operators):
+        """While using OpenVINO backend and intel GPU, this interface specifies unsupported operators to run on CPU
+           This interface is deprecated, please use `RuntimeOption.openvino_option.set_cpu_operators` instead.
+
+        :param operators: (list of string)list of operators' name, e.g ["MulticlasNms"]
+        """
+        logging.warning(
+            "`RuntimeOption.set_openvino_cpu_operators` will be deprecated in v1.2.0, please use `RuntimeOption.openvino_option.set_cpu_operators` instead."
+        )
+        self._option.openvino_option.set_cpu_operators(operators)
+
+    def enable_paddle_log_info(self):
+        """Enable print out the debug log information while using Paddle Inference backend, the log information is disabled by default."""
+        logging.warning(
+            "RuntimeOption.enable_paddle_log_info` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.enable_log_info = True` instead."
+        )
+        self._option.paddle_infer_option.enable_log_info = True
+
+    def disable_paddle_log_info(self):
+        """Disable print out the debug log information while using Paddle Inference backend, the log information is disabled by default."""
+        logging.warning(
+            "RuntimeOption.disable_paddle_log_info` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.enable_log_info = False` instead."
+        )
+        self._option.paddle_infer_option.enable_log_info = False
+
+    def set_paddle_mkldnn_cache_size(self, cache_size):
+        """Set size of shape cache while using Paddle Inference backend with MKLDNN enabled, default will cache all the dynamic shape."""
+        logging.warning(
+            "RuntimeOption.set_paddle_mkldnn_cache_size` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.mkldnn_cache_size = {}` instead.".format(
+                cache_size
+            )
+        )
+        self._option.paddle_infer_option.mkldnn_cache_size = cache_size
+
+    def enable_lite_fp16(self):
+        """Enable half precision inference while using Paddle Lite backend on ARM CPU, fp16 is disabled by default."""
+        logging.warning(
+            "`RuntimeOption.enable_lite_fp16` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_lite_option.enable_fp16 = True` instead."
+        )
+        self._option.paddle_lite_option.enable_fp16 = True
+
+    def disable_lite_fp16(self):
+        """Disable half precision inference while using Paddle Lite backend on ARM CPU, fp16 is disabled by default."""
+        logging.warning(
+            "`RuntimeOption.disable_lite_fp16` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_lite_option.enable_fp16 = False` instead."
+        )
+        self._option.paddle_lite_option.enable_fp16 = False
+
+    def set_lite_power_mode(self, mode):
+        """Set POWER mode while using Paddle Lite backend on ARM CPU."""
+        logging.warning(
+            "`RuntimeOption.set_lite_powermode` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_lite_option.power_mode = {}` instead.".format(
+                mode
+            )
+        )
+        self._option.paddle_lite_option.power_mode = mode
+
+    def set_trt_input_shape(
+        self, tensor_name, min_shape, opt_shape=None, max_shape=None
+    ):
+        """Set shape range information while using TensorRT backend with loadding a model contains dynamic input shape. While inference with a new input shape out of the set shape range, the tensorrt engine will be rebuilt to expand the shape range information.
+
+        :param tensor_name: (str)Name of input which has dynamic shape
+        :param min_shape: (list of int)Minimum shape of the input, e.g [1, 3, 224, 224]
+        :param opt_shape: (list of int)Optimal shape of the input, often set to the most common input shape; if set to None, it will be kept the same as min_shape
+        :param max_shape: (list of int)Maximum shape of the input, e.g [8, 3, 224, 224]; if set to None, it will be kept the same as min_shape
+        """
+        logging.warning(
+            "`RuntimeOption.set_trt_input_shape` will be deprecated in v1.2.0, please use `RuntimeOption.trt_option.set_shape()` instead."
+        )
+        if opt_shape is None and max_shape is None:
+            opt_shape = min_shape
+            max_shape = min_shape
+        else:
+            assert (
+                opt_shape is not None and max_shape is not None
+            ), "Set min_shape only, or set min_shape, opt_shape, max_shape both."
+        return self._option.trt_option.set_shape(
+            tensor_name, min_shape, opt_shape, max_shape
+        )
+
+    def set_trt_input_data(
+        self, tensor_name, min_input_data, opt_input_data=None, max_input_data=None
+    ):
+        """Set input data while using TensorRT backend with loadding a model contains dynamic input shape.
+
+        :param tensor_name: (str)Name of input which has dynamic shape
+        :param min_input_data: (list of int)Input data for Minimum shape of the input.
+        :param opt_input_data: (list of int)Input data for the optimal shape of the input; if set to None, it will be kept the same as min_input_data
+        :param max_input_data: (list of int)Input data for the maximum shape of the input; if set to None, it will be kept the same as min_input_data
+        """
+        logging.warning(
+            "`RuntimeOption.set_trt_input_data` will be deprecated in v1.2.0, please use `RuntimeOption.trt_option.set_input_data()` instead."
+        )
+        if opt_input_data is None and max_input_data is None:
+            opt_input_data = min_input_data
+            max_input_data = min_input_data
+        else:
+            assert (
+                opt_input_data is not None and max_input_data is not None
+            ), "Set min_input_data only, or set min_input_data, opt_input_data, max_input_data both."
+        return self._option.trt_option.set_input_data(
+            tensor_name, min_input_data, opt_input_data, max_input_data
+        )
+
+    def set_trt_cache_file(self, cache_file_path):
+        """Set a cache file path while using TensorRT backend. While loading a Paddle/ONNX model with set_trt_cache_file("./tensorrt_cache/model.trt"), if file `./tensorrt_cache/model.trt` exists, it will skip building tensorrt engine and load the cache file directly; if file `./tensorrt_cache/model.trt` doesn't exist, it will building tensorrt engine and save the engine as binary string to the cache file.
+
+        :param cache_file_path: (str)Path of tensorrt cache file
+        """
+        logging.warning(
+            "`RuntimeOption.set_trt_cache_file` will be deprecated in v1.2.0, please use `RuntimeOption.trt_option.serialize_file = {}` instead.".format(
+                cache_file_path
+            )
+        )
+        self._option.trt_option.serialize_file = cache_file_path
+
+    def enable_trt_fp16(self):
+        """Enable half precision inference while using TensorRT backend, notice that not all the Nvidia GPU support FP16, in those cases, will fallback to FP32 inference."""
+        logging.warning(
+            "`RuntimeOption.enable_trt_fp16` will be deprecated in v1.2.0, please use `RuntimeOption.trt_option.enable_fp16 = True` instead."
+        )
+        self._option.trt_option.enable_fp16 = True
+
+    def disable_trt_fp16(self):
+        """Disable half precision inference while suing TensorRT backend."""
+        logging.warning(
+            "`RuntimeOption.disable_trt_fp16` will be deprecated in v1.2.0, please use `RuntimeOption.trt_option.enable_fp16 = False` instead."
+        )
+        self._option.trt_option.enable_fp16 = False
+
+    def enable_pinned_memory(self):
+        """Enable pinned memory. Pinned memory can be utilized to speedup the data transfer between CPU and GPU. Currently it's only suppurted in TRT backend and Paddle Inference backend."""
+        return self._option.enable_pinned_memory()
+
+    def disable_pinned_memory(self):
+        """Disable pinned memory."""
+        return self._option.disable_pinned_memory()
+
+    def enable_paddle_to_trt(self):
+        """While using TensorRT backend, enable_paddle_to_trt() will change to use Paddle Inference backend, and use its integrated TensorRT instead."""
+        logging.warning(
+            "`RuntimeOption.enable_paddle_to_trt` will be deprecated in v1.2.l0, if you want to run tensorrt with Paddle Inference backend, please use the following method, "
+        )
+        logging.warning("    ==============================================")
+        logging.warning("    import ultra_infer as fd")
+        logging.warning("    option = fd.RuntimeOption()")
+        logging.warning("    option.use_gpu(0)")
+        logging.warning("    option.use_paddle_infer_backend()")
+        logging.warning("    option.paddle_infer_option.enable_trt = True")
+        logging.warning("    ==============================================")
+        self._option.use_paddle_backend()
+        self._option.paddle_infer_option.enable_trt = True
+
+    def set_trt_max_workspace_size(self, trt_max_workspace_size):
+        """Set max workspace size while using TensorRT backend."""
+        logging.warning(
+            "`RuntimeOption.set_trt_max_workspace_size` will be deprecated in v1.2.0, please use `RuntimeOption.trt_option.max_workspace_size = {}` instead.".format(
+                trt_max_workspace_size
+            )
+        )
+        self._option.trt_option.max_workspace_size = trt_max_workspace_size
+
+    def set_trt_max_batch_size(self, trt_max_batch_size):
+        """Set max batch size while using TensorRT backend."""
+        logging.warning(
+            "`RuntimeOption.set_trt_max_batch_size` will be deprecated in v1.2.0, please use `RuntimeOption.trt_option.max_batch_size = {}` instead.".format(
+                trt_max_batch_size
+            )
+        )
+        self._option.trt_option.max_batch_size = trt_max_batch_size
+
+    def enable_paddle_trt_collect_shape(self):
+        """Enable collect subgraph shape information while using Paddle Inference with TensorRT"""
+        logging.warning(
+            "`RuntimeOption.enable_paddle_trt_collect_shape` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.collect_trt_shape = True` instead."
+        )
+        self._option.paddle_infer_option.collect_trt_shape = True
+
+    def disable_paddle_trt_collect_shape(self):
+        """Disable collect subgraph shape information while using Paddle Inference with TensorRT"""
+        logging.warning(
+            "`RuntimeOption.disable_paddle_trt_collect_shape` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.collect_trt_shape = False` instead."
+        )
+        self._option.paddle_infer_option.collect_trt_shape = False
+
+    def delete_paddle_backend_pass(self, pass_name):
+        """Delete pass by name in paddle backend"""
+        logging.warning(
+            "`RuntimeOption.delete_paddle_backend_pass` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.delete_pass` instead."
+        )
+        self._option.paddle_infer_option.delete_pass(pass_name)
+
+    def disable_paddle_trt_ops(self, ops):
+        """Disable some ops in paddle trt backend"""
+        logging.warning(
+            "`RuntimeOption.disable_paddle_trt_ops` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.disable_trt_ops()` instead."
+        )
+        self._option.disable_trt_ops(ops)
+
+    def use_ipu(
+        self,
+        device_num=1,
+        micro_batch_size=1,
+        enable_pipelining=False,
+        batches_per_step=1,
+    ):
+        return self._option.use_ipu(
+            device_num, micro_batch_size, enable_pipelining, batches_per_step
+        )
+
+    def set_ipu_config(
+        self,
+        enable_fp16=False,
+        replica_num=1,
+        available_memory_proportion=1.0,
+        enable_half_partial=False,
+    ):
+        logging.warning(
+            "`RuntimeOption.set_ipu_config` will be deprecated in v1.2.0, please use `RuntimeOption.paddle_infer_option.set_ipu_config()` instead."
+        )
+        self._option.paddle_infer_option.set_ipu_config(
+            enable_fp16, replica_num, available_memory_proportion, enable_half_partial
+        )
+
+    @property
+    def poros_option(self):
+        """Get PorosBackendOption object to configure Poros backend
+
+        :return PorosBackendOption
+        """
+        return self._option.poros_option
+
+    @property
+    def paddle_lite_option(self):
+        """Get LiteBackendOption object to configure Paddle Lite backend
+
+        :return LiteBackendOption
+        """
+        return self._option.paddle_lite_option
+
+    @property
+    def openvino_option(self):
+        """Get OpenVINOOption object to configure OpenVINO backend
+
+        :return OpenVINOOption
+        """
+        return self._option.openvino_option
+
+    @property
+    def ort_option(self):
+        """Get OrtBackendOption object to configure ONNX Runtime backend
+
+        :return OrtBackendOption
+        """
+        return self._option.ort_option
+
+    @property
+    def trt_option(self):
+        """Get TrtBackendOption object to configure TensorRT backend
+
+        :return TrtBackendOption
+        """
+        return self._option.trt_option
+
+    @property
+    def paddle_infer_option(self):
+        """Get PaddleBackendOption object to configure Paddle Inference backend
+
+        :return PaddleBackendOption
+        """
+        return self._option.paddle_infer_option
+
+    def enable_profiling(self, inclue_h2d_d2h=False, repeat=100, warmup=50):
+        """Set the profile mode as 'true'.
+        :param inclue_h2d_d2h: Whether to include the H2D_D2H transfer time in the runtime time.
+        :param repeat: Repeat times for runtime inference.
+        :param warmup: Warmup times for runtime inference.
+        """
+        return self._option.enable_profiling(inclue_h2d_d2h, repeat, warmup)
+
+    def disable_profiling(self):
+        """Set the profile mode as 'false'."""
+        return self._option.disable_profiling()
+
+    def set_external_raw_stream(self, cuda_stream):
+        """Set the external raw stream used by ultra_infer runtime."""
+        self._option.set_external_raw_stream(cuda_stream)
+
+    def __repr__(self):
+        attrs = dir(self._option)
+        message = "RuntimeOption(\n"
+        for attr in attrs:
+            if attr.startswith("__"):
+                continue
+            if hasattr(getattr(self._option, attr), "__call__"):
+                continue
+            message += "  {} : {}\t\n".format(attr, getattr(self._option, attr))
+        message.strip("\n")
+        message += ")"
+        return message
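
A minimal sketch of driving Runtime and RuntimeOption directly, assuming a Paddle model on disk; the file paths are placeholders, and the `name` attribute on TensorInfo is an assumption not shown in this diff.

    import numpy as np
    import ultra_infer as ui

    option = ui.RuntimeOption()
    option.set_model_path("model.pdmodel", "model.pdiparams", ui.ModelFormat.PADDLE)
    option.use_cpu()
    option.use_ort_backend()

    runtime = ui.Runtime(option)
    input_name = runtime.get_input_info(0).name  # assumes TensorInfo exposes `name`
    outputs = runtime.infer({input_name: np.zeros((1, 3, 224, 224), dtype=np.float32)})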

+ 0 - 0
libs/ultrainfer/python/ultrainfer/text/__init__.py → libs/ultra_infer/python/ultra_infer/text/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/text/uie/__init__.py → libs/ultra_infer/python/ultra_infer/text/uie/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/ts/__init__.py → libs/ultra_infer/python/ultra_infer/ts/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/ts/anomalydetection/__init__.py → libs/ultra_infer/python/ultra_infer/ts/anomalydetection/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/ts/anomalydetection/ppts/__init__.py → libs/ultra_infer/python/ultra_infer/ts/anomalydetection/ppts/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/ts/classification/__init__.py → libs/ultra_infer/python/ultra_infer/ts/classification/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/ts/classification/ppts/__init__.py → libs/ultra_infer/python/ultra_infer/ts/classification/ppts/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/ts/forecasting/__init__.py → libs/ultra_infer/python/ultra_infer/ts/forecasting/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/ts/forecasting/ppts/__init__.py → libs/ultra_infer/python/ultra_infer/ts/forecasting/ppts/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/utils/__init__.py → libs/ultra_infer/python/ultra_infer/utils/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/utils/example_resource.py → libs/ultra_infer/python/ultra_infer/utils/example_resource.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/utils/hub_config.py → libs/ultra_infer/python/ultra_infer/utils/hub_config.py


+ 57 - 0
libs/ultra_infer/python/ultra_infer/utils/hub_env.py

@@ -0,0 +1,57 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""
+This module stores the environment variables for the ultra_infer model hub.
+
+ULTRAINFER_HUB_HOME  -->  The root directory for storing ultra_infer model hub related data.
+│                         Defaults to ~/.ultra_infer; users can change it through the
+│                         ULTRAINFER_HUB_HOME environment variable.
+├── MODEL_HOME       -->  Stores the downloaded ultra_infer models.
+├── CONF_HOME        -->  Stores the default configuration files.
+"""
+
+import os
+
+
+def _get_user_home():
+    return os.path.expanduser("~")
+
+
+def _get_hub_home():
+    if "ULTRAINFER_HUB_HOME" in os.environ:
+        home_path = os.environ["ULTRAINFER_HUB_HOME"]
+        if os.path.exists(home_path):
+            if os.path.isdir(home_path):
+                return home_path
+            else:
+                raise RuntimeError(
+                    "The environment variable ULTRAINFER_HUB_HOME {} is not a directory.".format(
+                        home_path
+                    )
+                )
+        else:
+            return home_path
+    return os.path.join(_get_user_home(), ".ultra_infer")
+
+
+def _get_sub_home(directory):
+    home = os.path.join(_get_hub_home(), directory)
+    os.makedirs(home, exist_ok=True)
+    return home
+
+
+USER_HOME = _get_user_home()
+HUB_HOME = _get_hub_home()
+MODEL_HOME = _get_sub_home("models")
+CONF_HOME = _get_sub_home("conf")
+RESOURCE_HOME = _get_sub_home("resources")
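
Since the module-level constants above are computed at import time, relocating the hub directory only works if the environment variable is set before the first import; a sketch with an arbitrary path:

    import os

    os.environ["ULTRAINFER_HUB_HOME"] = "/data/ui_hub"  # must precede the import below

    from ultra_infer.utils import hub_env

    print(hub_env.MODEL_HOME)  # /data/ui_hub/models, created on demand by _get_sub_home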

+ 134 - 0
libs/ultra_infer/python/ultra_infer/utils/hub_model_server.py

@@ -0,0 +1,134 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import requests
+from typing import List, Optional
+
+from .hub_config import config
+
+
+class ServerConnectionError(Exception):
+    def __init__(self, url: str):
+        self.url = url
+
+    def __str__(self):
+        tips = "Can't connect to UltraInfer Model Server: {}".format(self.url)
+        return tips
+
+
+class ModelServer(object):
+    """
+    UltraInfer server source
+
+    Args:
+        url(str) : Url of the server
+        timeout(int) : Request timeout
+    """
+
+    def __init__(self, url: str, timeout: int = 10):
+        self._url = url
+        self._timeout = timeout
+
+    def search_model(
+        self, name: str, format: Optional[str] = None, version: Optional[str] = None
+    ) -> Optional[List[dict]]:
+        """
+        Search model from model server.
+
+        Args:
+            name(str) : UltraInfer model name
+            format(str): UltraInfer model format
+            version(str) : UltraInfer model version
+        Return:
+            result(list): search results
+        """
+        params = {}
+        params["name"] = name
+        if format:
+            params["format"] = format
+        if version:
+            params["version"] = version
+        result = self.request(path="ultra_infer_search", params=params)
+        if result["status"] == 0 and len(result["data"]) > 0:
+            return result["data"]
+        return None
+
+    def stat_model(self, name: str, format: str, version: str):
+        """
+        Record a model download event for statistics.
+
+        Args:
+            name(str) : UltraInfer model name
+            format(str): UltraInfer model format
+            version(str) : UltraInfer model version
+        Return:
+            is_successful(bool): True if successful, False otherwise
+        """
+        params = {}
+        params["name"] = name
+        params["format"] = format
+        params["version"] = version
+        params["from"] = "ultra_infer"
+        try:
+            result = self.request(path="stat", params=params)
+        except Exception:
+            return False
+        if result["status"] == 0:
+            return True
+        else:
+            return False
+
+    def request(self, path: str, params: dict) -> dict:
+        """Request server."""
+        api = "{}/{}".format(self._url, path)
+        try:
+            result = requests.get(api, params, timeout=self._timeout)
+            return result.json()
+        except requests.exceptions.ConnectionError as e:
+            raise ServerConnectionError(self._url) from e
+
+    def get_model_list(self):
+        """
+        Get information of all pre-trained models on the server.
+        Return:
+            result(dict): key is category name, value is a list which contains models \
+                information such as name, format and version.
+        """
+        api = "{}/{}".format(self._url, "ultra_infer_listmodels")
+        try:
+            result = requests.get(api, timeout=self._timeout)
+            return result.json()
+        except requests.exceptions.ConnectionError as e:
+            raise ServerConnectionError(self._url) from e
+
+    def is_connected(self):
+        return self.check(self._url)
+
+    @classmethod
+    def check(cls, url: str) -> bool:
+        """
+        Check if the specified url is a valid model server
+
+        Args:
+            url(str) : Url to check
+        """
+        try:
+            r = requests.get(url + "/search")
+            return r.status_code == 200
+        except Exception:
+            return False
+
+
+model_server = ModelServer(config.server)
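
A hedged sketch of querying a model server through this class; the URL and model name are placeholders, and only methods defined above are used:

    from ultra_infer.utils.hub_model_server import ModelServer

    server = ModelServer("http://models.example.com", timeout=5)  # hypothetical server URL
    if server.is_connected():
        # Returns a list of dicts on success, or None when nothing matches.
        results = server.search_model("PP-LCNet", format="paddle")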

+ 0 - 0
libs/ultrainfer/python/ultrainfer/utils/misc.py → libs/ultra_infer/python/ultra_infer/utils/misc.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/vision/__init__.py → libs/ultra_infer/python/ultra_infer/vision/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/vision/classification/__init__.py → libs/ultra_infer/python/ultra_infer/vision/classification/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/vision/classification/contrib/__init__.py → libs/ultra_infer/python/ultra_infer/vision/classification/contrib/__init__.py


+ 104 - 0
libs/ultra_infer/python/ultra_infer/vision/classification/contrib/resnet.py

@@ -0,0 +1,104 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class ResNet(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a image classification model exported by torchvision.ResNet.
+
+        :param model_file: (str)Path of model file, e.g resnet/resnet50.onnx
+        :param params_file: (str)Path of parameters file, if the model_format is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+        :param runtime_option: (ultra_infer.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+        :param model_format: (ultra_infer.ModelFormat)Model format of the loaded model, default is ONNX
+        """
+
+        # call super() to initialize the backend_option
+        # the result of initialization will be saved in self._runtime_option
+        super(ResNet, self).__init__(runtime_option)
+
+        self._model = C.vision.classification.ResNet(
+            model_file, params_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the model was initialized successfully
+
+        assert self.initialized, "ResNet initialize failed."
+
+    # Predict and return the inference result of "input_image".
+    def predict(self, input_image, topk=1):
+        """Classify an input image
+
+        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+        :param topk: (int)The topk result by the classify confidence score, default 1
+        :return: ClassifyResult
+        """
+        return self._model.predict(input_image, topk)
+
+    # Implement the setter and getter method for variables
+    @property
+    def size(self):
+        """
+        Returns the preprocess image size, default size = [224, 224]
+        """
+        return self._model.size
+
+    @property
+    def mean_vals(self):
+        """
+        Returns the mean values for normalization, default mean_vals = [0.485, 0.456, 0.406]
+        """
+        return self._model.mean_vals
+
+    @property
+    def std_vals(self):
+        """
+        Returns the std values for normalization, default std_vals = [0.229, 0.224, 0.225]
+        """
+        return self._model.std_vals
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._model.size = wh
+
+    @mean_vals.setter
+    def mean_vals(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `mean_vals` must be type of list."
+        self._model.mean_vals = value
+
+    @std_vals.setter
+    def std_vals(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `std_vals` must be type of list."
+        self._model.std_vals = value
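
A usage sketch for this wrapper, assuming an ONNX export of torchvision ResNet is available at the path shown and that the class is re-exported under ultra_infer.vision.classification:

    import cv2
    import ultra_infer as ui

    model = ui.vision.classification.ResNet("resnet50.onnx")  # hypothetical model file
    model.size = [224, 224]                                   # uses the setter defined above
    result = model.predict(cv2.imread("test.jpg"), topk=5)    # ClassifyResult with top-5 labels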

+ 140 - 0
libs/ultra_infer/python/ultra_infer/vision/classification/contrib/yolov5cls.py

@@ -0,0 +1,140 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class YOLOv5ClsPreprocessor:
+    def __init__(self):
+        """Create a preprocessor for YOLOv5Cls"""
+        self._preprocessor = C.vision.classification.YOLOv5ClsPreprocessor()
+
+    def run(self, input_ims):
+        """Preprocess input images for YOLOv5Cls
+
+        :param: input_ims: (list of numpy.ndarray)The input image
+        :return: list of FDTensor
+        """
+        return self._preprocessor.run(input_ims)
+
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [224, 224]
+        """
+        return self._preprocessor.size
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._preprocessor.size = wh
+
+
+class YOLOv5ClsPostprocessor:
+    def __init__(self):
+        """Create a postprocessor for YOLOv5Cls"""
+        self._postprocessor = C.vision.classification.YOLOv5ClsPostprocessor()
+
+    def run(self, runtime_results, ims_info):
+        """Postprocess the runtime results for YOLOv5Cls
+
+        :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
+        :param: ims_info: (list of dict)Record input_shape and output_shape
+        :return: list of ClassifyResult (if the runtime_results are predicted from batched samples, the length of this list equals the batch size)
+        """
+        return self._postprocessor.run(runtime_results, ims_info)
+
+    @property
+    def topk(self):
+        """
+        topk for postprocessing, default is 1
+        """
+        return self._postprocessor.topk
+
+    @topk.setter
+    def topk(self, topk):
+        assert isinstance(topk, int), "The value to set `top k` must be type of int."
+        self._postprocessor.topk = topk
+
+
+class YOLOv5Cls(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a YOLOv5Cls model exported by YOLOv5Cls.
+
+        :param model_file: (str)Path of model file, e.g ./YOLOv5Cls.onnx
+        :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_format is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+        :param runtime_option: (ultra_infer.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+        :param model_format: (ultra_infer.ModelFormat)Model format of the loaded model
+        """
+
+        super(YOLOv5Cls, self).__init__(runtime_option)
+
+        assert (
+            model_format == ModelFormat.ONNX
+        ), "YOLOv5Cls only support model format of ModelFormat.ONNX now."
+        self._model = C.vision.classification.YOLOv5Cls(
+            model_file, params_file, self._runtime_option, model_format
+        )
+
+        assert self.initialized, "YOLOv5Cls initialize failed."
+
+    def predict(self, input_image):
+        """Classify an input image
+
+        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+        :return: ClassifyResult
+        """
+        assert input_image is not None, "Input image is None."
+        return self._model.predict(input_image)
+
+    def batch_predict(self, images):
+        """Classify a batch of input image
+
+        :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of ClassifyResult
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get YOLOv5ClsPreprocessor object of the loaded model
+
+        :return YOLOv5ClsPreprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get YOLOv5ClsPostprocessor object of the loaded model
+
+        :return YOLOv5ClsPostprocessor
+        """
+        return self._model.postprocessor
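
And a corresponding sketch for YOLOv5Cls; the model file name is a placeholder, only ONNX input passes the assertion in __init__, and it is assumed the C-level postprocessor exposes the same topk attribute as the wrapper above:

    import cv2
    import ultra_infer as ui

    model = ui.vision.classification.YOLOv5Cls("yolov5s-cls.onnx")  # hypothetical ONNX export
    model.postprocessor.topk = 5  # keep the five highest-confidence classes
    results = model.batch_predict([cv2.imread("a.jpg"), cv2.imread("b.jpg")])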

+ 288 - 0
libs/ultra_infer/python/ultra_infer/vision/classification/ppcls/__init__.py

@@ -0,0 +1,288 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+
+import logging
+from dataclasses import dataclass
+from typing import List
+
+import numpy as np
+
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+from ...common import ProcessorManager
+from ....py_only import PyOnlyProcessorChain
+from ....py_only.vision import PyOnlyVisionModel, processors as P
+from ....utils.misc import load_config
+
+
+class PaddleClasPreprocessor(ProcessorManager):
+    def __init__(self, config_file):
+        """Create a preprocessor for PaddleClasModel from configuration file
+
+        :param config_file: (str)Path of configuration file, e.g resnet50/inference_cls.yaml
+        """
+        super(PaddleClasPreprocessor, self).__init__()
+        self._manager = C.vision.classification.PaddleClasPreprocessor(config_file)
+
+    def disable_normalize(self):
+        """
+        This function will disable normalize in preprocessing step.
+        """
+        self._manager.disable_normalize()
+
+    def disable_permute(self):
+        """
+        This function will disable hwc2chw in preprocessing step.
+        """
+        self._manager.disable_permute()
+
+    def initial_resize_on_cpu(self, v):
+        """
+        When the initial operator is Resize, and input image size is large,
+        maybe it's better to run resize on CPU, because the HostToDevice memcpy
+        is time consuming. Set this True to run the initial resize on CPU.
+        :param: v: True or False
+        """
+        self._manager.initial_resize_on_cpu(v)
+
+
+class PaddleClasPostprocessor:
+    def __init__(self, topk=1):
+        """Create a postprocessor for PaddleClasModel
+
+        :param topk: (int) Keep only the top-k classification labels
+        """
+        self._postprocessor = C.vision.classification.PaddleClasPostprocessor(topk)
+
+    def run(self, runtime_results):
+        """Postprocess the runtime results for PaddleClasModel
+
+        :param runtime_results: (list of FDTensor) The output FDTensor results from runtime
+        :return: list of ClassifyResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
+        """
+        return self._postprocessor.run(runtime_results)
+
+
+class PaddleClasModel(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file,
+        config_file,
+        runtime_option=None,
+        model_format=ModelFormat.PADDLE,
+    ):
+        """Load a image classification model exported by PaddleClas.
+
+        :param model_file: (str) Path of model file, e.g. resnet50/inference.pdmodel
+        :param params_file: (str) Path of parameters file, e.g. resnet50/inference.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
+        :param config_file: (str) Path of configuration file for deployment, e.g. resnet50/inference_cls.yaml
+        :param runtime_option: (ultra_infer.RuntimeOption) RuntimeOption for inference of this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat) Model format of the loaded model
+        """
+
+        super(PaddleClasModel, self).__init__(runtime_option)
+        self._model = C.vision.classification.PaddleClasModel(
+            model_file, params_file, config_file, self._runtime_option, model_format
+        )
+        assert self.initialized, "PaddleClas model initialize failed."
+
+    def clone(self):
+        """Clone PaddleClasModel object
+
+        :return: a new PaddleClasModel object
+        """
+
+        class PaddleClasCloneModel(PaddleClasModel):
+            def __init__(self, model):
+                self._model = model
+
+        clone_model = PaddleClasCloneModel(self._model.clone())
+        return clone_model
+
+    def predict(self, im, topk=1):
+        """Classify an input image
+
+        :param im: (numpy.ndarray) The input image data, a 3-D array with layout HWC, BGR format
+        :param topk: (int) Keep only the top-k classification results, default 1
+        :return: ClassifyResult
+        """
+
+        self.postprocessor.topk = topk
+        return self._model.predict(im)
+
+    def batch_predict(self, images):
+        """Classify a batch of input image
+
+        :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of ClassifyResult
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get PaddleClasPreprocessor object of the loaded model
+
+        :return: PaddleClasPreprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get PaddleClasPostprocessor object of the loaded model
+
+        :return: PaddleClasPostprocessor
+        """
+        return self._model.postprocessor
+
+
+class _PyOnlyMultilabelClassificationPreprocessor(object):
+    def __init__(self, config):
+        super().__init__()
+        processors = self._build_processors(config)
+        processors.insert(0, P.BGR2RGB())
+        self._processor_chain = PyOnlyProcessorChain(processors)
+
+    def run(self, data):
+        return self._processor_chain(data)
+
+    def _build_processors(self, config):
+        processors = []
+        for item in config:
+            tf_type = next(iter(item))
+            args = item[tf_type]
+            if tf_type == "ResizeImage":
+                if args.keys() - {"resize_short", "size", "backend", "interpolation"}:
+                    raise ValueError
+                args.setdefault("resize_short", None)
+                args.setdefault("size", None)
+                # TODO: `backend` & `interpolation`
+                if not (args["resize_short"] or args["size"]):
+                    raise ValueError
+                if args.get("resize_short"):
+                    processor = P.ResizeByShort(
+                        target_short_edge=args["resize_short"],
+                        size_divisor=None,
+                        interp="LINEAR",
+                    )
+                else:
+                    processor = P.Resize(target_size=args["size"])
+            elif tf_type == "CropImage":
+                if args.keys() - {"size"}:
+                    raise ValueError
+                args.setdefault("size", 224)
+                processor = P.Crop(crop_size=args["size"])
+            elif tf_type == "NormalizeImage":
+                if args.keys() - {"mean", "std", "scale", "order", "channel_num"}:
+                    raise ValueError
+                args.setdefault("mean", [0.485, 0.456, 0.406])
+                args.setdefault("std", [0.229, 0.224, 0.225])
+                args.setdefault("scale", 1 / 255)
+                args.setdefault("order", "")
+                args.setdefault("channel_num", 3)
+                if args["order"] != "":
+                    raise ValueError
+                if args["channel_num"] != 3:
+                    raise ValueError
+                processor = P.Normalize(
+                    scale=args["scale"], mean=args["mean"], std=args["std"]
+                )
+            elif tf_type == "ToCHWImage":
+                if args:
+                    raise ValueError
+                processor = P.ToCHWImage()
+            else:
+                raise ValueError("Unknown transform type")
+            processors.append(processor)
+        return processors
+
+
+@dataclass
+class _PyOnlyMultilabelClassificationResult(object):
+    label_ids: List[int]
+    scores: List[float]
+
+
+class _PyOnlyMultilabelClassificationPostprocessor(object):
+    def __init__(self, config):
+        super().__init__()
+        self._threshold = config["threshold"]
+
+    def run(self, data):
+        pred = data["pred"]
+
+        pred_index = np.where(pred >= self._threshold)[0].astype("int32")
+        index = pred_index[np.argsort(pred[pred_index])][::-1]
+        clas_id_list = []
+        score_list = []
+        for i in index:
+            clas_id_list.append(i.item())
+            score_list.append(pred[i].item())
+
+        result = _PyOnlyMultilabelClassificationResult(
+            label_ids=clas_id_list, scores=score_list
+        )
+        return result
+
+
+class PyOnlyMultilabelClassificationModel(PyOnlyVisionModel):
+    def __init__(
+        self,
+        model_file,
+        params_file,
+        config_file,
+        runtime_option=None,
+        model_format=ModelFormat.PADDLE,
+    ):
+        self._model_file = model_file
+        self._params_file = params_file
+        self._model_format = model_format
+        super().__init__(runtime_option)
+        self._config = load_config(config_file)
+        self._preprocessor = _PyOnlyMultilabelClassificationPreprocessor(
+            self._config["PreProcess"]["transform_ops"]
+        )
+        self._postprocessor = _PyOnlyMultilabelClassificationPostprocessor(
+            self._config["PostProcess"]["MultiLabelThreshOutput"]
+        )
+
+    def model_name(self):
+        return "PyOnlyMultilabelImageClassificationModel"
+
+    def batch_predict(self, imgs):
+        data_list = []
+        for img in imgs:
+            data = {"img": img}
+            data = self._preprocessor.run(data)
+            data_list.append(data)
+
+        input_name = self._runtime.get_input_info(0).name
+        imgs = np.stack([data["img"] for data in data_list], axis=0, dtype=np.float32)
+        imgs = np.ascontiguousarray(imgs)
+        output_arrs = self._runtime.infer({input_name: imgs})
+
+        results = []
+        for pred in output_arrs[0]:
+            data = {"pred": pred}
+            result = self._postprocessor.run(data)
+            results.append(result)
+        return results
+
+    def _update_option(self):
+        self._option.set_model_path(
+            self._model_file, self._params_file, self._model_format
+        )
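A minimal usage sketch for PaddleClasModel, assuming the class is re-exported as ultra_infer.vision.classification.PaddleClasModel; the resnet50/* paths mirror the docstring examples and are illustrative:

    import cv2
    import ultra_infer as ui

    model = ui.vision.classification.PaddleClasModel(
        "resnet50/inference.pdmodel",
        "resnet50/inference.pdiparams",
        "resnet50/inference_cls.yaml",
    )
    im = cv2.imread("test.jpg")
    result = model.predict(im, topk=5)  # predict() forwards topk to the postprocessor
    print(result)

    # clone() shares the loaded weights, which is useful for multi-threaded serving
    worker = model.clone()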

+ 145 - 0
libs/ultra_infer/python/ultra_infer/vision/classification/ppshitu/__init__.py

@@ -0,0 +1,145 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+from ...common import ProcessorManager
+from ...detection.ppdet import PicoDet
+
+
+class PPShiTuV2Detector(PicoDet):
+    """Detect main body from an input image."""
+
+    ...
+
+
+class PPShiTuV2RecognizerPreprocessor(ProcessorManager):
+    def __init__(self, config_file):
+        """Create a preprocessor for PPShiTuV2Recognizer from configuration file
+
+        :param config_file: (str)Path of configuration file, e.g PPLCNet/inference_cls.yaml
+        """
+        super(PPShiTuV2RecognizerPreprocessor, self).__init__()
+        self._manager = C.vision.classification.PPShiTuV2RecognizerPreprocessor(
+            config_file
+        )
+
+    def disable_normalize(self):
+        """
+        This function will disable normalize in preprocessing step.
+        """
+        self._manager.disable_normalize()
+
+    def disable_permute(self):
+        """
+        This function will disable hwc2chw in preprocessing step.
+        """
+        self._manager.disable_permute()
+
+    def initial_resize_on_cpu(self, v):
+        """
+        When the initial operator is Resize, and input image size is large,
+        maybe it's better to run resize on CPU, because the HostToDevice memcpy
+        is time consuming. Set this True to run the initial resize on CPU.
+        :param: v: True or False
+        """
+        self._manager.initial_resize_on_cpu(v)
+
+
+class PPShiTuV2RecognizerPostprocessor:
+    def __init__(self):
+        """Create a postprocessor for PPShiTuV2Recognizer"""
+        self._postprocessor = C.vision.classification.PPShiTuV2RecognizerPostprocessor()
+
+    def run(self, runtime_results):
+        """Postprocess the runtime results for PPShiTuV2Recognizer
+
+        :param runtime_results: (list of FDTensor) The output FDTensor results from runtime
+        :return: list of ClassifyResult, with the feature vector in ClassifyResult.feature (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
+        """
+        return self._postprocessor.run(runtime_results)
+
+
+class PPShiTuV2Recognizer(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file,
+        config_file,
+        runtime_option=None,
+        model_format=ModelFormat.PADDLE,
+    ):
+        """Load a image PPShiTuV2Recognizer model exported by PaddleClas.
+
+        :param model_file: (str) Path of model file, e.g. PPLCNet/inference.pdmodel
+        :param params_file: (str) Path of parameters file, e.g. PPLCNet/inference.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
+        :param config_file: (str) Path of configuration file for deployment, e.g. PPLCNet/inference_cls.yaml
+        :param runtime_option: (ultra_infer.RuntimeOption) RuntimeOption for inference of this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat) Model format of the loaded model
+        """
+
+        super(PPShiTuV2Recognizer, self).__init__(runtime_option)
+        self._model = C.vision.classification.PPShiTuV2Recognizer(
+            model_file, params_file, config_file, self._runtime_option, model_format
+        )
+        assert self.initialized, "PPShiTuV2Recognizer model initialize failed."
+
+    def clone(self):
+        """Clone PPShiTuV2Recognizer object
+
+        :return: a new PPShiTuV2Recognizer object
+        """
+
+        class PPShiTuV2RecognizerCloneModel(PPShiTuV2Recognizer):
+            def __init__(self, model):
+                self._model = model
+
+        clone_model = PPShiTuV2RecognizerCloneModel(self._model.clone())
+        return clone_model
+
+    def predict(self, im):
+        """Extract feature from an input image
+
+        :param im: (numpy.ndarray) The input image data, a 3-D array with layout HWC, BGR format
+        :return: ClassifyResult
+        """
+
+        return self._model.predict(im)
+
+    def batch_predict(self, images):
+        """Extract features from a batch of input image
+
+        :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of ClassifyResult, the feature vector is ClassifyResult.feature
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get PPShiTuV2RecognizerPreprocessor object of the loaded model
+
+        :return: PPShiTuV2RecognizerPreprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get PPShiTuV2RecognizerPostprocessor object of the loaded model
+
+        :return: PPShiTuV2RecognizerPostprocessor
+        """
+        return self._model.postprocessor
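A minimal sketch of the PP-ShiTuV2 recognizer above, assuming it is re-exported under ultra_infer.vision.classification; the PPLCNet/* paths and image name are illustrative:

    import cv2
    import ultra_infer as ui

    recognizer = ui.vision.classification.PPShiTuV2Recognizer(
        "PPLCNet/inference.pdmodel",
        "PPLCNet/inference.pdiparams",
        "PPLCNet/inference_cls.yaml",
    )
    im = cv2.imread("gallery_item.jpg")
    result = recognizer.predict(im)
    print(result.feature)  # per the docstring, the feature vector is ClassifyResult.feature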

+ 0 - 0
libs/ultrainfer/python/ultrainfer/vision/common/__init__.py → libs/ultra_infer/python/ultra_infer/vision/common/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/vision/common/manager.py → libs/ultra_infer/python/ultra_infer/vision/common/manager.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/vision/common/processors.py → libs/ultra_infer/python/ultra_infer/vision/common/processors.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/vision/detection/__init__.py → libs/ultra_infer/python/ultra_infer/vision/detection/__init__.py


+ 0 - 0
libs/ultrainfer/python/ultrainfer/vision/detection/contrib/__init__.py → libs/ultra_infer/python/ultra_infer/vision/detection/contrib/__init__.py


+ 157 - 0
libs/ultra_infer/python/ultra_infer/vision/detection/contrib/fastestdet.py

@@ -0,0 +1,157 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class FastestDetPreprocessor:
+    def __init__(self):
+        """Create a preprocessor for FastestDet"""
+        self._preprocessor = C.vision.detection.FastestDetPreprocessor()
+
+    def run(self, input_ims):
+        """Preprocess input images for FastestDet
+
+        :param input_ims: (list of numpy.ndarray) The input images
+        :return: list of FDTensor
+        """
+        return self._preprocessor.run(input_ims)
+
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [352, 352]
+        """
+        return self._preprocessor.size
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._preprocessor.size = wh
+
+
+class FastestDetPostprocessor:
+    def __init__(self):
+        """Create a postprocessor for FastestDet"""
+        self._postprocessor = C.vision.detection.FastestDetPostprocessor()
+
+    def run(self, runtime_results, ims_info):
+        """Postprocess the runtime results for FastestDet
+
+        :param runtime_results: (list of FDTensor) The output FDTensor results from runtime
+        :param ims_info: (list of dict) Record of input_shape and output_shape
+        :return: list of DetectionResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
+        """
+        return self._postprocessor.run(runtime_results, ims_info)
+
+    @property
+    def conf_threshold(self):
+        """
+        confidence threshold for postprocessing, default is 0.65
+        """
+        return self._postprocessor.conf_threshold
+
+    @property
+    def nms_threshold(self):
+        """
+        nms threshold for postprocessing, default is 0.45
+        """
+        return self._postprocessor.nms_threshold
+
+    @conf_threshold.setter
+    def conf_threshold(self, conf_threshold):
+        assert isinstance(
+            conf_threshold, float
+        ), "The value to set `conf_threshold` must be type of float."
+        self._postprocessor.conf_threshold = conf_threshold
+
+    @nms_threshold.setter
+    def nms_threshold(self, nms_threshold):
+        assert isinstance(
+            nms_threshold, float
+        ), "The value to set `nms_threshold` must be type of float."
+        self._postprocessor.nms_threshold = nms_threshold
+
+
+class FastestDet(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a FastestDet model exported by FastestDet.
+
+        :param model_file: (str) Path of model file, e.g. ./FastestDet.onnx
+        :param params_file: (str) Path of parameters file, e.g. yolox/model.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
+        :param runtime_option: (ultra_infer.RuntimeOption) RuntimeOption for inference of this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat) Model format of the loaded model
+        """
+
+        super(FastestDet, self).__init__(runtime_option)
+
+        assert (
+            model_format == ModelFormat.ONNX
+        ), "FastestDet only support model format of ModelFormat.ONNX now."
+        self._model = C.vision.detection.FastestDet(
+            model_file, params_file, self._runtime_option, model_format
+        )
+
+        assert self.initialized, "FastestDet initialize failed."
+
+    def predict(self, input_image):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray) The input image data, 3-D array with layout HWC, BGR format
+        :return: DetectionResult
+        """
+        assert input_image is not None, "Input image is None."
+        return self._model.predict(input_image)
+
+    def batch_predict(self, images):
+        assert len(images) == 1, "FastestDet is only support 1 image in batch_predict"
+        """Classify a batch of input image
+
+        :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of DetectionResult
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get FastestDetPreprocessor object of the loaded model
+
+        :return: FastestDetPreprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get FastestDetPostprocessor object of the loaded model
+
+        :return: FastestDetPostprocessor
+        """
+        return self._model.postprocessor
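A minimal usage sketch for FastestDet, assuming re-export as ultra_infer.vision.detection.FastestDet; paths are illustrative. Note that batch_predict asserts exactly one image per call:

    import cv2
    import ultra_infer as ui

    model = ui.vision.detection.FastestDet("./FastestDet.onnx")
    model.preprocessor.size = [352, 352]       # the documented default
    model.postprocessor.conf_threshold = 0.65  # defaults shown in the docstrings
    model.postprocessor.nms_threshold = 0.45

    im = cv2.imread("test.jpg")
    result = model.predict(im)
    print(result)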

+ 135 - 0
libs/ultra_infer/python/ultra_infer/vision/detection/contrib/nanodet_plus.py

@@ -0,0 +1,135 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class NanoDetPlus(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a NanoDetPlus model exported by NanoDet.
+
+        :param model_file: (str) Path of model file, e.g. ./nanodet.onnx
+        :param params_file: (str) Path of parameters file, e.g. yolox/model.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
+        :param runtime_option: (ultra_infer.RuntimeOption) RuntimeOption for inference of this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat) Model format of the loaded model
+        """
+        # Call the base class to initialize the backend options
+        # The initialized options are stored in self._runtime_option
+        super(NanoDetPlus, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.NanoDetPlus(
+            model_file, params_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the whole model initialized successfully
+        assert self.initialized, "NanoDetPlus initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray) The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+        return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
+
+    # Property wrappers for NanoDetPlus model options
+    # Most relate to preprocessing; e.g. setting model.size = [416, 416] changes the resize target (provided the model supports it)
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height),  default (320, 320)
+        """
+        return self._model.size
+
+    @property
+    def padding_value(self):
+        #  padding value, size should be the same as channels
+        return self._model.padding_value
+
+    @property
+    def keep_ratio(self):
+        # whether to keep the aspect ratio when resizing; set to false by default for NanoDet-Plus
+        return self._model.keep_ratio
+
+    @property
+    def downsample_strides(self):
+        # downsample strides used by NanoDet-Plus to generate anchors; defaults to (8, 16, 32, 64)
+        return self._model.downsample_strides
+
+    @property
+    def max_wh(self):
+        # for offsetting the boxes by class when using NMS, default 4096
+        return self._model.max_wh
+
+    @property
+    def reg_max(self):
+        """
+        reg_max for GFL regression, default 7
+        """
+        return self._model.reg_max
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._model.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `padding_value` must be type of list."
+        self._model.padding_value = value
+
+    @keep_ratio.setter
+    def keep_ratio(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `keep_ratio` must be type of bool."
+        self._model.keep_ratio = value
+
+    @downsample_strides.setter
+    def downsample_strides(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `downsample_strides` must be type of list."
+        self._model.downsample_strides = value
+
+    @max_wh.setter
+    def max_wh(self, value):
+        assert isinstance(
+            value, float
+        ), "The value to set `max_wh` must be type of float."
+        self._model.max_wh = value
+
+    @reg_max.setter
+    def reg_max(self, value):
+        assert isinstance(value, int), "The value to set `reg_max` must be type of int."
+        self._model.reg_max = value
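A minimal usage sketch for NanoDetPlus, assuming re-export as ultra_infer.vision.detection.NanoDetPlus; paths are illustrative:

    import cv2
    import ultra_infer as ui

    model = ui.vision.detection.NanoDetPlus("./nanodet.onnx")
    model.size = [416, 416]  # only valid if the exported model supports this input size

    im = cv2.imread("test.jpg")
    result = model.predict(im, conf_threshold=0.35, nms_iou_threshold=0.5)
    print(result)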

+ 0 - 0
libs/ultrainfer/python/ultrainfer/vision/detection/contrib/rkyolo/__init__.py → libs/ultra_infer/python/ultra_infer/vision/detection/contrib/rkyolo/__init__.py


+ 315 - 0
libs/ultra_infer/python/ultra_infer/vision/detection/contrib/rkyolo/rkyolov5.py

@@ -0,0 +1,315 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from ..... import UltraInferModel, ModelFormat
+from ..... import c_lib_wrap as C
+
+
+class RKYOLOPreprocessor:
+    def __init__(self):
+        """Create a preprocessor for RKYOLOV5"""
+        self._preprocessor = C.vision.detection.RKYOLOPreprocessor()
+
+    def run(self, input_ims):
+        """Preprocess input images for RKYOLOV5
+
+        :param input_ims: (list of numpy.ndarray) The input images
+        :return: list of FDTensor
+        """
+        return self._preprocessor.run(input_ims)
+
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+        """
+        return self._preprocessor.size
+
+    @property
+    def padding_value(self):
+        """
+        padding value for preprocessing, default [114.0, 114.0, 114.0]
+        """
+        #  padding value, size should be the same as channels
+        return self._preprocessor.padding_value
+
+    @property
+    def is_scale_up(self):
+        """
+        is_scale_up for preprocessing; if False, the input image can only be zoomed out (the maximum resize scale cannot exceed 1.0), default True
+        """
+        return self._preprocessor.is_scale_up
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._preprocessor.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `padding_value` must be type of list."
+        self._preprocessor.padding_value = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_scale_up` must be type of bool."
+        self._preprocessor.is_scale_up = value
+
+
+class RKYOLOPostprocessor:
+    def __init__(self):
+        """Create a postprocessor for RKYOLOV5"""
+        self._postprocessor = C.vision.detection.RKYOLOPostprocessor()
+
+    def run(self, runtime_results):
+        """Postprocess the runtime results for RKYOLOV5
+
+        :param runtime_results: (list of FDTensor) The output FDTensor results from runtime
+        :return: list of DetectionResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
+        """
+        return self._postprocessor.run(runtime_results)
+
+    def set_anchor(self, anchor):
+        """Set the anchor boxes used by the postprocessor"""
+        self._postprocessor.set_anchor(anchor)
+
+    @property
+    def conf_threshold(self):
+        """
+        confidence threshold for postprocessing, default is 0.25
+        """
+        return self._postprocessor.conf_threshold
+
+    @property
+    def nms_threshold(self):
+        """
+        nms threshold for postprocessing, default is 0.5
+        """
+        return self._postprocessor.nms_threshold
+
+    @property
+    def class_num(self):
+        """
+        class_num for postprocessing, default is 80
+        """
+        return self._postprocessor.class_num
+
+    @conf_threshold.setter
+    def conf_threshold(self, conf_threshold):
+        assert isinstance(
+            conf_threshold, float
+        ), "The value to set `conf_threshold` must be type of float."
+        self._postprocessor.conf_threshold = conf_threshold
+
+    @nms_threshold.setter
+    def nms_threshold(self, nms_threshold):
+        assert isinstance(
+            nms_threshold, float
+        ), "The value to set `nms_threshold` must be type of float."
+        self._postprocessor.nms_threshold = nms_threshold
+
+    @class_num.setter
+    def class_num(self, class_num):
+        assert isinstance(
+            class_num, int
+        ), "The value to set `class_num` must be of type int."
+        self._postprocessor.class_num = class_num
+
+
+class RKYOLOV5(UltraInferModel):
+    def __init__(self, model_file, runtime_option=None, model_format=ModelFormat.RKNN):
+        """Load a RKYOLOV5 model exported by RKYOLOV5.
+
+        :param model_file: (str) Path of model file, e.g. ./yolov5.rknn
+        :param runtime_option: (ultra_infer.RuntimeOption) RuntimeOption for inference of this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat) Model format of the loaded model
+        """
+        # Call the base class to initialize the backend options
+        # The initialized options are stored in self._runtime_option
+        super(RKYOLOV5, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.RKYOLOV5(
+            model_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the whole model initialized successfully
+        assert self.initialized, "RKYOLOV5 initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray) The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+
+        self.postprocessor.conf_threshold = conf_threshold
+        self.postprocessor.nms_threshold = nms_iou_threshold
+        return self._model.predict(input_image)
+
+    def batch_predict(self, images):
+        """Classify a batch of input image
+
+        :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of DetectionResult
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get RKYOLOPreprocessor object of the loaded model
+
+        :return: RKYOLOPreprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get RKYOLOPostprocessor object of the loaded model
+
+        :return: RKYOLOPostprocessor
+        """
+        return self._model.postprocessor
+
+
+class RKYOLOX(UltraInferModel):
+    def __init__(self, model_file, runtime_option=None, model_format=ModelFormat.RKNN):
+        """Load a RKYOLOX model exported by RKYOLOX.
+
+        :param model_file: (str) Path of model file, e.g. ./yolox.rknn
+        :param runtime_option: (ultra_infer.RuntimeOption) RuntimeOption for inference of this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat) Model format of the loaded model
+        """
+        # Call the base class to initialize the backend options
+        # The initialized options are stored in self._runtime_option
+        super(RKYOLOX, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.RKYOLOX(
+            model_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the whole model initialized successfully
+        assert self.initialized, "RKYOLOX initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray) The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+
+        self.postprocessor.conf_threshold = conf_threshold
+        self.postprocessor.nms_threshold = nms_iou_threshold
+        return self._model.predict(input_image)
+
+    def batch_predict(self, images):
+        """Classify a batch of input image
+
+        :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of DetectionResult
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get RKYOLOPreprocessor object of the loaded model
+
+        :return: RKYOLOPreprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get RKYOLOPostprocessor object of the loaded model
+
+        :return: RKYOLOPostprocessor
+        """
+        return self._model.postprocessor
+
+
+class RKYOLOV7(UltraInferModel):
+    def __init__(self, model_file, runtime_option=None, model_format=ModelFormat.RKNN):
+        """Load a RKYOLOX model exported by RKYOLOV7.
+
+        :param model_file: (str) Path of model file, e.g. ./yolov7.rknn
+        :param runtime_option: (ultra_infer.RuntimeOption) RuntimeOption for inference of this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat) Model format of the loaded model
+        """
+        # Call the base class to initialize the backend options
+        # The initialized options are stored in self._runtime_option
+        super(RKYOLOV7, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.RKYOLOV7(
+            model_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the whole model initialized successfully
+        assert self.initialized, "RKYOLOV7 initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray) The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+
+        self.postprocessor.conf_threshold = conf_threshold
+        self.postprocessor.nms_threshold = nms_iou_threshold
+        return self._model.predict(input_image)
+
+    def batch_predict(self, images):
+        """Classify a batch of input image
+
+        :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of DetectionResult
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get RKYOLOPreprocessor object of the loaded model
+
+        :return: RKYOLOPreprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get RKYOLOPostprocessor object of the loaded model
+
+        :return: RKYOLOPostprocessor
+        """
+        return self._model.postprocessor
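A minimal usage sketch for the RKNN wrappers above, assuming re-export as ultra_infer.vision.detection.RKYOLOV5 and an RKNN file exported for a Rockchip NPU; paths are illustrative:

    import cv2
    import ultra_infer as ui

    model = ui.vision.detection.RKYOLOV5("./yolov5.rknn")
    model.postprocessor.class_num = 80  # match the number of classes the model was trained on

    im = cv2.imread("test.jpg")
    result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)
    print(result)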

+ 146 - 0
libs/ultra_infer/python/ultra_infer/vision/detection/contrib/scaled_yolov4.py

@@ -0,0 +1,146 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class ScaledYOLOv4(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a ScaledYOLOv4 model exported by ScaledYOLOv4.
+
+        :param model_file: (str) Path of model file, e.g. ./scaled_yolov4.onnx
+        :param params_file: (str) Path of parameters file, e.g. yolox/model.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
+        :param runtime_option: (ultra_infer.RuntimeOption) RuntimeOption for inference of this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat) Model format of the loaded model
+        """
+        # Call the base class to initialize the backend options
+        # The initialized options are stored in self._runtime_option
+        super(ScaledYOLOv4, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.ScaledYOLOv4(
+            model_file, params_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the whole model initialized successfully
+        assert self.initialized, "ScaledYOLOv4 initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray) The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+        return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
+
+    # Property wrappers for ScaledYOLOv4 model options
+    # Most relate to preprocessing; e.g. setting model.size = [1280, 1280] changes the resize target (provided the model supports it)
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+        """
+        return self._model.size
+
+    @property
+    def padding_value(self):
+        #  padding value, size should be the same as channels
+        return self._model.padding_value
+
+    @property
+    def is_no_pad(self):
+        # when is_mini_pad = false and is_no_pad = true, the image will be resized to the target size directly
+        return self._model.is_no_pad
+
+    @property
+    def is_mini_pad(self):
+        # only pad to the minimum rectangle whose height and width are multiples of stride
+        return self._model.is_mini_pad
+
+    @property
+    def is_scale_up(self):
+        # if is_scale_up is false, the input image can only be zoomed out; the maximum resize scale cannot exceed 1.0
+        return self._model.is_scale_up
+
+    @property
+    def stride(self):
+        # padding stride, for is_mini_pad
+        return self._model.stride
+
+    @property
+    def max_wh(self):
+        # for offsetting the boxes by class when using NMS
+        return self._model.max_wh
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._model.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `padding_value` must be type of list."
+        self._model.padding_value = value
+
+    @is_no_pad.setter
+    def is_no_pad(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_no_pad` must be type of bool."
+        self._model.is_no_pad = value
+
+    @is_mini_pad.setter
+    def is_mini_pad(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_mini_pad` must be type of bool."
+        self._model.is_mini_pad = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_scale_up` must be type of bool."
+        self._model.is_scale_up = value
+
+    @stride.setter
+    def stride(self, value):
+        assert isinstance(value, int), "The value to set `stride` must be type of int."
+        self._model.stride = value
+
+    @max_wh.setter
+    def max_wh(self, value):
+        assert isinstance(
+            value, float
+        ), "The value to set `max_wh` must be type of float."
+        self._model.max_wh = value
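A minimal usage sketch for ScaledYOLOv4, assuming re-export as ultra_infer.vision.detection.ScaledYOLOv4; paths are illustrative:

    import cv2
    import ultra_infer as ui

    model = ui.vision.detection.ScaledYOLOv4("./scaled_yolov4.onnx")
    model.size = [640, 640]  # the documented default resize target

    im = cv2.imread("test.jpg")
    result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)
    print(result)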

+ 145 - 0
libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolor.py

@@ -0,0 +1,145 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class YOLOR(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a YOLOR model exported by YOLOR
+
+        :param model_file: (str)Path of model file, e.g ./yolor.onnx
+        :param params_file: (str)Path of parameters file, e.g yolox/model.pdiparams, if the model_fomat is ModelFormat.ONNX, this param will be ignored, can be set as empty string
+        :param runtime_option: (ultra_infer.RuntimeOption)RuntimeOption for inference this model, if it's None, will use the default backend on CPU
+        :param model_format: (ultra_infer.ModelForamt)Model format of the loaded model
+        """
+        # Call the base class to initialize the backend options
+        # The initialized options are stored in self._runtime_option
+        super(YOLOR, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.YOLOR(
+            model_file, params_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the whole model initialized successfully
+        assert self.initialized, "YOLOR initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray) The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+        return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
+
+    # Property wrappers for YOLOR model options
+    # Most relate to preprocessing; e.g. setting model.size = [1280, 1280] changes the resize target (provided the model supports it)
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+        """
+        return self._model.size
+
+    @property
+    def padding_value(self):
+        #  padding value, size should be the same as channels
+        return self._model.padding_value
+
+    @property
+    def is_no_pad(self):
+        # when is_mini_pad = false and is_no_pad = true, the image will be resized to the target size directly
+        return self._model.is_no_pad
+
+    @property
+    def is_mini_pad(self):
+        # only pad to the minimum rectangle whose height and width are multiples of stride
+        return self._model.is_mini_pad
+
+    @property
+    def is_scale_up(self):
+        # if is_scale_up is false, the input image can only be zoomed out; the maximum resize scale cannot exceed 1.0
+        return self._model.is_scale_up
+
+    @property
+    def stride(self):
+        # padding stride, for is_mini_pad
+        return self._model.stride
+
+    @property
+    def max_wh(self):
+        # for offsetting the boxes by class when using NMS
+        return self._model.max_wh
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._model.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `padding_value` must be type of list."
+        self._model.padding_value = value
+
+    @is_no_pad.setter
+    def is_no_pad(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_no_pad` must be type of bool."
+        self._model.is_no_pad = value
+
+    @is_mini_pad.setter
+    def is_mini_pad(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_mini_pad` must be type of bool."
+        self._model.is_mini_pad = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_scale_up` must be type of bool."
+        self._model.is_scale_up = value
+
+    @stride.setter
+    def stride(self, value):
+        assert isinstance(value, int), "The value to set `stride` must be type of int."
+        self._model.stride = value
+
+    @max_wh.setter
+    def max_wh(self, value):
+        assert isinstance(
+            value, float
+        ), "The value to set `max_wh` must be type of float."
+        self._model.max_wh = value
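A minimal usage sketch for YOLOR, assuming re-export as ultra_infer.vision.detection.YOLOR; paths are illustrative. The letterbox-style knobs exposed above can be tuned before predict():

    import cv2
    import ultra_infer as ui

    model = ui.vision.detection.YOLOR("./yolor.onnx")
    model.is_mini_pad = True  # pad only to the nearest multiple of stride
    model.stride = 32

    im = cv2.imread("test.jpg")
    result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)
    print(result)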

+ 227 - 0
libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolov5.py

@@ -0,0 +1,227 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class YOLOv5Preprocessor:
+    def __init__(self):
+        """Create a preprocessor for YOLOv5"""
+        self._preprocessor = C.vision.detection.YOLOv5Preprocessor()
+
+    def run(self, input_ims):
+        """Preprocess input images for YOLOv5
+
+        :param input_ims: (list of numpy.ndarray) The input images
+        :return: list of FDTensor
+        """
+        return self._preprocessor.run(input_ims)
+
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+        """
+        return self._preprocessor.size
+
+    @property
+    def padding_value(self):
+        """
+        padding value for preprocessing, default [114.0, 114.0, 114.0]
+        """
+        #  padding value, size should be the same as channels
+        return self._preprocessor.padding_value
+
+    @property
+    def is_scale_up(self):
+        """
+        is_scale_up for preprocessing; if False, the input image can only be zoomed out (the maximum resize scale cannot exceed 1.0), default True
+        """
+        return self._preprocessor.is_scale_up
+
+    @property
+    def is_mini_pad(self):
+        """
+        is_mini_pad for preprocessing, pad to the minimum rectangle whose height and width are multiples of stride, default False
+        """
+        return self._preprocessor.is_mini_pad
+
+    @property
+    def stride(self):
+        """
+        stride for preprocessing, only for mini_pad mode, default 32
+        """
+        return self._preprocessor.stride
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contatins 2 elements means [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._preprocessor.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `padding_value` must be type of list."
+        self._preprocessor.padding_value = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_scale_up` must be type of bool."
+        self._preprocessor.is_scale_up = value
+
+    @is_mini_pad.setter
+    def is_mini_pad(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_mini_pad` must be type of bool."
+        self._preprocessor.is_mini_pad = value
+
+    @stride.setter
+    def stride(self, value):
+        assert isinstance(value, int), "The value to set `stride` must be type of int."
+        self._preprocessor.stride = value
+
+
+class YOLOv5Postprocessor:
+    def __init__(self):
+        """Create a postprocessor for YOLOv5"""
+        self._postprocessor = C.vision.detection.YOLOv5Postprocessor()
+
+    def run(self, runtime_results, ims_info):
+        """Postprocess the runtime results for YOLOv5
+
+        :param runtime_results: (list of FDTensor) The output FDTensor results from runtime
+        :param ims_info: (list of dict) Record of input_shape and output_shape
+        :return: list of DetectionResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
+        """
+        return self._postprocessor.run(runtime_results, ims_info)
+
+    @property
+    def conf_threshold(self):
+        """
+        confidence threshold for postprocessing, default is 0.25
+        """
+        return self._postprocessor.conf_threshold
+
+    @property
+    def nms_threshold(self):
+        """
+        nms threshold for postprocessing, default is 0.5
+        """
+        return self._postprocessor.nms_threshold
+
+    @property
+    def multi_label(self):
+        """
+        multi_label for postprocessing; set True for evaluation, default is True
+        """
+        return self._postprocessor.multi_label
+
+    @conf_threshold.setter
+    def conf_threshold(self, conf_threshold):
+        assert isinstance(
+            conf_threshold, float
+        ), "The value to set `conf_threshold` must be type of float."
+        self._postprocessor.conf_threshold = conf_threshold
+
+    @nms_threshold.setter
+    def nms_threshold(self, nms_threshold):
+        assert isinstance(
+            nms_threshold, float
+        ), "The value to set `nms_threshold` must be type of float."
+        self._postprocessor.nms_threshold = nms_threshold
+
+    @multi_label.setter
+    def multi_label(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `multi_label` must be type of bool."
+        self._postprocessor.multi_label = value
+
+
+class YOLOv5(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a YOLOv5 model exported by YOLOv5.
+
+        :param model_file: (str)Path of model file, e.g. ./yolov5.onnx
+        :param params_file: (str)Path of parameters file, e.g. yolox/model.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
+        :param runtime_option: (ultra_infer.RuntimeOption)RuntimeOption for inferring this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat)Model format of the loaded model
+        """
+        # Call the base class to initialize the backend option.
+        # The initialized option is stored in self._runtime_option.
+        super(YOLOv5, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.YOLOv5(
+            model_file, params_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the whole model initialized successfully.
+        assert self.initialized, "YOLOv5 initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+
+        self.postprocessor.conf_threshold = conf_threshold
+        self.postprocessor.nms_threshold = nms_iou_threshold
+        return self._model.predict(input_image)
+
+    def batch_predict(self, images):
+        """Classify a batch of input image
+
+        :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of DetectionResult
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get YOLOv5Preprocessor object of the loaded model
+
+        :return YOLOv5Preprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get YOLOv5Postprocessor object of the loaded model
+
+        :return YOLOv5Postprocessor
+        """
+        return self._model.postprocessor
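
A minimal usage sketch for the wrapper above (editor's illustration, not part of the diff). It assumes the class is re-exported as ultra_infer.vision.detection.YOLOv5, and ./yolov5s.onnx and test.jpg are placeholder paths:

    import cv2  # reads images as HWC, BGR, matching the docstring
    from ultra_infer import RuntimeOption
    from ultra_infer.vision.detection import YOLOv5  # assumed re-export path

    option = RuntimeOption()  # passing None would also select the default CPU backend
    model = YOLOv5("./yolov5s.onnx", runtime_option=option)

    # Preprocessing knobs are reached through the preprocessor property.
    model.preprocessor.size = [640, 640]

    im = cv2.imread("test.jpg")
    result = model.predict(im, conf_threshold=0.3, nms_iou_threshold=0.5)
    print(result)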

+ 191 - 0
libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolov5lite.py

@@ -0,0 +1,191 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class YOLOv5Lite(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a YOLOv5Lite model exported by YOLOv5Lite.
+
+        :param model_file: (str)Path of model file, e.g. ./yolov5lite.onnx
+        :param params_file: (str)Path of parameters file, e.g. yolox/model.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
+        :param runtime_option: (ultra_infer.RuntimeOption)RuntimeOption for inferring this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat)Model format of the loaded model
+        """
+        # Call the base class to initialize the backend option.
+        # The initialized option is stored in self._runtime_option.
+        super(YOLOv5Lite, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.YOLOv5Lite(
+            model_file, params_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the whole model initialized successfully.
+        assert self.initialized, "YOLOv5Lite initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+        return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
+
+    # Wrappers for YOLOv5Lite-related model attributes.
+    # Most of them are preprocessing options; e.g. setting model.size = [1280, 1280] changes the resize target during preprocessing (provided the model supports it).
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+        """
+        return self._model.size
+
+    @property
+    def padding_value(self):
+        #  padding value, size should be the same as channels
+        return self._model.padding_value
+
+    @property
+    def is_no_pad(self):
+        # when is_mini_pad = false and is_no_pad = true, the image will be resized to the set size
+        return self._model.is_no_pad
+
+    @property
+    def is_mini_pad(self):
+        # only pad to the minimum rectangle whose height and width are multiples of stride
+        return self._model.is_mini_pad
+
+    @property
+    def is_scale_up(self):
+        # if is_scale_up is false, the input image can only be zoomed out; the maximum resize scale cannot exceed 1.0
+        return self._model.is_scale_up
+
+    @property
+    def stride(self):
+        # padding stride, for is_mini_pad
+        return self._model.stride
+
+    @property
+    def max_wh(self):
+        # for offsetting the boxes by classes when using NMS
+        return self._model.max_wh
+
+    @property
+    def is_decode_exported(self):
+        """
+        Whether the model_file was exported with the decode module.
+        The official YOLOv5Lite export.py script exports ONNX files without the decode module.
+        Please set this to True manually if the model file was exported with the decode module.
+        False: ONNX file without the decode module. True: ONNX file with the decode module.
+        Default is False.
+        """
+        return self._model.is_decode_exported
+
+    @property
+    def anchor_config(self):
+        return self._model.anchor_config
+
+    @property
+    def downsample_strides(self):
+        """
+        downsample strides for YOLOv5Lite to generate anchors; defaults to (8, 16, 32), and some models may also use stride=64.
+        """
+        return self._model.downsample_strides
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contain 2 elements meaning [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._model.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `padding_value` must be type of list."
+        self._model.padding_value = value
+
+    @is_no_pad.setter
+    def is_no_pad(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_no_pad` must be type of bool."
+        self._model.is_no_pad = value
+
+    @is_mini_pad.setter
+    def is_mini_pad(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_mini_pad` must be type of bool."
+        self._model.is_mini_pad = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_scale_up` must be type of bool."
+        self._model.is_scale_up = value
+
+    @stride.setter
+    def stride(self, value):
+        assert isinstance(value, int), "The value to set `stride` must be type of int."
+        self._model.stride = value
+
+    @max_wh.setter
+    def max_wh(self, value):
+        assert isinstance(
+            value, float
+        ), "The value to set `max_wh` must be type of float."
+        self._model.max_wh = value
+
+    @is_decode_exported.setter
+    def is_decode_exported(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_decode_exported` must be type of bool."
+        self._model.is_decode_exported = value
+
+    @anchor_config.setter
+    def anchor_config(self, anchor_config_val):
+        assert isinstance(
+            anchor_config_val, list
+        ), "The value to set `anchor_config` must be type of list."
+        assert isinstance(
+            anchor_config_val[0], list
+        ), "The value to set `anchor_config` must be a 2-dimensional list."
+        self._model.anchor_config = anchor_config_val
+
+    @downsample_strides.setter
+    def downsample_strides(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `downsample_strides` must be type of list."
+        self._model.downsample_strides = value
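
Building on is_decode_exported and anchor_config above, a hedged configuration sketch (editor's illustration; the import path is assumed, and the anchor values are the stock YOLOv5 anchors, used purely for illustration):

    from ultra_infer.vision.detection import YOLOv5Lite  # assumed re-export path

    model = YOLOv5Lite("./yolov5lite.onnx")  # placeholder path
    model.is_decode_exported = True  # the ONNX file was exported with the decode module
    # anchor_config must be a 2-dimensional list, one row per detection head
    model.anchor_config = [
        [10.0, 13.0, 16.0, 30.0, 33.0, 23.0],
        [30.0, 61.0, 62.0, 45.0, 59.0, 119.0],
        [116.0, 90.0, 156.0, 198.0, 373.0, 326.0],
    ]
    model.downsample_strides = [8, 16, 32]  # one stride per head, matching anchor_config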

+ 222 - 0
libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolov5seg.py

@@ -0,0 +1,222 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class YOLOv5SegPreprocessor:
+    def __init__(self):
+        """Create a preprocessor for YOLOv5Seg"""
+        self._preprocessor = C.vision.detection.YOLOv5SegPreprocessor()
+
+    def run(self, input_ims):
+        """Preprocess input images for YOLOv5Seg
+
+        :param: input_ims: (list of numpy.ndarray)The input image
+        :return: list of FDTensor
+        """
+        return self._preprocessor.run(input_ims)
+
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+        """
+        return self._preprocessor.size
+
+    @property
+    def padding_value(self):
+        """
+        padding value for preprocessing, default [114.0, 114.0, 114.0]
+        """
+        #  padding value, size should be the same as channels
+        return self._preprocessor.padding_value
+
+    @property
+    def is_scale_up(self):
+        """
+        is_scale_up for preprocessing; the input image can only be zoomed out, the maximum resize scale cannot exceed 1.0, default true
+        """
+        return self._preprocessor.is_scale_up
+
+    @property
+    def is_mini_pad(self):
+        """
+        is_mini_pad for preprocessing; pad to the minimum rectangle whose height and width are multiples of stride, default false
+        """
+        return self._preprocessor.is_mini_pad
+
+    @property
+    def stride(self):
+        """
+        stride for preprocessing, only for mini_pad mode, default 32
+        """
+        return self._preprocessor.stride
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contain 2 elements meaning [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._preprocessor.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `padding_value` must be type of list."
+        self._preprocessor.padding_value = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_scale_up` must be type of bool."
+        self._preprocessor.is_scale_up = value
+
+    @is_mini_pad.setter
+    def is_mini_pad(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_mini_pad` must be type of bool."
+        self._preprocessor.is_mini_pad = value
+
+    @stride.setter
+    def stride(self, value):
+        assert isinstance(value, int), "The value to set `stride` must be type of int."
+        self._preprocessor.stride = value
+
+
+class YOLOv5SegPostprocessor:
+    def __init__(self):
+        """Create a postprocessor for YOLOv5Seg"""
+        self._postprocessor = C.vision.detection.YOLOv5SegPostprocessor()
+
+    def run(self, runtime_results, ims_info):
+        """Postprocess the runtime results for YOLOv5Seg
+
+        :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
+        :param: ims_info: (list of dict)Record input_shape and output_shape
+        :return: list of DetectionResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
+        """
+        return self._postprocessor.run(runtime_results, ims_info)
+
+    @property
+    def conf_threshold(self):
+        """
+        confidence threshold for postprocessing, default is 0.25
+        """
+        return self._postprocessor.conf_threshold
+
+    @property
+    def nms_threshold(self):
+        """
+        nms threshold for postprocessing, default is 0.5
+        """
+        return self._postprocessor.nms_threshold
+
+    @property
+    def multi_label(self):
+        """
+        multi_label for postprocessing; set to True for evaluation, default is True
+        """
+        return self._postprocessor.multi_label
+
+    @conf_threshold.setter
+    def conf_threshold(self, conf_threshold):
+        assert isinstance(
+            conf_threshold, float
+        ), "The value to set `conf_threshold` must be type of float."
+        self._postprocessor.conf_threshold = conf_threshold
+
+    @nms_threshold.setter
+    def nms_threshold(self, nms_threshold):
+        assert isinstance(
+            nms_threshold, float
+        ), "The value to set `nms_threshold` must be type of float."
+        self._postprocessor.nms_threshold = nms_threshold
+
+    @multi_label.setter
+    def multi_label(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `multi_label` must be type of bool."
+        self._postprocessor.multi_label = value
+
+
+class YOLOv5Seg(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a YOLOv5Seg model exported by YOLOv5.
+
+        :param model_file: (str)Path of model file, e.g. ./yolov5s-seg.onnx
+        :param params_file: (str)Path of parameters file, e.g. yolox/model.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
+        :param runtime_option: (ultra_infer.RuntimeOption)RuntimeOption for inferring this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat)Model format of the loaded model
+        """
+        super(YOLOv5Seg, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.YOLOv5Seg(
+            model_file, params_file, self._runtime_option, model_format
+        )
+        assert self.initialized, "YOLOv5Seg initialize failed."
+
+    def predict(self, input_image):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+        :return: DetectionResult
+        """
+
+        return self._model.predict(input_image)
+
+    def batch_predict(self, images):
+        """Classify a batch of input image
+
+        :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of DetectionResult
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get YOLOv5SegPreprocessor object of the loaded model
+
+        :return YOLOv5SegPreprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get YOLOv5SegPostprocessor object of the loaded model
+
+        :return YOLOv5SegPostprocessor
+        """
+        return self._model.postprocessor
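
Since YOLOv5Seg.predict takes no threshold arguments, thresholds are tuned on the postprocessor before calling predict; a short sketch (editor's illustration; import path and file names are placeholders):

    import cv2
    from ultra_infer.vision.detection import YOLOv5Seg  # assumed re-export path

    model = YOLOv5Seg("./yolov5s-seg.onnx")
    model.postprocessor.conf_threshold = 0.3  # must be set before predict()
    model.postprocessor.nms_threshold = 0.5

    ims = [cv2.imread("a.jpg"), cv2.imread("b.jpg")]
    results = model.batch_predict(ims)  # one DetectionResult per input image
    print(len(results))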

+ 145 - 0
libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolov6.py

@@ -0,0 +1,145 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class YOLOv6(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a YOLOv6 model exported by YOLOv6.
+
+        :param model_file: (str)Path of model file, e.g. ./yolov6.onnx
+        :param params_file: (str)Path of parameters file, e.g. yolox/model.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
+        :param runtime_option: (ultra_infer.RuntimeOption)RuntimeOption for inferring this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat)Model format of the loaded model
+        """
+        # Call the base class to initialize the backend option.
+        # The initialized option is stored in self._runtime_option.
+        super(YOLOv6, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.YOLOv6(
+            model_file, params_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the whole model initialized successfully.
+        assert self.initialized, "YOLOv6 initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+        return self._model.predict(input_image, conf_threshold, nms_iou_threshold)
+
+    # Wrappers for YOLOv6-related model attributes.
+    # Most of them are preprocessing options; e.g. setting model.size = [1280, 1280] changes the resize target during preprocessing (provided the model supports it).
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+        """
+        return self._model.size
+
+    @property
+    def padding_value(self):
+        #  padding value, size should be the same as channels
+        return self._model.padding_value
+
+    @property
+    def is_no_pad(self):
+        # when is_mini_pad = false and is_no_pad = true, the image will be resized to the set size
+        return self._model.is_no_pad
+
+    @property
+    def is_mini_pad(self):
+        # only pad to the minimum rectangle whose height and width are multiples of stride
+        return self._model.is_mini_pad
+
+    @property
+    def is_scale_up(self):
+        # if is_scale_up is false, the input image can only be zoomed out; the maximum resize scale cannot exceed 1.0
+        return self._model.is_scale_up
+
+    @property
+    def stride(self):
+        # padding stride, for is_mini_pad
+        return self._model.stride
+
+    @property
+    def max_wh(self):
+        # for offsetting the boxes by classes when using NMS
+        return self._model.max_wh
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contain 2 elements meaning [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._model.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `padding_value` must be type of list."
+        self._model.padding_value = value
+
+    @is_no_pad.setter
+    def is_no_pad(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_no_pad` must be type of bool."
+        self._model.is_no_pad = value
+
+    @is_mini_pad.setter
+    def is_mini_pad(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_mini_pad` must be type of bool."
+        self._model.is_mini_pad = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_scale_up` must be type of bool."
+        self._model.is_scale_up = value
+
+    @stride.setter
+    def stride(self, value):
+        assert isinstance(value, int), "The value to set `stride` must be type of int."
+        self._model.stride = value
+
+    @max_wh.setter
+    def max_wh(self, value):
+        assert isinstance(
+            value, float
+        ), "The value to set `max_wh` must be type of float."
+        self._model.max_wh = value
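
Unlike YOLOv5 and YOLOv7 in this diff, YOLOv6 exposes its preprocessing switches directly on the model object rather than through a preprocessor property; a sketch of enabling minimum-rectangle padding (editor's illustration; paths and import path are assumed):

    import cv2
    from ultra_infer.vision.detection import YOLOv6  # assumed re-export path

    model = YOLOv6("./yolov6s.onnx")
    model.is_mini_pad = True   # pad only to the nearest multiple of stride
    model.is_no_pad = False
    model.stride = 32          # padding stride used by mini-pad mode

    im = cv2.imread("test.jpg")
    result = model.predict(im, conf_threshold=0.25, nms_iou_threshold=0.5)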

+ 187 - 0
libs/ultra_infer/python/ultra_infer/vision/detection/contrib/yolov7.py

@@ -0,0 +1,187 @@
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import absolute_import
+import logging
+from .... import UltraInferModel, ModelFormat
+from .... import c_lib_wrap as C
+
+
+class YOLOv7Preprocessor:
+    def __init__(self):
+        """Create a preprocessor for YOLOv7"""
+        self._preprocessor = C.vision.detection.YOLOv7Preprocessor()
+
+    def run(self, input_ims):
+        """Preprocess input images for YOLOv7
+
+        :param: input_ims: (list of numpy.ndarray)The input image
+        :return: list of FDTensor
+        """
+        return self._preprocessor.run(input_ims)
+
+    @property
+    def size(self):
+        """
+        Argument for image preprocessing step, the preprocess image size, tuple of (width, height), default size = [640, 640]
+        """
+        return self._preprocessor.size
+
+    @property
+    def padding_value(self):
+        """
+        padding value for preprocessing, default [114.0, 114.0, 114.0]
+        """
+        #  padding value, size should be the same as channels
+        return self._preprocessor.padding_value
+
+    @property
+    def is_scale_up(self):
+        """
+        is_scale_up for preprocessing; the input image can only be zoomed out, the maximum resize scale cannot exceed 1.0, default true
+        """
+        return self._preprocessor.is_scale_up
+
+    @size.setter
+    def size(self, wh):
+        assert isinstance(
+            wh, (list, tuple)
+        ), "The value to set `size` must be type of tuple or list."
+        assert (
+            len(wh) == 2
+        ), "The value to set `size` must contain 2 elements meaning [width, height], but now it contains {} elements.".format(
+            len(wh)
+        )
+        self._preprocessor.size = wh
+
+    @padding_value.setter
+    def padding_value(self, value):
+        assert isinstance(
+            value, list
+        ), "The value to set `padding_value` must be type of list."
+        self._preprocessor.padding_value = value
+
+    @is_scale_up.setter
+    def is_scale_up(self, value):
+        assert isinstance(
+            value, bool
+        ), "The value to set `is_scale_up` must be type of bool."
+        self._preprocessor.is_scale_up = value
+
+
+class YOLOv7Postprocessor:
+    def __init__(self):
+        """Create a postprocessor for YOLOv7"""
+        self._postprocessor = C.vision.detection.YOLOv7Postprocessor()
+
+    def run(self, runtime_results, ims_info):
+        """Postprocess the runtime results for YOLOv7
+
+        :param: runtime_results: (list of FDTensor)The output FDTensor results from runtime
+        :param: ims_info: (list of dict)Record input_shape and output_shape
+        :return: list of DetectionResult (if the runtime_results were predicted from batched samples, the length of this list equals the batch size)
+        """
+        return self._postprocessor.run(runtime_results, ims_info)
+
+    @property
+    def conf_threshold(self):
+        """
+        confidence threshold for postprocessing, default is 0.25
+        """
+        return self._postprocessor.conf_threshold
+
+    @property
+    def nms_threshold(self):
+        """
+        nms threshold for postprocessing, default is 0.5
+        """
+        return self._postprocessor.nms_threshold
+
+    @conf_threshold.setter
+    def conf_threshold(self, conf_threshold):
+        assert isinstance(
+            conf_threshold, float
+        ), "The value to set `conf_threshold` must be type of float."
+        self._postprocessor.conf_threshold = conf_threshold
+
+    @nms_threshold.setter
+    def nms_threshold(self, nms_threshold):
+        assert isinstance(
+            nms_threshold, float
+        ), "The value to set `nms_threshold` must be type of float."
+        self._postprocessor.nms_threshold = nms_threshold
+
+
+class YOLOv7(UltraInferModel):
+    def __init__(
+        self,
+        model_file,
+        params_file="",
+        runtime_option=None,
+        model_format=ModelFormat.ONNX,
+    ):
+        """Load a YOLOv7 model exported by YOLOv7.
+
+        :param model_file: (str)Path of model file, e.g. ./yolov7.onnx
+        :param params_file: (str)Path of parameters file, e.g. yolox/model.pdiparams; if the model_format is ModelFormat.ONNX, this param will be ignored and can be set as an empty string
+        :param runtime_option: (ultra_infer.RuntimeOption)RuntimeOption for inferring this model; if it's None, the default backend on CPU will be used
+        :param model_format: (ultra_infer.ModelFormat)Model format of the loaded model
+        """
+        # Call the base class to initialize the backend option.
+        # The initialized option is stored in self._runtime_option.
+        super(YOLOv7, self).__init__(runtime_option)
+
+        self._model = C.vision.detection.YOLOv7(
+            model_file, params_file, self._runtime_option, model_format
+        )
+        # self.initialized indicates whether the whole model initialized successfully.
+        assert self.initialized, "YOLOv7 initialize failed."
+
+    def predict(self, input_image, conf_threshold=0.25, nms_iou_threshold=0.5):
+        """Detect an input image
+
+        :param input_image: (numpy.ndarray)The input image data, 3-D array with layout HWC, BGR format
+        :param conf_threshold: confidence threshold for postprocessing, default is 0.25
+        :param nms_iou_threshold: iou threshold for NMS, default is 0.5
+        :return: DetectionResult
+        """
+
+        self.postprocessor.conf_threshold = conf_threshold
+        self.postprocessor.nms_threshold = nms_iou_threshold
+        return self._model.predict(input_image)
+
+    def batch_predict(self, images):
+        """Classify a batch of input image
+
+        :param im: (list of numpy.ndarray) The input image list, each element is a 3-D array with layout HWC, BGR format
+        :return list of DetectionResult
+        """
+
+        return self._model.batch_predict(images)
+
+    @property
+    def preprocessor(self):
+        """Get YOLOv7Preprocessor object of the loaded model
+
+        :return YOLOv7Preprocessor
+        """
+        return self._model.preprocessor
+
+    @property
+    def postprocessor(self):
+        """Get YOLOv7Postprocessor object of the loaded model
+
+        :return YOLOv7Postprocessor
+        """
+        return self._model.postprocessor
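
A closing sketch for YOLOv7 (editor's illustration; paths and import path are assumed). Note in the source above that predict() writes its keyword thresholds into the postprocessor before running, so they persist for later batch_predict() calls:

    import cv2
    from ultra_infer.vision.detection import YOLOv7  # assumed re-export path

    model = YOLOv7("./yolov7.onnx")
    im = cv2.imread("test.jpg")

    result = model.predict(im, conf_threshold=0.3, nms_iou_threshold=0.45)
    # The thresholds above are now stored on the postprocessor ...
    assert abs(model.postprocessor.conf_threshold - 0.3) < 1e-6
    # ... and will be reused by subsequent batch predictions.
    results = model.batch_predict([im])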

Some files were not shown because too many files changed in this diff