
add OpenVINO engine (#998)

* add openvino

* fix openvino input

* fix openvino output type

* fix openvino cmake trt

* fix openvino type
heliqi 4 years ago
parent
commit
f2fce32bbf

+ 36 - 177
deploy/cpp/CMakeLists.txt

@@ -1,21 +1,29 @@
 cmake_minimum_required(VERSION 3.0)
 project(PaddleDeploy CXX C)
 
-option(WITH_MKL        "Compile demo with MKL/OpenBlas support,defaultuseMKL."          ON)
-option(WITH_GPU        "Compile demo with GPU/CPU, default use CPU."                    OFF)
-if (NOT WIN32)
-    option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static."   OFF)
+if (WIN32)
+  option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static."   ON)
 else()
-    option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static."   ON)
+  option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static."   OFF)
 endif()
-option(WITH_TENSORRT "Compile demo with TensorRT."   OFF)
-option(WITH_ENCRYPTION "Compile demo with ENCRYPTION."   OFF)
+# Paddle
+option(WITH_MKL        "Compile demo with MKL/OpenBlas support, default use MKL."          ON)
+option(WITH_GPU        "Compile demo with GPU/CPU, default use CPU."                    OFF)
+option(WITH_PADDLE_TENSORRT "Compile demo with TensorRT."   OFF)
+# other engines
+option(WITH_OPENVINO "Compile demo with OpenVINO."   OFF)
+option(WITH_ONNX_TENSORRT "Compile demo with ONNX TensorRT."   OFF)
 
+SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
+SET(DEPS "" CACHE PATH "Location of libraries")
+# Paddle
 SET(TENSORRT_DIR "" CACHE PATH "Location of libraries")
 SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
-SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
 SET(CUDA_LIB "" CACHE PATH "Location of libraries")
-SET(OPENSSL_DIR "" CACHE PATH "Location of libraries")
+#OpenVINO
+SET(GFLAGS_DIR "" CACHE PATH "Location of libraries")
+SET(OPENVINO_DIR "" CACHE PATH "Location of libraries")
+SET(NGRAPH_LIB "" CACHE PATH "Location of libraries")
 
 SET(PROJECT_ROOT_DIR  "." CACHE PATH  "root directory of project.")
 
@@ -30,9 +38,17 @@ else()
     add_definitions(-DPADDLEX_DEPLOY)
 endif()
 
-#source
-include_directories("${CMAKE_SOURCE_DIR}/")
-link_directories("${CMAKE_CURRENT_BINARY_DIR}")
+#project
+include_directories("${PROJECT_SOURCE_DIR}")
+
+# common
+aux_source_directory(${PROJECT_SOURCE_DIR}/model_deploy/common/src SRC)
+
+# det seg clas pdx src
+aux_source_directory(${PROJECT_SOURCE_DIR}/model_deploy/ppdet/src DETECTOR_SRC)
+aux_source_directory(${PROJECT_SOURCE_DIR}/model_deploy/ppseg/src DETECTOR_SRC)
+aux_source_directory(${PROJECT_SOURCE_DIR}/model_deploy/ppclas/src DETECTOR_SRC)
+aux_source_directory(${PROJECT_SOURCE_DIR}/model_deploy/paddlex/src DETECTOR_SRC)
 
 #yaml-cpp
 if(WIN32)
@@ -40,68 +56,10 @@ if(WIN32)
 else()
   SET(YAML_BUILD_SHARED_LIBS ON CACHE BOOL "yaml build shared library.")
 endif(WIN32)
-include(${PROJECT_ROOT_DIR}/cmake/yaml-cpp.cmake)
+include(${PROJECT_SOURCE_DIR}/cmake/yaml-cpp.cmake)
 include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include")
 link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")
 
-#paddle inference
-if (NOT DEFINED PADDLE_DIR OR ${PADDLE_DIR} STREQUAL "")
-    message(FATAL_ERROR "please set PADDLE_DIR with -DPADDLE_DIR=/path/paddle_influence_dir")
-endif()
-
-#paddle inference third party
-include_directories("${PADDLE_DIR}")
-include_directories("${PADDLE_DIR}/third_party/install/protobuf/include")
-include_directories("${PADDLE_DIR}/third_party/install/glog/include")
-include_directories("${PADDLE_DIR}/third_party/install/gflags/include")
-include_directories("${PADDLE_DIR}/third_party/install/xxhash/include")
-include_directories("${PADDLE_DIR}/third_party/install/cryptopp/include")
-
-link_directories("${PADDLE_DIR}/paddle/lib/")
-link_directories("${PADDLE_DIR}/third_party/install/protobuf/lib")
-link_directories("${PADDLE_DIR}/third_party/install/glog/lib")
-link_directories("${PADDLE_DIR}/third_party/install/gflags/lib")
-link_directories("${PADDLE_DIR}/third_party/install/xxhash/lib")
-link_directories("${PADDLE_DIR}/third_party/install/cryptopp/lib")
-
-if (WIN32)
-  set(DEPS ${DEPS} ${PADDLE_DIR}/paddle/lib/paddle_inference.lib)
-  set(DEPS ${DEPS} glog gflags_static libprotobuf xxhash cryptopp-static libyaml-cppmt shlwapi)
-else()
-  if (WITH_STATIC_LIB)
-    set(DEPS ${PADDLE_DIR}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
-  else()
-    set(DEPS ${PADDLE_DIR}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
-  endif()
-  set(DEPS ${DEPS} glog gflags protobuf xxhash cryptopp yaml-cpp)
-endif(WIN32)
-
-#MKL
-if(WITH_MKL)
-  ADD_DEFINITIONS(-DUSE_MKL)
-  set(MKLML_PATH "${PADDLE_DIR}/third_party/install/mklml")
-  include_directories("${MKLML_PATH}/include")
-  if (WIN32)
-    set(MATH_LIB ${MKLML_PATH}/lib/mklml.lib ${MKLML_PATH}/lib/libiomp5md.lib)
-  else ()
-    set(MATH_LIB ${MKLML_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} ${MKLML_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
-    execute_process(COMMAND cp -r ${MKLML_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib)
-  endif ()
-  set(MKLDNN_PATH "${PADDLE_DIR}/third_party/install/mkldnn")
-  if(EXISTS ${MKLDNN_PATH})
-    include_directories("${MKLDNN_PATH}/include")
-    if (WIN32)
-      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
-    else ()
-      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
-    endif ()
-  endif()
-else()
-  set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
-endif()
-
-set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB})
-
 #OPENCV
 if (NOT (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64"))
   if (NOT DEFINED OPENCV_DIR OR ${OPENCV_DIR} STREQUAL "")
@@ -125,62 +83,7 @@ endif ()
 set(DEPS ${DEPS} ${OpenCV_LIBS})
 include_directories(${OpenCV_INCLUDE_DIRS})
 
-
-if (WITH_TENSORRT AND WITH_GPU)
-  include_directories("${TENSORRT_DIR}/include")
-  link_directories("${TENSORRT_DIR}/lib")
-
-  file(READ ${TENSORRT_DIR}/include/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
-  string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
-    "${TENSORRT_VERSION_FILE_CONTENTS}")
-  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
-    file(READ ${TENSORRT_DIR}/include/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
-    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
-      "${TENSORRT_VERSION_FILE_CONTENTS}")
-  endif()
-  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
-    message(SEND_ERROR "Failed to detect TensorRT version.")
-  endif()
-  string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
-    TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
-  message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
-    "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
-endif()
-
-#set GPU
-if(WITH_GPU)
-  if (NOT DEFINED CUDA_LIB OR ${CUDA_LIB} STREQUAL "")
-    message(FATAL_ERROR "please set CUDA_LIB with -DCUDA_LIB=/path/cuda/lib64")
-  endif()
-
-  if(NOT WIN32)
-    if (NOT DEFINED CUDNN_LIB)
-      message(FATAL_ERROR "please set CUDNN_LIB with -DCUDNN_LIB=/path/cudnn/")
-    endif()
-
-    set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
-    set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
-
-    if (WITH_TENSORRT)
-      set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
-      set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
-    endif()
-
-  else()
-    set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
-    set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
-    set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX})
-
-    if (WITH_TENSORRT)
-      set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
-      set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
-      if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
-        set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/myelin64_1${CMAKE_STATIC_LIBRARY_SUFFIX})
-      endif()
-    endif()
-  endif()
-endif()
-
+# MD
 macro(safe_set_static_flag)
     foreach(flag_var
         CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
@@ -216,20 +119,7 @@ else()
     set(DEPS ${DEPS} ${EXTERNAL_LIB})
 endif()
 
-message("-----DEPS = ${DEPS}")
-
-#project
-include_directories("${PROJECT_ROOT_DIR}")
-
-aux_source_directory(${PROJECT_ROOT_DIR}/model_deploy/common/src SRC)
-set(ENGINE_SRC ${PROJECT_ROOT_DIR}/model_deploy/engine/src/ppinference_engine.cpp)
-
-#detector seg
-aux_source_directory(${PROJECT_ROOT_DIR}/model_deploy/ppdet/src DETECTOR_SRC)
-aux_source_directory(${PROJECT_ROOT_DIR}/model_deploy/ppseg/src DETECTOR_SRC)
-aux_source_directory(${PROJECT_ROOT_DIR}/model_deploy/ppclas/src DETECTOR_SRC)
-aux_source_directory(${PROJECT_ROOT_DIR}/model_deploy/paddlex/src DETECTOR_SRC)
-
+# encryption
 set(ENCRYPTION_SRC "")
 if (WITH_ENCRYPTION)
   add_definitions(-DPADDLEX_DEPLOY_ENCRYPTION)
@@ -248,40 +138,9 @@ if (WITH_ENCRYPTION)
   aux_source_directory(${PROJECT_ROOT_DIR}/encryption/util/src/crypto ENCRYPTION_SRC)
 endif()
 
-add_executable(model_infer ${PROJECT_ROOT_DIR}/demo/model_infer.cpp ${SRC} ${ENGINE_SRC} ${DETECTOR_SRC} ${ENCRYPTION_SRC})
-ADD_DEPENDENCIES(model_infer ext-yaml-cpp)
-target_link_libraries(model_infer ${DEPS})
-
-add_executable(batch_infer ${PROJECT_ROOT_DIR}/demo/batch_infer.cpp ${SRC} ${ENGINE_SRC} ${DETECTOR_SRC} ${ENCRYPTION_SRC})
-ADD_DEPENDENCIES(batch_infer ext-yaml-cpp)
-target_link_libraries(batch_infer ${DEPS})
-
-add_executable(multi_gpu_model_infer ${PROJECT_ROOT_DIR}/demo/multi_gpu_model_infer.cpp ${SRC} ${ENGINE_SRC} ${DETECTOR_SRC} ${ENCRYPTION_SRC})
-ADD_DEPENDENCIES(multi_gpu_model_infer ext-yaml-cpp)
-target_link_libraries(multi_gpu_model_infer ${DEPS})
-
-if (WITH_TENSORRT)
-  add_executable(tensorrt_infer ${PROJECT_ROOT_DIR}/demo/tensorrt_infer.cpp ${SRC} ${ENGINE_SRC} ${DETECTOR_SRC} ${ENCRYPTION_SRC})
-  ADD_DEPENDENCIES(tensorrt_infer ext-yaml-cpp)
-  target_link_libraries(tensorrt_infer ${DEPS})
-endif()
-
-if(WIN32)
-  add_custom_command(TARGET model_infer POST_BUILD
-    COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ${CMAKE_BINARY_DIR}/paddle_deploy
-    COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/paddle_deploy
-    COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll  ${CMAKE_BINARY_DIR}/paddle_deploy
-    COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_DIR}/paddle/lib/paddle_inference.dll ${CMAKE_BINARY_DIR}/paddle_deploy
-  )
-  if (WITH_TENSORRT)
-    add_custom_command(TARGET model_infer POST_BUILD
-      COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_DIR}/lib/nvinfer.dll ${CMAKE_BINARY_DIR}/paddle_deploy
-      COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_DIR}/lib/nvinfer_plugin.dll ${CMAKE_BINARY_DIR}/paddle_deploy
-    )
-    if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
-      add_custom_command(TARGET model_infer POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_DIR}/lib/myelin64_1.dll ${CMAKE_BINARY_DIR}/paddle_deploy
-      )
-    endif()
-  endif()
+# select engine
+if(WITH_OPENVINO)
+  add_subdirectory(demo/onnx_openvino)
+else ()
+  add_subdirectory(demo)
 endif()
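
The top-level CMakeLists.txt now only collects the common sources and dispatches to one demo subdirectory per engine. A minimal configure sketch of the two paths, using placeholder paths (the full scripts/build.sh and scripts/onnx/openvino_build.sh later in this commit are the authoritative versions):

    # OpenVINO engine: builds deploy/cpp/demo/onnx_openvino
    cmake .. -DWITH_OPENVINO=ON -DOPENVINO_DIR=/path/to/inference_engine \
        -DNGRAPH_LIB=/path/to/ngraph -DGFLAGS_DIR=/path/to/gflags \
        -DOPENCV_DIR=/path/to/opencv -DARCH=x86
    # Default: builds deploy/cpp/demo with the Paddle Inference engine
    cmake .. -DPADDLE_DIR=/path/to/paddle_inference -DOPENCV_DIR=/path/to/opencv \
        -DWITH_GPU=OFF -DWITH_MKL=ON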

+ 21 - 1
deploy/cpp/CMakeSettings.json

@@ -44,7 +44,7 @@
           "type": "BOOL"
         },
         {
-          "name": "WITH_TENSORRT",
+          "name": "WITH_PADDLE_TENSORRT",
           "value": "False",
           "type": "BOOL"
         },
@@ -57,6 +57,26 @@
           "name": "OPENSSL_DIR",
           "value": "path\\to\\openssl1.1.0k",
           "type": "PATH"
+        },
+        {
+          "name": "WITH_OPENVINO",
+          "value": "False",
+          "type": "BOOL"
+        },
+        {
+          "name": "GFLAGS_DIR",
+          "value": "path\\to\\gflags",
+          "type": "PATH"
+        },
+        {
+          "name": "OPENVINO_DIR",
+          "value": "path\\to\\inference_engine",
+          "type": "PATH"
+        },
+        {
+          "name": "NGRAPH_LIB",
+          "value": "path\\to\\deployment_tools/ngraph",
+          "type": "PATH"
         }
       ]
     }

+ 157 - 0
deploy/cpp/demo/CMakeLists.txt

@@ -0,0 +1,157 @@
+#paddle inference
+if (NOT DEFINED PADDLE_DIR OR ${PADDLE_DIR} STREQUAL "")
+    message(FATAL_ERROR "please set PADDLE_DIR with -DPADDLE_DIR=/path/paddle_inference_dir")
+endif()
+
+#paddle inference third party
+include_directories("${PADDLE_DIR}")
+include_directories("${PADDLE_DIR}/third_party/install/protobuf/include")
+include_directories("${PADDLE_DIR}/third_party/install/glog/include")
+include_directories("${PADDLE_DIR}/third_party/install/gflags/include")
+include_directories("${PADDLE_DIR}/third_party/install/xxhash/include")
+include_directories("${PADDLE_DIR}/third_party/install/cryptopp/include")
+
+link_directories("${PADDLE_DIR}/paddle/lib/")
+link_directories("${PADDLE_DIR}/third_party/install/protobuf/lib")
+link_directories("${PADDLE_DIR}/third_party/install/glog/lib")
+link_directories("${PADDLE_DIR}/third_party/install/gflags/lib")
+link_directories("${PADDLE_DIR}/third_party/install/xxhash/lib")
+link_directories("${PADDLE_DIR}/third_party/install/cryptopp/lib")
+
+if (WIN32)
+  set(DEPS ${DEPS} ${PADDLE_DIR}/paddle/lib/paddle_inference.lib)
+  set(DEPS ${DEPS} glog gflags_static libprotobuf xxhash cryptopp-static libyaml-cppmt shlwapi)
+else()
+  if (WITH_STATIC_LIB)
+    set(DEPS ${DEPS} ${PADDLE_DIR}/paddle/lib/libpaddle_inference${CMAKE_STATIC_LIBRARY_SUFFIX})
+  else()
+    set(DEPS ${DEPS} ${PADDLE_DIR}/paddle/lib/libpaddle_inference${CMAKE_SHARED_LIBRARY_SUFFIX})
+  endif()
+  set(DEPS ${DEPS} glog gflags protobuf xxhash cryptopp yaml-cpp)
+endif(WIN32)
+
+#MKL
+if(WITH_MKL)
+  ADD_DEFINITIONS(-DUSE_MKL)
+  set(MKLML_PATH "${PADDLE_DIR}/third_party/install/mklml")
+  include_directories("${MKLML_PATH}/include")
+  if (WIN32)
+    set(MATH_LIB ${MKLML_PATH}/lib/mklml.lib ${MKLML_PATH}/lib/libiomp5md.lib)
+  else ()
+    set(MATH_LIB ${MKLML_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} ${MKLML_PATH}/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
+    execute_process(COMMAND cp -r ${MKLML_PATH}/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib)
+  endif ()
+  set(MKLDNN_PATH "${PADDLE_DIR}/third_party/install/mkldnn")
+  if(EXISTS ${MKLDNN_PATH})
+    include_directories("${MKLDNN_PATH}/include")
+    if (WIN32)
+      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
+    else ()
+      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
+    endif ()
+  endif()
+else()
+  set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
+endif()
+
+set(DEPS ${DEPS} ${MATH_LIB} ${MKLDNN_LIB})
+
+#set GPU
+if (WITH_PADDLE_TENSORRT AND WITH_GPU)
+  include_directories("${TENSORRT_DIR}/include")
+  link_directories("${TENSORRT_DIR}/lib")
+
+  file(READ ${TENSORRT_DIR}/include/NvInfer.h TENSORRT_VERSION_FILE_CONTENTS)
+  string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+    "${TENSORRT_VERSION_FILE_CONTENTS}")
+  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
+    file(READ ${TENSORRT_DIR}/include/NvInferVersion.h TENSORRT_VERSION_FILE_CONTENTS)
+    string(REGEX MATCH "define NV_TENSORRT_MAJOR +([0-9]+)" TENSORRT_MAJOR_VERSION
+      "${TENSORRT_VERSION_FILE_CONTENTS}")
+  endif()
+  if("${TENSORRT_MAJOR_VERSION}" STREQUAL "")
+    message(SEND_ERROR "Failed to detect TensorRT version.")
+  endif()
+  string(REGEX REPLACE "define NV_TENSORRT_MAJOR +([0-9]+)" "\\1"
+    TENSORRT_MAJOR_VERSION "${TENSORRT_MAJOR_VERSION}")
+  message(STATUS "Current TensorRT header is ${TENSORRT_INCLUDE_DIR}/NvInfer.h. "
+    "Current TensorRT version is v${TENSORRT_MAJOR_VERSION}. ")
+endif()
+
+if(WITH_GPU)
+  if (NOT DEFINED CUDA_LIB OR ${CUDA_LIB} STREQUAL "")
+    message(FATAL_ERROR "please set CUDA_LIB with -DCUDA_LIB=/path/cuda/lib64")
+  endif()
+
+  
+  if(NOT WIN32)
+    if (NOT DEFINED CUDNN_LIB)
+      message(FATAL_ERROR "please set CUDNN_LIB with -DCUDNN_LIB=/path/cudnn/")
+    endif()
+
+    set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
+    set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
+
+    if (WITH_PADDLE_TENSORRT)
+      set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
+      set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
+    endif()
+
+  else()
+    set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
+    set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
+    set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX})
+
+    if (WITH_PADDLE_TENSORRT)
+      set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/nvinfer${CMAKE_STATIC_LIBRARY_SUFFIX})
+      set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/nvinfer_plugin${CMAKE_STATIC_LIBRARY_SUFFIX})
+      if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
+        set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/myelin64_1${CMAKE_STATIC_LIBRARY_SUFFIX})
+      endif()
+    endif()
+  endif()
+endif()
+
+message("-----DEPS = ${DEPS}")
+
+# engine src
+set(ENGINE_SRC ${PROJECT_SOURCE_DIR}/model_deploy/engine/src/ppinference_engine.cpp)
+
+
+add_executable(model_infer model_infer.cpp ${SRC} ${ENGINE_SRC} ${DETECTOR_SRC} ${ENCRYPTION_SRC})
+ADD_DEPENDENCIES(model_infer ext-yaml-cpp)
+target_link_libraries(model_infer ${DEPS})
+
+add_executable(batch_infer batch_infer.cpp ${SRC} ${ENGINE_SRC} ${DETECTOR_SRC} ${ENCRYPTION_SRC})
+ADD_DEPENDENCIES(batch_infer ext-yaml-cpp)
+target_link_libraries(batch_infer ${DEPS})
+
+add_executable(multi_gpu_model_infer multi_gpu_model_infer.cpp ${SRC} ${ENGINE_SRC} ${DETECTOR_SRC} ${ENCRYPTION_SRC})
+ADD_DEPENDENCIES(multi_gpu_model_infer ext-yaml-cpp)
+target_link_libraries(multi_gpu_model_infer ${DEPS})
+
+if (WITH_PADDLE_TENSORRT)
+  add_executable(tensorrt_infer tensorrt_infer.cpp ${SRC} ${ENGINE_SRC} ${DETECTOR_SRC} ${ENCRYPTION_SRC})
+  ADD_DEPENDENCIES(tensorrt_infer ext-yaml-cpp)
+  target_link_libraries(tensorrt_infer ${DEPS})
+endif()
+
+if(WIN32)
+  add_custom_command(TARGET model_infer POST_BUILD
+    COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ${CMAKE_BINARY_DIR}/paddle_deploy
+    COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ${CMAKE_BINARY_DIR}/paddle_deploy
+    COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll  ${CMAKE_BINARY_DIR}/paddle_deploy
+    COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_DIR}/paddle/lib/paddle_inference.dll ${CMAKE_BINARY_DIR}/paddle_deploy
+  )
+  if (WITH_PADDLE_TENSORRT)
+    add_custom_command(TARGET model_infer POST_BUILD
+      COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_DIR}/lib/nvinfer.dll ${CMAKE_BINARY_DIR}/paddle_deploy
+      COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_DIR}/lib/nvinfer_plugin.dll ${CMAKE_BINARY_DIR}/paddle_deploy
+    )
+    if(${TENSORRT_MAJOR_VERSION} GREATER_EQUAL 7)
+      add_custom_command(TARGET model_infer POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy ${TENSORRT_DIR}/lib/myelin64_1.dll ${CMAKE_BINARY_DIR}/paddle_deploy
+      )
+    endif()
+  endif()
+endif()

+ 48 - 0
deploy/cpp/demo/onnx_openvino/CMakeLists.txt

@@ -0,0 +1,48 @@
+# Openvino
+include_directories("${OPENVINO_DIR}")
+include_directories("${OPENVINO_DIR}/include")
+include_directories("${OPENVINO_DIR}/external/tbb/include/tbb")
+link_directories("${OPENVINO_DIR}/lib")
+link_directories("${OPENVINO_DIR}/external/tbb/lib")
+
+link_directories("${NGRAPH_LIB}/lib")
+
+include_directories("${GFLAGS_DIR}/include")
+link_directories("${GFLAGS_DIR}/lib")
+
+if(WIN32)
+    link_directories("${OPENVINO_DIR}/lib/intel64/Release")
+    link_directories("${OPENVINO_DIR}/bin/intel64/Release")
+
+    set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/Release/inference_engine${CMAKE_STATIC_LIBRARY_SUFFIX})
+    set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/Release/inference_engine_legacy${CMAKE_STATIC_LIBRARY_SUFFIX})
+    set(DEPS ${DEPS} gflags_static libyaml-cppmt shlwapi)
+else()
+    if (${ARCH} STREQUAL "armv7")
+        if(WITH_STATIC_LIB)
+            set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/armv7l/libinference_engine${CMAKE_STATIC_LIBRARY_SUFFIX})
+            set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/armv7l/libinference_engine_legacy${CMAKE_STATIC_LIBRARY_SUFFIX})
+        else()
+            set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/armv7l/libinference_engine${CMAKE_SHARED_LIBRARY_SUFFIX})
+            set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/armv7l/libinference_engine_legacy${CMAKE_SHARED_LIBRARY_SUFFIX})
+        endif()
+    else()
+        if(WITH_STATIC_LIB)
+            set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/libinference_engine${CMAKE_STATIC_LIBRARY_SUFFIX})
+            set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/libinference_engine_legacy${CMAKE_STATIC_LIBRARY_SUFFIX})
+        else()
+            set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/libinference_engine${CMAKE_SHARED_LIBRARY_SUFFIX})
+            set(DEPS ${DEPS} ${OPENVINO_DIR}/lib/intel64/libinference_engine_legacy${CMAKE_SHARED_LIBRARY_SUFFIX})
+        endif()
+    endif()
+    set(DEPS ${DEPS} gflags yaml-cpp)
+endif(WIN32)
+
+message("-----DEPS = ${DEPS}")
+
+# engine src
+set(ENGINE_SRC ${PROJECT_SOURCE_DIR}/model_deploy/engine/src/openvino_engine.cpp)
+
+add_executable(model_infer model_infer.cpp ${SRC} ${ENGINE_SRC} ${DETECTOR_SRC})
+ADD_DEPENDENCIES(model_infer ext-yaml-cpp)
+target_link_libraries(model_infer ${DEPS})

+ 62 - 0
deploy/cpp/demo/onnx_openvino/model_infer.cpp

@@ -0,0 +1,62 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gflags/gflags.h>
+#include <string>
+#include <vector>
+
+#include <opencv2/core/core.hpp>
+#include <opencv2/highgui/highgui.hpp>
+#include <opencv2/imgproc/imgproc.hpp>
+
+#include "model_deploy/common/include/paddle_deploy.h"
+
+DEFINE_string(xml_file, "", "Path of model xml file");
+DEFINE_string(bin_file, "", "Path of model bin file");
+DEFINE_string(cfg_file, "", "Path of yaml file");
+DEFINE_string(model_type, "", "model type");
+DEFINE_string(image, "", "Path of test image file");
+DEFINE_string(device, "CPU", "Inferring with VPU or CPU");
+
+int main(int argc, char** argv) {
+  // Parsing command-line
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  // create model
+  PaddleDeploy::Model* model = PaddleDeploy::CreateModel(FLAGS_model_type);
+
+
+  // model init
+  model->Init(FLAGS_cfg_file);
+
+  // engine init
+  PaddleDeploy::OpenVinoEngineConfig engine_config;
+  engine_config.xml_file_ = FLAGS_xml_file;
+  engine_config.bin_file_ = FLAGS_bin_file;
+  engine_config.batch_size_ = 1;
+  engine_config.device_ = FLAGS_device;
+  model->OpenVinoEngineInit(engine_config);
+
+  // prepare data
+  std::vector<cv::Mat> imgs;
+  imgs.push_back(std::move(cv::imread(FLAGS_image)));
+
+  // predict
+  std::vector<PaddleDeploy::Result> results;
+  model->Predict(imgs, &results, 1);
+
+  std::cout << results[0] << std::endl;
+  delete model;
+  return 0;
+}
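
A possible invocation of the resulting demo once built, with placeholder paths (the flag names come from the DEFINE_string declarations above; --model_type=det and the binary location are assumptions, and --device may be CPU or MYRIAD):

    ./model_infer \
        --model_type=det \
        --cfg_file=/path/to/infer_cfg.yml \
        --xml_file=/path/to/model.xml \
        --bin_file=/path/to/model.bin \
        --image=/path/to/test.jpg \
        --device=CPU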

+ 2 - 0
deploy/cpp/model_deploy/common/include/base_model.h

@@ -76,6 +76,8 @@ class PD_INFER_DECL Model {
 
   bool TensorRTInit(const TensorRTEngineConfig& engine_config);
 
+  bool OpenVinoEngineInit(const OpenVinoEngineConfig& engine_config);
+
   virtual bool PostprocessInit() {
     postprocess_ = nullptr;
     std::cerr << "model no Postprocess!" << std::endl;

+ 23 - 3
deploy/cpp/model_deploy/engine/include/engine_config.h

@@ -162,12 +162,27 @@ struct TensorRTEngineConfig {
   YAML::Node yaml_config_;
 };
 
+struct OpenVinoEngineConfig {
+  // openvino xml file path
+  std::string xml_file_ = "";
+
+  // openvino bin file path
+  std::string bin_file_ = "";
+
+  //  Set batchsize
+  int batch_size_ = 1;
+
+  //  Set Device {CPU, MYRIAD}
+  std::string device_ = "CPU";
+};
+
 struct InferenceConfig {
   std::string engine_type;
   union {
     PaddleEngineConfig* paddle_config;
     TritonEngineConfig* triton_config;
     TensorRTEngineConfig* tensorrt_config;
+    OpenVinoEngineConfig* openvino_config;
   };
 
   InferenceConfig() {
@@ -182,6 +197,8 @@ struct InferenceConfig {
       triton_config = new TritonEngineConfig();
     } else if ("tensorrt" == engine_type) {
       tensorrt_config = new TensorRTEngineConfig();
+    } else if ("openvino" == engine_type) {
+      openvino_config = new OpenVinoEngineConfig();
     }
   }
 
@@ -196,20 +213,23 @@ struct InferenceConfig {
     } else if ("tensorrt" == engine_type) {
       tensorrt_config = new TensorRTEngineConfig();
       *tensorrt_config = *(config.tensorrt_config);
+    } else if ("openvino" == engine_type) {
+      openvino_config = new OpenVinoEngineConfig();
+      *openvino_config = *(config.openvino_config);
     }
   }
 
   ~InferenceConfig() {
     if ("paddle" == engine_type) {
       delete paddle_config;
-      paddle_config = NULL;
     } else if ("triton" == engine_type) {
       delete triton_config;
-      triton_config = NULL;
     } else if ("tensorrt" == engine_type) {
       delete tensorrt_config;
-      tensorrt_config = NULL;
+    } else if ("openvino" == engine_type) {
+      delete openvino_config;
     }
+    paddle_config = NULL;
   }
 };
 

+ 44 - 0
deploy/cpp/model_deploy/engine/include/openvino_engine.h

@@ -0,0 +1,44 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include <inference_engine.hpp>
+
+#include "model_deploy/common/include/base_model.h"
+#include "model_deploy/common/include/output_struct.h"
+#include "model_deploy/engine/include/engine.h"
+
+namespace PaddleDeploy {
+
+class OpenVinoEngine : public InferEngine {
+ public:
+  virtual bool Init(const InferenceConfig& engine_config);
+
+  virtual bool Infer(const std::vector<DataBlob>& inputs,
+                     std::vector<DataBlob>* outputs);
+
+ private:
+  bool GetDtype(const InferenceEngine::TensorDesc &output_blob,
+                DataBlob *output);
+
+  InferenceEngine::OutputsDataMap out_maps_;
+  InferenceEngine::InputsDataMap inputInfo_;
+  InferenceEngine::ExecutableNetwork executable_network_;
+};
+
+}  // namespace PaddleDeploy

+ 140 - 0
deploy/cpp/model_deploy/engine/src/openvino_engine.cpp

@@ -0,0 +1,140 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "model_deploy/engine/include/openvino_engine.h"
+
+namespace PaddleDeploy {
+bool Model::OpenVinoEngineInit(const OpenVinoEngineConfig& engine_config) {
+  infer_engine_ = std::make_shared<OpenVinoEngine>();
+  InferenceConfig config("openvino");
+  *(config.openvino_config) = engine_config;
+  return infer_engine_->Init(config);
+}
+
+bool OpenVinoEngine::Init(const InferenceConfig& infer_config) {
+  const OpenVinoEngineConfig& engine_config = *(infer_config.openvino_config);
+  InferenceEngine::Core ie;
+  InferenceEngine::CNNNetwork network = ie.ReadNetwork(
+                              engine_config.xml_file_,
+                              engine_config.bin_file_);
+  inputInfo_ = network.getInputsInfo();
+  out_maps_ = network.getOutputsInfo();
+  network.setBatchSize(engine_config.batch_size_);
+  if (engine_config.device_ == "MYRIAD") {
+    std::map<std::string, std::string> networkConfig;
+    networkConfig["VPU_HW_STAGES_OPTIMIZATION"] = "NO";
+    executable_network_ = ie.LoadNetwork(
+            network, engine_config.device_, networkConfig);
+  } else {
+    executable_network_ = ie.LoadNetwork(network, engine_config.device_);
+  }
+  return true;
+}
+
+bool OpenVinoEngine::Infer(const std::vector<DataBlob> &inputs,
+                          std::vector<DataBlob> *outputs) {
+  InferenceEngine::InferRequest infer_request =
+        executable_network_.CreateInferRequest();
+  int i = 0;
+  for (const auto & item : inputInfo_) {
+    InferenceEngine::TensorDesc input_tensor;
+    InferenceEngine::Blob::Ptr input_blob =
+        infer_request.GetBlob(item.first);
+    InferenceEngine::MemoryBlob::Ptr input_mem_blob =
+        InferenceEngine::as<InferenceEngine::MemoryBlob>(input_blob);
+    auto mem_blob_holder = input_mem_blob->wmap();
+    int size = std::accumulate(inputs[i].shape.begin(),
+                    inputs[i].shape.end(), 1, std::multiplies<int>());
+    if (inputs[i].dtype == 0) {
+      input_tensor.setPrecision(InferenceEngine::Precision::FP32);
+      float *blob_data = mem_blob_holder.as<float *>();
+      memcpy(blob_data, inputs[i].data.data(), size * sizeof(float));
+    } else if (inputs[i].dtype == 1) {
+      input_tensor.setPrecision(InferenceEngine::Precision::U64);
+      int64_t *blob_data = mem_blob_holder.as<int64_t *>();
+      memcpy(blob_data, inputs[i].data.data(), size * sizeof(int64_t));
+    } else if (inputs[i].dtype == 2) {
+      input_tensor.setPrecision(InferenceEngine::Precision::I32);
+      int *blob_data = mem_blob_holder.as<int *>();
+      memcpy(blob_data, inputs[i].data.data(), size * sizeof(int));
+    } else if (inputs[i].dtype == 3) {
+      input_tensor.setPrecision(InferenceEngine::Precision::U8);
+      uint8_t *blob_data = mem_blob_holder.as<uint8_t *>();
+      memcpy(blob_data, inputs[i].data.data(), size * sizeof(uint8_t));
+      infer_request.SetBlob(inputs[i].name, input_blob);
+    }
+    i += 1;
+  }
+
+  // do inference
+  infer_request.Infer();
+
+  for (const auto & output_map : out_maps_) {
+    DataBlob output;
+    std::string name = output_map.first;
+    output.name = name;
+    InferenceEngine::Blob::Ptr output_ptr = infer_request.GetBlob(name);
+    InferenceEngine::MemoryBlob::CPtr moutput =
+      InferenceEngine::as<InferenceEngine::MemoryBlob>(output_ptr);
+    InferenceEngine::TensorDesc blob_output = moutput->getTensorDesc();
+    InferenceEngine::SizeVector output_shape = blob_output.getDims();
+    int size = 1;
+    output.shape.clear();
+    for (auto& i : output_shape) {
+      size *= i;
+      output.shape.push_back(static_cast<int>(i));
+    }
+    GetDtype(blob_output, &output);
+    auto moutputHolder = moutput->rmap();
+    if (output.dtype == 0) {
+      output.data.resize(size * sizeof(float));
+      float* data = moutputHolder.as<float *>();
+      memcpy(output.data.data(), data, size * sizeof(float));
+    } else if (output.dtype == 1) {
+      output.data.resize(size * sizeof(int64_t));
+      int64_t* data = moutputHolder.as<int64_t *>();
+      memcpy(output.data.data(), data, size * sizeof(int64_t));
+    } else if (output.dtype == 2) {
+      output.data.resize(size * sizeof(int));
+      int* data = moutputHolder.as<int *>();
+      memcpy(output.data.data(), data, size * sizeof(int));
+    } else if (output.dtype == 3) {
+      output.data.resize(size * sizeof(uint8_t));
+      uint8_t* data = moutputHolder.as<uint8_t *>();
+      memcpy(output.data.data(), data, size * sizeof(uint8_t));
+    }
+    outputs->push_back(std::move(output));
+  }
+  return true;
+}
+
+bool OpenVinoEngine::GetDtype(const InferenceEngine::TensorDesc &output_blob,
+                          DataBlob *output) {
+  InferenceEngine::Precision output_precision = output_blob.getPrecision();
+  if (output_precision == 10) {  // InferenceEngine::Precision::FP32
+    output->dtype = FLOAT32;
+  } else if (output_precision == 72) {  // InferenceEngine::Precision::I64
+    output->dtype = INT64;
+  } else if (output_precision == 70) {  // InferenceEngine::Precision::I32
+    output->dtype = INT32;
+  } else if (output_precision == 40) {  // InferenceEngine::Precision::U8
+    output->dtype = INT8;
+  } else {
+    std::cout << "can't parse the precision type" << std::endl;
+    return false;
+  }
+  return true;
+}
+
+}  //  namespace PaddleDeploy

+ 7 - 0
deploy/cpp/model_deploy/ppseg/src/seg_postprocess.cpp

@@ -74,6 +74,13 @@ bool SegPostprocess::RunV2(const DataBlob& output,
                    std::back_inserter(label_vector),
                    [](int64_t x) { return (uint8_t)x;});
     label_data = reinterpret_cast<const uint8_t*>(label_vector.data());
+  } else if (output.dtype == INT32) {  // int32
+    const int32_t* output_data =
+          reinterpret_cast<const int32_t*>(output.data.data());
+    std::transform(output_data, output_data + label_map_size * batch_size,
+                   std::back_inserter(label_vector),
+                   [](int32_t x) { return (uint8_t)x;});
+    label_data = reinterpret_cast<const uint8_t*>(label_vector.data());
   } else if (output.dtype == INT8) {  // uint8
     label_data = reinterpret_cast<const uint8_t*>(output.data.data());
   } else {

+ 3 - 3
deploy/cpp/scripts/build.sh

@@ -3,14 +3,14 @@ WITH_GPU=ON
 # Use MKL or OpenBLAS
 WITH_MKL=ON
 # Whether to integrate TensorRT (only effective when WITH_GPU=ON)
-WITH_TENSORRT=OFF
+WITH_PADDLE_TENSORRT=OFF
 # Path to TensorRT; if TensorRT integration is needed, change this to your actual TensorRT installation path
 TENSORRT_DIR=$(pwd)/TensorRT/
 # Path to the Paddle inference library; change this to your actual installation path
 PADDLE_DIR=$(pwd)/paddle_inference
 # Whether to compile against the static Paddle inference library
 # When using TensorRT, the Paddle inference library is usually a shared library
-WITH_STATIC_LIB=ON
+WITH_STATIC_LIB=OFF
 # Path to the CUDA lib directory
 CUDA_LIB=/usr/local/cuda/lib64
 # Path to the cuDNN lib directory
@@ -37,7 +37,7 @@ cd build
 cmake .. \
     -DWITH_GPU=${WITH_GPU} \
     -DWITH_MKL=${WITH_MKL} \
-    -DWITH_TENSORRT=${WITH_TENSORRT} \
+    -DWITH_PADDLE_TENSORRT=${WITH_PADDLE_TENSORRT} \
     -DTENSORRT_DIR=${TENSORRT_DIR} \
     -DPADDLE_DIR=${PADDLE_DIR} \
     -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \

+ 64 - 0
deploy/cpp/scripts/onnx/openvino_build.sh

@@ -0,0 +1,64 @@
+# Path to the prebuilt OpenVINO inference_engine library
+OPENVINO_DIR=$INTEL_OPENVINO_DIR/inference_engine
+
+# Path to the ngraph lib, usually generated when OpenVINO is built
+NGRAPH_LIB=$INTEL_OPENVINO_DIR/deployment_tools/ngraph
+
+# Path to the prebuilt gflags library
+GFLAGS_DIR=$(pwd)/deps/gflags
+
+# Use the bundled prebuilt OpenCV
+OPENCV_DIR=$(pwd)/deps/opencv/
+
+# CPU architecture
+ARCH=x86
+export ARCH
+
+# Download and build gflags
+GFLAGS_URL=https://bj.bcebos.com/paddlex/deploy/gflags.tar.gz
+if [ ! -d ${GFLAGS_DIR} ]; then
+    cd deps
+    wget -c ${GFLAGS_URL} -O glog.tar.gz
+    tar -zxvf glog.tar.gz
+    rm -rf glog.tar.gz
+    cd ..
+fi
+
+mkdir -p deps
+# opencv
+if [ "$ARCH" = "x86" ]; then
+    OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/x86opencv/opencv.tar.bz2
+else
+    OPENCV_URL=https://bj.bcebos.com/paddlex/deploy/armlinux/opencv.tar.bz2
+fi
+if [ ! -d "./deps/opencv" ]; then
+    cd deps
+    wget -c ${OPENCV_URL}
+    tar xvfj opencv.tar.bz2
+    rm -rf opencv.tar.bz2
+    cd ..
+fi
+
+rm -rf build
+mkdir -p build
+cd build
+if [ ${ARCH} = "x86" ];then
+  cmake .. \
+      -DWITH_OPENVINO=ON \
+      -DOPENCV_DIR=${OPENCV_DIR} \
+      -DGFLAGS_DIR=${GFLAGS_DIR} \
+      -DOPENVINO_DIR=${OPENVINO_DIR} \
+      -DNGRAPH_LIB=${NGRAPH_LIB} \
+      -DARCH=${ARCH}
+  make -j16
+else
+  cmake .. \
+      -DWITH_OPENVINO=ON \
+      -DOPENCV_DIR=${OPENCV_DIR} \
+      -DGFLAGS_DIR=${GFLAGS_DIR} \
+      -DOPENVINO_DIR=${OPENVINO_DIR} \
+      -DNGRAPH_LIB=${NGRAPH_LIB} \
+      -DARCH=${ARCH} \
+      -DCMAKE_CXX_FLAGS="-march=armv7-a"
+  make
+fi

+ 0 - 0
deploy/cpp/scripts/tensorrt_build.sh → deploy/cpp/scripts/onnx/tensorrt_build.sh


+ 0 - 0
deploy/cpp/scripts/triton_build.sh → deploy/cpp/scripts/onnx/triton_build.sh