Преглед на файлове

add video deploy for human_seg, classifier, detector and segmenter

FlyingQianMM преди 5 години
родител
ревизия
76edffaa11

+ 73 - 13
deploy/cpp/CMakeLists.txt

@@ -3,7 +3,11 @@ project(PaddleX CXX C)
 
 option(WITH_MKL        "Compile demo with MKL/OpenBlas support,defaultuseMKL."          ON)
 option(WITH_GPU        "Compile demo with GPU/CPU, default use CPU."                    ON)
-option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static."   OFF)
+if (NOT WIN32)
+    option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static."   OFF)
+else()
+    option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static."   ON)
+endif()
 option(WITH_TENSORRT "Compile demo with TensorRT."   OFF)
 option(WITH_ENCRYPTION "Compile demo with encryption tool."   OFF)
 
@@ -57,8 +61,10 @@ if (NOT DEFINED PADDLE_DIR OR ${PADDLE_DIR} STREQUAL "")
     message(FATAL_ERROR "please set PADDLE_DIR with -DPADDLE_DIR=/path/paddle_influence_dir")
 endif()
 
-if (NOT DEFINED OPENCV_DIR OR ${OPENCV_DIR} STREQUAL "")
+if (NOT ("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "aarch64"))
+  if (NOT DEFINED OPENCV_DIR OR ${OPENCV_DIR} STREQUAL "")
     message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/opencv")
+  endif()
 endif()
 
 include_directories("${CMAKE_SOURCE_DIR}/")
@@ -106,10 +112,17 @@ if (WIN32)
   find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
   unset(OpenCV_DIR CACHE)
 else ()
-  find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
+  if ("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "aarch64") # x86_64 aarch64
+    set(OpenCV_INCLUDE_DIRS "/usr/include/opencv4")
+    file(GLOB OpenCV_LIBS /usr/lib/aarch64-linux-gnu/libopencv_*${CMAKE_SHARED_LIBRARY_SUFFIX})
+    message("OpenCV libs: ${OpenCV_LIBS}")
+  else()
+    find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
+  endif()
   include_directories("${PADDLE_DIR}/paddle/include")
   link_directories("${PADDLE_DIR}/paddle/lib")
 endif ()
+
 include_directories(${OpenCV_INCLUDE_DIRS})
 
 if (WIN32)
@@ -271,6 +284,7 @@ if (NOT WIN32)
 endif()
 
 set(DEPS ${DEPS} ${OpenCV_LIBS})
+
 add_library(paddlex_inference SHARED src/visualize src/transforms.cpp src/paddlex.cpp)
 ADD_DEPENDENCIES(paddlex_inference ext-yaml-cpp)
 target_link_libraries(paddlex_inference ${DEPS})
@@ -287,26 +301,60 @@ add_executable(segmenter demo/segmenter.cpp src/transforms.cpp src/paddlex.cpp s
 ADD_DEPENDENCIES(segmenter ext-yaml-cpp)
 target_link_libraries(segmenter ${DEPS})
 
+add_executable(video_classifier demo/video_classifier.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+ADD_DEPENDENCIES(video_classifier ext-yaml-cpp)
+target_link_libraries(video_classifier ${DEPS})
+
+add_executable(video_detector demo/video_detector.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+ADD_DEPENDENCIES(video_detector ext-yaml-cpp)
+target_link_libraries(video_detector ${DEPS})
+
+add_executable(video_segmenter demo/video_segmenter.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+ADD_DEPENDENCIES(video_segmenter ext-yaml-cpp)
+target_link_libraries(video_segmenter ${DEPS})
+
+
 if (WIN32 AND WITH_MKL)
     add_custom_command(TARGET classifier POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
     )
     add_custom_command(TARGET detector POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
     )
     add_custom_command(TARGET segmenter POST_BUILD
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
-        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
+    )
+    add_custom_command(TARGET video_classifier POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+    )
+    add_custom_command(TARGET video_detector POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+    )
+    add_custom_command(TARGET video_segmenter POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./paddlex_inference/Release/mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./paddlex_inference/Release/libiomp5md.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./paddlex_inference/Release/mkldnn.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
         COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
@@ -325,6 +373,18 @@ if (WIN32 AND WITH_MKL)
             COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll ./pmodel-decrypt.dll
             COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll ./release/pmodel-decrypt.dll
         )
+        add_custom_command(TARGET video_classifier POST_BUILD
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll ./pmodel-decrypt.dll
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll ./release/pmodel-decrypt.dll
+        )
+        add_custom_command(TARGET video_detector POST_BUILD
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll ./pmodel-decrypt.dll
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll ./release/pmodel-decrypt.dll
+        )
+        add_custom_command(TARGET video_segmenter POST_BUILD
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll ./pmodel-decrypt.dll
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll ./release/pmodel-decrypt.dll
+        )
     endif()
 endif()
 

+ 1 - 3
deploy/cpp/demo/classifier.cpp

@@ -37,7 +37,6 @@ DEFINE_int32(batch_size, 1, "Batch size of infering");
 DEFINE_int32(thread_num,
              omp_get_num_procs(),
              "Number of preprocessing threads");
-DEFINE_bool(use_ir_optim, true, "use ir optimization");
 
 int main(int argc, char** argv) {
   // Parsing command-line
@@ -58,8 +57,7 @@ int main(int argc, char** argv) {
              FLAGS_use_gpu,
              FLAGS_use_trt,
              FLAGS_gpu_id,
-             FLAGS_key,
-             FLAGS_use_ir_optim);
+             FLAGS_key);
 
   // 进行预测
   int imgs = 1;

+ 1 - 3
deploy/cpp/demo/detector.cpp

@@ -43,7 +43,6 @@ DEFINE_double(threshold,
 DEFINE_int32(thread_num,
              omp_get_num_procs(),
              "Number of preprocessing threads");
-DEFINE_bool(use_ir_optim, true, "use ir optimization");
 
 int main(int argc, char** argv) {
   // 解析命令行参数
@@ -63,8 +62,7 @@ int main(int argc, char** argv) {
              FLAGS_use_gpu,
              FLAGS_use_trt,
              FLAGS_gpu_id,
-             FLAGS_key,
-             FLAGS_use_ir_optim);
+             FLAGS_key);
   int imgs = 1;
   std::string save_dir = "output";
   // 进行预测

+ 1 - 3
deploy/cpp/demo/segmenter.cpp

@@ -39,7 +39,6 @@ DEFINE_int32(batch_size, 1, "Batch size of infering");
 DEFINE_int32(thread_num,
              omp_get_num_procs(),
              "Number of preprocessing threads");
-DEFINE_bool(use_ir_optim, false, "use ir optimization");
 
 int main(int argc, char** argv) {
   // 解析命令行参数
@@ -60,8 +59,7 @@ int main(int argc, char** argv) {
              FLAGS_use_gpu,
              FLAGS_use_trt,
              FLAGS_gpu_id,
-             FLAGS_key,
-             FLAGS_use_ir_optim);
+             FLAGS_key);
   int imgs = 1;
   // 进行预测
   if (FLAGS_image_list != "") {

+ 186 - 0
deploy/cpp/demo/video_classifier.cpp

@@ -0,0 +1,186 @@
+//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <glog/logging.h>
+#include <omp.h>
+
+#include <algorithm>
+#include <chrono>  // NOLINT
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <vector>
+#include <utility>
+
+#include "include/paddlex/paddlex.h"
+#include "include/paddlex/visualize.h"
+
+#if defined(__arm__) || defined(__aarch64__)
+#include <opencv2/videoio/legacy/constants_c.h>
+#endif
+
+using namespace std::chrono;  // NOLINT
+
+DEFINE_string(model_dir, "", "Path of inference model");
+DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
+DEFINE_bool(use_trt, false, "Infering with TensorRT");
+DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "key of encryption");
+DEFINE_bool(use_camera, false, "Infering with Camera");
+DEFINE_int32(camera_id, 0, "Camera id");
+DEFINE_string(video_path, "", "Path of input video");
+DEFINE_bool(show_result, false, "show the result of each frame with a window");
+DEFINE_bool(save_result, true, "save the result of each frame to a video");
+DEFINE_string(save_dir, "output", "Path to save visualized image");
+
+int main(int argc, char** argv) {
+  // Parsing command-line
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  if (FLAGS_model_dir == "") {
+    std::cerr << "--model_dir need to be defined" << std::endl;
+    return -1;
+  }
+  if (FLAGS_video_path == "" && FLAGS_use_camera == false) {
+    std::cerr << "--video_path or --use_camera need to be defined" << std::endl;
+    return -1;
+  }
+
+  // 加载模型
+  PaddleX::Model model;
+  model.Init(FLAGS_model_dir,
+             FLAGS_use_gpu,
+             FLAGS_use_trt,
+             FLAGS_gpu_id,
+             FLAGS_key);
+
+  // 打开视频流
+  cv::VideoCapture capture;
+  if (FLAGS_use_camera) {
+    capture.open(FLAGS_camera_id);
+    if (!capture.isOpened()) {
+      std::cout << "Can not open the camera "
+                << FLAGS_camera_id << "."
+                << std::endl;
+      return -1;
+    }
+  } else {
+    capture.open(FLAGS_video_path);
+    if (!capture.isOpened()) {
+      std::cout << "Can not open the video "
+                << FLAGS_video_path << "."
+                << std::endl;
+      return -1;
+    }
+  }
+
+  // 创建VideoWriter
+  cv::VideoWriter video_out;
+  std::string video_out_path;
+  if (FLAGS_save_result) {
+    // 获取视频流信息: 分辨率, 帧率
+    int video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
+    int video_height = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));
+    int video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));
+    int video_fourcc;
+    if (FLAGS_use_camera) {
+      video_fourcc = 828601953;
+    } else {
+      video_fourcc = static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));
+    }
+
+    if (FLAGS_use_camera) {
+      time_t now = time(0);
+      video_out_path =
+        PaddleX::generate_save_path(FLAGS_save_dir,
+                                    std::to_string(now) + ".mp4");
+    } else {
+      video_out_path =
+        PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_video_path);
+    }
+    video_out.open(video_out_path.c_str(),
+                   video_fourcc,
+                   video_fps,
+                   cv::Size(video_width, video_height),
+                   true);
+    if (!video_out.isOpened()) {
+      std::cout << "Create video writer failed!" << std::endl;
+      return -1;
+    }
+  }
+
+  PaddleX::ClsResult result;
+  cv::Mat frame;
+  int key;
+  while (capture.read(frame)) {
+    if (FLAGS_show_result || FLAGS_use_camera) {
+     key = cv::waitKey(1);
+     // 按下ESC退出整个程序,保存视频文件到磁盘
+     if (key == 27) {
+       break;
+     }
+    } else if (frame.empty()) {
+      break;
+    }
+    // 开始预测
+    model.predict(frame, &result);
+    // 可视化
+    cv::Mat vis_img = frame.clone();
+    auto colormap = PaddleX::GenerateColorMap(model.labels.size());
+    int c1 = colormap[3 * result.category_id + 0];
+    int c2 = colormap[3 * result.category_id + 1];
+    int c3 = colormap[3 * result.category_id + 2];
+    cv::Scalar text_color = cv::Scalar(c1, c2, c3);
+    std::string text = result.category;
+    text += std::to_string(static_cast<int>(result.score * 100)) + "%";
+    int font_face = cv::FONT_HERSHEY_SIMPLEX;
+    double font_scale = 0.5f;
+    float thickness = 0.5;
+    cv::Size text_size =
+        cv::getTextSize(text, font_face, font_scale, thickness, nullptr);
+    cv::Point origin;
+    origin.x = frame.cols / 2;
+    origin.y = frame.rows / 2;
+    cv::Rect text_back = cv::Rect(origin.x,
+                                  origin.y - text_size.height,
+                                  text_size.width,
+                                  text_size.height);
+    cv::rectangle(vis_img, text_back, text_color, -1);
+    cv::putText(vis_img,
+                text,
+                origin,
+                font_face,
+                font_scale,
+                cv::Scalar(255, 255, 255),
+                thickness);
+    if (FLAGS_show_result || FLAGS_use_camera) {
+      cv::imshow("video_classifier", vis_img);
+    }
+    if (FLAGS_save_result) {
+      video_out.write(vis_img);
+    }
+    std::cout << "Predict label: " << result.category
+              << ", label_id:" << result.category_id
+              << ", score: " << result.score << std::endl;
+  }
+  capture.release();
+  if (FLAGS_save_result) {
+    video_out.release();
+    std::cout << "Visualized output saved as " << video_out_path << std::endl;
+  }
+  if (FLAGS_show_result || FLAGS_use_camera) {
+    cv::destroyAllWindows();
+  }
+  return 0;
+}

+ 158 - 0
deploy/cpp/demo/video_detector.cpp

@@ -0,0 +1,158 @@
+//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <glog/logging.h>
+#include <omp.h>
+
+#include <algorithm>
+#include <chrono>  // NOLINT
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <vector>
+#include <utility>
+
+#include "include/paddlex/paddlex.h"
+#include "include/paddlex/visualize.h"
+
+#if defined(__arm__) || defined(__aarch64__)
+#include <opencv2/videoio/legacy/constants_c.h>
+#endif
+
+using namespace std::chrono;  // NOLINT
+
+DEFINE_string(model_dir, "", "Path of inference model");
+DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
+DEFINE_bool(use_trt, false, "Infering with TensorRT");
+DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_bool(use_camera, false, "Infering with Camera");
+DEFINE_int32(camera_id, 0, "Camera id");
+DEFINE_string(video_path, "", "Path of input video");
+DEFINE_bool(show_result, false, "show the result of each frame with a window");
+DEFINE_bool(save_result, true, "save the result of each frame to a video");
+DEFINE_string(key, "", "key of encryption");
+DEFINE_string(save_dir, "output", "Path to save visualized image");
+DEFINE_double(threshold,
+              0.5,
+              "The minimum scores of target boxes which are shown");
+
+int main(int argc, char** argv) {
+  // 解析命令行参数
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  if (FLAGS_model_dir == "") {
+    std::cerr << "--model_dir need to be defined" << std::endl;
+    return -1;
+  }
+  if (FLAGS_video_path == "" && FLAGS_use_camera == false) {
+    std::cerr << "--video_path or --use_camera need to be defined" << std::endl;
+    return -1;
+  }
+  // 加载模型
+  PaddleX::Model model;
+  model.Init(FLAGS_model_dir,
+             FLAGS_use_gpu,
+             FLAGS_use_trt,
+             FLAGS_gpu_id,
+             FLAGS_key);
+  // 打开视频流
+  cv::VideoCapture capture;
+  if (FLAGS_use_camera) {
+    capture.open(FLAGS_camera_id);
+    if (!capture.isOpened()) {
+      std::cout << "Can not open the camera "
+                << FLAGS_camera_id << "."
+                << std::endl;
+      return -1;
+    }
+  } else {
+    capture.open(FLAGS_video_path);
+    if (!capture.isOpened()) {
+      std::cout << "Can not open the video "
+                << FLAGS_video_path << "."
+                << std::endl;
+      return -1;
+    }
+  }
+
+  // 创建VideoWriter
+  cv::VideoWriter video_out;
+  std::string video_out_path;
+  if (FLAGS_save_result) {
+    // 获取视频流信息: 分辨率, 帧率
+    int video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
+    int video_height = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));
+    int video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));
+    int video_fourcc;
+    if (FLAGS_use_camera) {
+      video_fourcc = 828601953;
+    } else {
+      video_fourcc = static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));
+    }
+
+    if (FLAGS_use_camera) {
+      time_t now = time(0);
+      video_out_path =
+        PaddleX::generate_save_path(FLAGS_save_dir,
+                                    std::to_string(now) + ".mp4");
+    } else {
+      video_out_path =
+        PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_video_path);
+    }
+    video_out.open(video_out_path.c_str(),
+                   video_fourcc,
+                   video_fps,
+                   cv::Size(video_width, video_height),
+                   true);
+    if (!video_out.isOpened()) {
+      std::cout << "Create video writer failed!" << std::endl;
+      return -1;
+    }
+  }
+
+  PaddleX::DetResult result;
+  cv::Mat frame;
+  int key;
+  while (capture.read(frame)) {
+    if (FLAGS_show_result || FLAGS_use_camera) {
+     key = cv::waitKey(1);
+     // 按下ESC退出整个程序,保存视频文件到磁盘
+     if (key == 27) {
+       break;
+     }
+    } else if (frame.empty()) {
+      break;
+    }
+    model.predict(frame, &result);
+    // 可视化
+    cv::Mat vis_img =
+        PaddleX::Visualize(frame, result, model.labels, FLAGS_threshold);
+    if (FLAGS_show_result || FLAGS_use_camera) {
+      cv::imshow("video_detector", vis_img);
+    }
+    if (FLAGS_save_result) {
+      video_out.write(vis_img);
+    }
+    result.clear();
+  }
+  capture.release();
+  if (FLAGS_save_result) {
+    std::cout << "Visualized output saved as " << video_out_path << std::endl;
+    video_out.release();
+  }
+  if (FLAGS_show_result || FLAGS_use_camera) {
+    cv::destroyAllWindows();
+  }
+  return 0;
+}

+ 157 - 0
deploy/cpp/demo/video_segmenter.cpp

@@ -0,0 +1,157 @@
+//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <glog/logging.h>
+#include <omp.h>
+
+#include <algorithm>
+#include <chrono>  // NOLINT
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <vector>
+#include <utility>
+#include <ctime>
+#include "include/paddlex/paddlex.h"
+#include "include/paddlex/visualize.h"
+
+#if defined(__arm__) || defined(__aarch64__)
+#include <opencv2/videoio/legacy/constants_c.h>
+#endif
+
+using namespace std::chrono;  // NOLINT
+
+DEFINE_string(model_dir, "", "Path of inference model");
+DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
+DEFINE_bool(use_trt, false, "Infering with TensorRT");
+DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "key of encryption");
+DEFINE_bool(use_camera, false, "Infering with Camera");
+DEFINE_int32(camera_id, 0, "Camera id");
+DEFINE_string(video_path, "", "Path of input video");
+DEFINE_bool(show_result, false, "show the result of each frame with a window");
+DEFINE_bool(save_result, true, "save the result of each frame to a video");
+DEFINE_string(save_dir, "output", "Path to save visualized image");
+
+int main(int argc, char** argv) {
+  // 解析命令行参数
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  if (FLAGS_model_dir == "") {
+    std::cerr << "--model_dir need to be defined" << std::endl;
+    return -1;
+  }
+  if (FLAGS_video_path == "" && FLAGS_use_camera == false) {
+    std::cerr << "--video_path or --use_camera need to be defined" << std::endl;
+    return -1;
+  }
+
+  // 加载模型
+  PaddleX::Model model;
+  model.Init(FLAGS_model_dir,
+             FLAGS_use_gpu,
+             FLAGS_use_trt,
+             FLAGS_gpu_id,
+             FLAGS_key);
+  // 打开视频流
+  cv::VideoCapture capture;
+  if (FLAGS_use_camera) {
+    capture.open(FLAGS_camera_id);
+    if (!capture.isOpened()) {
+      std::cout << "Can not open the camera "
+                << FLAGS_camera_id << "."
+                << std::endl;
+      return -1;
+    }
+  } else {
+    capture.open(FLAGS_video_path);
+    if (!capture.isOpened()) {
+      std::cout << "Can not open the video "
+                << FLAGS_video_path << "."
+                << std::endl;
+      return -1;
+    }
+  }
+
+
+  // 创建VideoWriter
+  cv::VideoWriter video_out;
+  std::string video_out_path;
+  if (FLAGS_save_result) {
+    // 获取视频流信息: 分辨率, 帧率
+    int video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
+    int video_height = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));
+    int video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));
+    int video_fourcc;
+    if (FLAGS_use_camera) {
+      video_fourcc = 828601953;
+    } else {
+      video_fourcc = static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));
+    }
+
+    if (FLAGS_use_camera) {
+      time_t now = time(0);
+      video_out_path =
+        PaddleX::generate_save_path(FLAGS_save_dir,
+                                    std::to_string(now) + ".mp4");
+    } else {
+      video_out_path =
+        PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_video_path);
+    }
+    video_out.open(video_out_path.c_str(),
+                   video_fourcc,
+                   video_fps,
+                   cv::Size(video_width, video_height),
+                   true);
+    if (!video_out.isOpened()) {
+      std::cout << "Create video writer failed!" << std::endl;
+      return -1;
+    }
+  }
+
+  PaddleX::SegResult result;
+  cv::Mat frame;
+  int key;
+  while (capture.read(frame)) {
+    if (FLAGS_show_result || FLAGS_use_camera) {
+     key = cv::waitKey(1);
+     // 按下ESC退出整个程序,保存视频文件到磁盘
+     if (key == 27) {
+       break;
+     }
+    } else if (frame.empty()) {
+      break;
+    }
+    // 开始预测
+    model.predict(frame, &result);
+    // 可视化
+    cv::Mat vis_img = PaddleX::Visualize(frame, result, model.labels);
+    if (FLAGS_show_result || FLAGS_use_camera) {
+      cv::imshow("video_segmenter", vis_img);
+    }
+    if (FLAGS_save_result) {
+      video_out.write(vis_img);
+    }
+    result.clear();
+  }
+  capture.release();
+  if (FLAGS_save_result) {
+    video_out.release();
+    std::cout << "Visualized output saved as " << video_out_path << std::endl;
+  }
+  if (FLAGS_show_result || FLAGS_use_camera) {
+    cv::destroyAllWindows();
+  }
+  return 0;
+}

+ 5 - 4
deploy/cpp/scripts/bootstrap.sh

@@ -7,12 +7,13 @@ if [ ! -d "./paddlex-encryption" ]; then
 fi
 
 # download pre-compiled opencv lib
-OPENCV_URL=https://paddleseg.bj.bcebos.com/deploy/docker/opencv3gcc4.8.tar.bz2
-if [ ! -d "./deps/opencv3gcc4.8" ]; then
+#OPENCV_URL=https://paddleseg.bj.bcebos.com/deploy/docker/opencv3gcc4.8.tar.bz2
+OPENCV_URL=https://bj.bcebos.com/paddleseg/deploy/opencv3.4.6gcc4.8ffmpeg.tar.gz2
+if [ ! -d "./deps/opencv3.4.6gcc4.8ffmpeg/" ]; then
     mkdir -p deps
     cd deps
     wget -c ${OPENCV_URL}
-    tar xvfj opencv3gcc4.8.tar.bz2
-    rm -rf opencv3gcc4.8.tar.bz2
+    tar xvfj opencv3.4.6gcc4.8ffmpeg.tar.gz2
+    rm -rf opencv3.4.6gcc4.8ffmpeg.tar.gz2
     cd ..
 fi

+ 5 - 4
deploy/cpp/scripts/build.sh

@@ -1,5 +1,5 @@
 # 是否使用GPU(即是否使用 CUDA)
-WITH_GPU=OFF
+WITH_GPU=ON
 # 使用MKL or openblas
 WITH_MKL=ON
 # 是否集成 TensorRT(仅WITH_GPU=ON 有效)
@@ -7,14 +7,15 @@ WITH_TENSORRT=OFF
 # TensorRT 的路径,如果需要集成TensorRT,需修改为您实际安装的TensorRT路径
 TENSORRT_DIR=/root/projects/TensorRT/
 # Paddle 预测库路径, 请修改为您实际安装的预测库路径
-PADDLE_DIR=/root/projects/fluid_inference
+#PADDLE_DIR=/rrpn/my/qh_PaddleX/PaddleX/deploy/cpp/fluid_inference
+PADDLE_DIR=/rrpn/my/PaddleX/deploy/cpp/fluid_inference
 # Paddle 的预测库是否使用静态库来编译
 # 使用TensorRT时,Paddle的预测库通常为动态库
 WITH_STATIC_LIB=OFF
 # CUDA 的 lib 路径
 CUDA_LIB=/usr/local/cuda/lib64
 # CUDNN 的 lib 路径
-CUDNN_LIB=/usr/local/cuda/lib64
+CUDNN_LIB=/usr/lib/x86_64-linux-gnu/
 
 # 是否加载加密后的模型
 WITH_ENCRYPTION=ON
@@ -24,7 +25,7 @@ ENCRYPTION_DIR=$(pwd)/paddlex-encryption
 
 # OPENCV 路径, 如果使用自带预编译版本可不修改
 sh $(pwd)/scripts/bootstrap.sh  # 下载预编译版本的opencv
-OPENCV_DIR=$(pwd)/deps/opencv3gcc4.8/
+OPENCV_DIR=$(pwd)/deps/opencv3.4.6gcc4.8ffmpeg/
 
 # 以下无需改动
 rm -rf build

+ 39 - 4
docs/deploy/server/cpp/linux.md

@@ -116,7 +116,7 @@ yaml-cpp.zip文件下载后无需解压,在cmake/yaml.cmake中将`URL https://
 
 **在加载模型前,请检查你的模型目录中文件应该包括`model.yml`、`__model__`和`__params__`三个文件。如若不满足这个条件,请参考[模型导出为Inference文档](../../export_model.md)将模型导出为部署格式。**  
 
-编译成功后,预测demo的可执行程序分别为`build/demo/detector`,`build/demo/classifier`,`build/demo/segmenter`,用户可根据自己的模型类型选择,其主要命令参数说明如下:
+* 编译成功后,图片预测demo的可执行程序分别为`build/demo/detector`,`build/demo/classifier`,`build/demo/segmenter`,用户可根据自己的模型类型选择,其主要命令参数说明如下:
 
 |  参数   | 说明  |
 |  ----  | ----  |
@@ -130,7 +130,24 @@ yaml-cpp.zip文件下载后无需解压,在cmake/yaml.cmake中将`URL https://
 | key | 加密过程中产生的密钥信息,默认值为""表示加载的是未加密的模型 |
 | batch_size | 预测的批量大小,默认为1 |
 | thread_num | 预测的线程数,默认为cpu处理器个数 |
-| use_ir_optim | 是否使用图优化策略,支持值为0或1(默认值为1,图像分割默认值为0)|
+
+* 编译成功后,视频预测demo的可执行程序分别为`build/demo/video_detector`,`build/demo/video_classifier`,`build/demo/video_segmenter`,用户可根据自己的模型类型选择,其主要命令参数说明如下:
+
+|  参数   | 说明  |
+|  ----  | ----  |
+| model_dir  | 导出的预测模型所在路径 |
+| use_camera | 是否使用摄像头预测,支持值为0或1(默认值为0) |
+| camera_id | 摄像头设备ID,默认值为0 |
+| video_path | 视频文件的路径 |
+| use_gpu  | 是否使用 GPU 预测, 支持值为0或1(默认值为0) |
+| use_trt  | 是否使用 TensorRT 预测, 支持值为0或1(默认值为0) |
+| gpu_id  | GPU 设备ID, 默认值为0 |
+| show_result | 是否在屏幕上实时显示预测可视化结果(因加入了延迟处理,故显示结果不能反映真实的帧率),支持值为0或1(默认值为0) |
+| save_result | 是否将每帧的预测可视结果保存为视频文件,支持值为0或1(默认值为1) |
+| save_dir | 保存可视化结果的路径, 默认值为"output",**classifier无该参数** |
+| key | 加密过程中产生的密钥信息,默认值为""表示加载的是未加密的模型 |
+
+**注意:若系统无GUI,则不要将show_result设置为1。当使用摄像头预测时,按`ESC`键可关闭摄像头并退出预测程序。**
 
 ## 样例
 
@@ -138,7 +155,7 @@ yaml-cpp.zip文件下载后无需解压,在cmake/yaml.cmake中将`URL https://
 
 > 关于预测速度的说明:加载模型后前几张图片的预测速度会较慢,这是因为运行启动时涉及到内存显存初始化等步骤,通常在预测20-30张图片后模型的预测速度达到稳定。
 
-`样例一`:
+**样例一:**
 
 不使用`GPU`测试图片 `/root/projects/images/xiaoduxiong.jpeg`  
 
@@ -148,7 +165,7 @@ yaml-cpp.zip文件下载后无需解压,在cmake/yaml.cmake中将`URL https://
 图片文件`可视化预测结果`会保存在`save_dir`参数设置的目录下。
 
 
-`样例二`:
+**样例二:**
 
 使用`GPU`预测多个图片`/root/projects/image_list.txt`,image_list.txt内容的格式如下:
 ```
@@ -161,3 +178,21 @@ yaml-cpp.zip文件下载后无需解压,在cmake/yaml.cmake中将`URL https://
 ./build/demo/detector --model_dir=/root/projects/inference_model --image_list=/root/projects/images_list.txt --use_gpu=1 --save_dir=output --batch_size=2 --thread_num=2
 ```
 图片文件`可视化预测结果`会保存在`save_dir`参数设置的目录下。
+
+**样例三:**
+
+使用摄像头预测:
+
+```shell
+./build/demo/video_detector --model_dir=/root/projects/inference_model --use_camera=1 --use_gpu=1 --save_dir=output --save_result=1
+```
+当`save_result`设置为1时,`可视化预测结果`会以视频文件的格式保存在`save_dir`参数设置的目录下。如果系统有GUI,通过将`show_result`设置为1在屏幕上观看可视化预测结果。
+
+**样例四:**
+
+对视频文件进行预测:
+
+```shell
+./build/demo/video_detector --model_dir=/root/projects/inference_model --video_path=/path/to/video_file --use_gpu=1 --save_dir=output --show_result=1 --save_result=1
+```
+当`save_result`设置为1时,`可视化预测结果`会以视频文件的格式保存在`save_dir`参数设置的目录下。如果系统有GUI,通过将`show_result`设置为1在屏幕上观看可视化预测结果。

+ 34 - 2
docs/deploy/server/cpp/windows.md

@@ -101,7 +101,7 @@ D:
 cd D:\projects\PaddleX\deploy\cpp\out\build\x64-Release
 ```
 
-编译成功后,预测demo的入口程序为`paddlex_inference\detector.exe`,`paddlex_inference\classifier.exe`,`paddlex_inference\segmenter.exe`,用户可根据自己的模型类型选择,其主要命令参数说明如下:
+* 编译成功后,图片预测demo的入口程序为`paddlex_inference\detector.exe`,`paddlex_inference\classifier.exe`,`paddlex_inference\segmenter.exe`,用户可根据自己的模型类型选择,其主要命令参数说明如下:
 
 |  参数   | 说明  |
 |  ----  | ----  |
@@ -114,7 +114,24 @@ cd D:\projects\PaddleX\deploy\cpp\out\build\x64-Release
 | key | 加密过程中产生的密钥信息,默认值为""表示加载的是未加密的模型 |
 | batch_size | 预测的批量大小,默认为1 |
 | thread_num | 预测的线程数,默认为cpu处理器个数 |
-| use_ir_optim | 是否使用图优化策略,支持值为0或1(默认值为1,图像分割默认值为0)|
+
+* 编译成功后,视频预测demo的入口程序为`paddlex_inference\video_detector.exe`,`paddlex_inference\video_classifier.exe`,`paddlex_inference\video_segmenter.exe`,用户可根据自己的模型类型选择,其主要命令参数说明如下:
+
+|  参数   | 说明  |
+|  ----  | ----  |
+| model_dir  | 导出的预测模型所在路径 |
+| use_camera | 是否使用摄像头预测,支持值为0或1(默认值为0) |
+| camera_id | 摄像头设备ID,默认值为0 |
+| video_path | 视频文件的路径 |
+| use_gpu  | 是否使用 GPU 预测, 支持值为0或1(默认值为0) |
+| gpu_id  | GPU 设备ID, 默认值为0 |
+| show_result | 对视频文件做预测时,是否在屏幕上实时显示预测可视化结果(因加入了延迟处理,故显示结果不能反映真实的帧率),支持值为0或1(默认值为0) |
+| save_result | 是否将每帧的预测可视结果保存为视频文件,支持值为0或1(默认值为1) |
+| save_dir | 保存可视化结果的路径, 默认值为"output",classifier无该参数 |
+| key | 加密过程中产生的密钥信息,默认值为""表示加载的是未加密的模型 |
+
+**注意:若系统无GUI,则不要将show_result设置为1。当使用摄像头预测时,按`ESC`键可关闭摄像头并退出预测程序。**
+
 
 ## 样例
 
@@ -157,3 +174,18 @@ D:\images\xiaoduxiongn.jpeg
 ```
 
 `--key`传入加密工具输出的密钥,例如`kLAl1qOs5uRbFt0/RrIDTZW2+tOf5bzvUIaHGF8lJ1c=`, 图片文件可视化预测结果会保存在`save_dir`参数设置的目录下。
+
+### 样例四:(使用未加密的模型开启摄像头预测)
+
+```shell
+.\paddlex_inference\video_detector.exe --model_dir=D:\projects\inference_model --use_camera=1 --use_gpu=1 --save_dir=output
+```
+当`save_result`设置为1时,`可视化预测结果`会以视频文件的格式保存在`save_dir`参数设置的目录下。如果系统有GUI,通过将`show_result`设置为1在屏幕上观看可视化预测结果。
+
+### 样例五:(使用未加密的模型对视频文件做预测)
+
+
+```shell
+.\paddlex_inference\video_detector.exe --model_dir=D:\projects\inference_model --video_path=D:\projects\video_test.mp4 --use_gpu=1 --show_result=1 --save_dir=output
+```
+当`save_result`设置为1时,`可视化预测结果`会以视频文件的格式保存在`save_dir`参数设置的目录下。如果系统有GUI,通过将`show_result`设置为1在屏幕上观看可视化预测结果。

+ 66 - 9
docs/examples/human_segmentation.md

@@ -1,12 +1,12 @@
 # 人像分割模型
 
-本教程基于PaddleX核心分割模型实现人像分割,开放预训练模型和测试数据、支持视频流人像分割、提供模型Fine-tune到Paddle Lite移动端部署的全流程应用指南。
+本教程基于PaddleX核心分割模型实现人像分割,开放预训练模型和测试数据、支持视频流人像分割、提供模型Fine-tune到Paddle Lite移动端及Nvidia Jetson嵌入式设备部署的全流程应用指南。
 
 ## 预训练模型和测试数据
 
 #### 预训练模型
 
-本案例开放了两个在大规模人像数据集上训练好的模型,以满足服务器端场景和移动端场景的需求。使用这些模型可以快速体验视频流人像分割,也可以部署到移动端进行实时人像分割,也可以用于完成模型Fine-tuning。
+本案例开放了两个在大规模人像数据集上训练好的模型,以满足服务器端场景和移动端场景的需求。使用这些模型可以快速体验视频流人像分割,也可以部署到移动端或嵌入式设备进行实时人像分割,也可以用于完成模型Fine-tuning。
 
 | 模型类型 | Checkpoint Parameter | Inference Model | Quant Inference Model | 备注 |
 | --- | --- | --- | ---| --- |
@@ -243,15 +243,17 @@ python quant_offline.py --model_dir output/best_model \
 * `--save_dir`: 量化模型保存路径
 * `--image_shape`: 网络输入图像大小(w, h)
 
-## Paddle Lite移动端部署
+## 推理部署
+
+### Paddle Lite移动端部署
 
 本案例将人像分割模型在移动端进行部署,部署流程展示如下,通用的移动端部署流程参见[Paddle Lite移动端部署](../../docs/deploy/paddlelite/android.md)。
 
-### 1. 将PaddleX模型导出为inference模型
+#### 1. 将PaddleX模型导出为inference模型
 
 本案例使用humanseg_mobile_quant预训练模型,该模型已经是inference模型,不需要再执行模型导出步骤。如果不使用预训练模型,则执行上一章节`模型训练`中的`模型导出`将自己训练的模型导出为inference格式。
 
-### 2. 将inference模型优化为Paddle Lite模型
+#### 2. 将inference模型优化为Paddle Lite模型
 
 下载并解压 [模型优化工具opt](https://bj.bcebos.com/paddlex/deploy/lite/model_optimize_tool_11cbd50e.tar.gz),进入模型优化工具opt所在路径后,执行以下命令:
 
@@ -273,16 +275,16 @@ python quant_offline.py --model_dir output/best_model \
 
 更详细的使用方法和参数含义请参考: [使用opt转化模型](https://paddle-lite.readthedocs.io/zh/latest/user_guides/opt/opt_bin.html)
 
-### 3. 移动端预测
+#### 3. 移动端预测
 
 PaddleX提供了基于PaddleX Android SDK的安卓demo,可供用户体验图像分类、目标检测、实例分割和语义分割,该demo位于`PaddleX/deploy/lite/android/demo`,用户将模型、配置文件和测试图片拷贝至该demo下进行预测。
 
-#### 3.1 前置依赖
+##### 3.1 前置依赖
 
 * Android Studio 3.4
 * Android手机或开发板
 
-#### 3.2 拷贝模型、配置文件和测试图片
+##### 3.2 拷贝模型、配置文件和测试图片
 
 * 将Lite模型(.nb文件)拷贝到`PaddleX/deploy/lite/android/demo/app/src/main/assets/model/`目录下, 根据.nb文件的名字,修改文件`PaddleX/deploy/lite/android/demo/app/src/main/res/values/strings.xml`中的`MODEL_PATH_DEFAULT`;
 
@@ -290,7 +292,7 @@ PaddleX提供了基于PaddleX Android SDK的安卓demo,可供用户体验图
 
 * 将测试图片拷贝到`PaddleX/deploy/lite/android/demo/app/src/main/assets/images/`目录下,根据图片文件的名字,修改文件`PaddleX/deploy/lite/android/demo/app/src/main/res/values/strings.xml`中的`IMAGE_PATH_DEFAULT`。
 
-#### 3.3 导入工程并运行
+##### 3.3 导入工程并运行
 
 * 打开Android Studio,在"Welcome to Android Studio"窗口点击"Open an existing Android Studio project",在弹出的路径选择窗口中进入`PaddleX/deploy/lite/android/demo`目录,然后点击右下角的"Open"按钮,导入工程;
 
@@ -303,3 +305,58 @@ PaddleX提供了基于PaddleX Android SDK的安卓demo,可供用户体验图
 测试图片及其分割结果如下所示:
 
 ![](./images/beauty.png)
+
+### Nvidia Jetson嵌入式设备部署
+
+#### c++部署
+
+step 1. 下载PaddleX源码
+
+```
+git clone https://github.com/PaddlePaddle/PaddleX
+```
+
+step 2. 将`PaddleX/examples/human_segmentation/deploy/cpp`下的`human_segmenter.cpp`和`CMakeList.txt`拷贝至`PaddleX/deploy/cpp`目录下,拷贝之前可以将`PaddleX/deploy/cpp`下原本的`CMakeList.txt`做好备份。
+
+step 3. 按照[Nvidia Jetson开发板部署](../deploy/nvidia-jetson.md)中的Step2至Step3完成C++预测代码的编译。
+
+step 4. 编译成功后,可执行程序为`build/human_segmenter`,其主要命令参数说明如下:
+
+  | 参数    | 说明   |
+  | ---- | ---- |
+  |  model_dir    | 人像分割模型路径     |
+  | use_gpu	| 是否使用 GPU 预测, 支持值为0或1(默认值为0)|
+  | gpu_id	| GPU 设备ID, 默认值为0 |
+  | use_camera | 是否使用摄像头采集图片,支持值为0或1(默认值为0) |
+  | camera_id | 摄像头设备ID,默认值为0 |
+  | video_path | 视频文件的路径 |
+  | show_result | 对视频文件做预测时,是否在屏幕上实时显示预测可视化结果,支持值为0或1(默认值为0) |
+  | save_result | 是否将每帧的预测可视结果保存为视频文件,支持值为0或1(默认值为1) |
+  |	image            | 待预测的图片路径  |
+  | save_dir	| 保存可视化结果的路径, 默认值为"output"|
+
+step 5. 推理预测
+
+  用于部署推理的模型应为inference格式,本案例使用humanseg_server_inference预训练模型,该模型已经是inference模型,不需要再执行模型导出步骤。如果不使用预训练模型,则执行第2章节`模型训练`中的`模型导出`将自己训练的模型导出为inference格式。
+
+  * 使用未加密的模型对单张图片做预测
+
+  待测试图片位于本案例提供的测试数据中,可以替换成自己的图片。
+
+  ```shell
+  ./build/human_segmenter --model_dir=/path/to/humanseg_server_inference --image=/path/to/data/mini_supervisely/Images/pexels-photo-63776.png --use_gpu=1 --save_dir=output
+  ```
+
+  * 使用未加密的模型开启摄像头做预测
+
+  ```shell
+  ./build/human_segmenter --model_dir=/path/to/humanseg_server_inference --use_camera=1 --save_result=1 --use_gpu=1 --save_dir=output
+  ```
+
+  * 使用未加密的模型对视频文件做预测
+
+  待测试视频文件位于本案例提供的测试数据中,可以替换成自己的视频文件。
+
+  ```shell
+  ./build/human_segmenter --model_dir=/path/to/humanseg_server_inference --video_path=/path/to/data/mini_supervisely/video_test.mp4  --save_result=1 --use_gpu=1 --save_dir=output
+  ```

+ 13 - 24
docs/examples/meter_reader.md

@@ -46,13 +46,13 @@
 
 #### 测试表计读数
 
-1. 下载PaddleX源码:
+step 1. 下载PaddleX源码:
 
 ```
 git clone https://github.com/PaddlePaddle/PaddleX
 ```
 
-2. 预测执行文件位于`PaddleX/examples/meter_reader/`,进入该目录:
+step 2. 预测执行文件位于`PaddleX/examples/meter_reader/`,进入该目录:
 
 ```
 cd PaddleX/examples/meter_reader/
@@ -76,7 +76,7 @@ cd PaddleX/examples/meter_reader/
 | use_erode | 是否使用图像腐蚀对分割预测图进行细分,默认为False |
 | erode_kernel | 图像腐蚀操作时的卷积核大小,默认值为4 |
 
-3. 预测
+step 3. 预测
 
 若要使用GPU,则指定GPU卡号(以0号卡为例):
 
@@ -112,17 +112,17 @@ python3 reader_infer.py --detector_dir /path/to/det_inference_model --segmenter_
 
 #### c++部署
 
-1. 下载PaddleX源码:
+step 1. 下载PaddleX源码:
 
 ```
 git clone https://github.com/PaddlePaddle/PaddleX
 ```
 
-2. 将`PaddleX\examples\meter_reader\deploy\cpp`下的`meter_reader`文件夹和`CMakeList.txt`拷贝至`PaddleX\deploy\cpp`目录下,拷贝之前可以将`PaddleX\deploy\cpp`下原本的`CMakeList.txt`做好备份。
+step 2. 将`PaddleX\examples\meter_reader\deploy\cpp`下的`meter_reader`文件夹和`CMakeList.txt`拷贝至`PaddleX\deploy\cpp`目录下,拷贝之前可以将`PaddleX\deploy\cpp`下原本的`CMakeList.txt`做好备份。
 
-3. 按照[Windows平台部署](../deploy/server/cpp/windows.md)中的Step2至Step4完成C++预测代码的编译。
+step 3. 按照[Windows平台部署](../deploy/server/cpp/windows.md)中的Step2至Step4完成C++预测代码的编译。
 
-4. 编译成功后,可执行文件在`out\build\x64-Release`目录下,打开`cmd`,并切换到该目录:
+step 4. 编译成功后,可执行文件在`out\build\x64-Release`目录下,打开`cmd`,并切换到该目录:
 
    ```
    cd PaddleX\deploy\cpp\out\build\x64-Release
@@ -149,7 +149,7 @@ git clone https://github.com/PaddlePaddle/PaddleX
    | erode_kernel | 图像腐蚀操作时的卷积核大小,默认值为4 |
    | score_threshold | 检测模型输出结果中,预测得分低于该阈值的框将被滤除,默认值为0.5|
 
-5. 推理预测:
+step 5. 推理预测:
 
   用于部署推理的模型应为inference格式,本案例提供的预训练模型均为inference格式,如若是重新训练的模型,需参考[部署模型导出](../deploy/export_model.md)将模型导出为inference格式。
 
@@ -183,17 +183,17 @@ git clone https://github.com/PaddlePaddle/PaddleX
 
 #### c++部署
 
-1. 下载PaddleX源码:
+step 1. 下载PaddleX源码:
 
 ```
 git clone https://github.com/PaddlePaddle/PaddleX
 ```
 
-2. 将`PaddleX/examples/meter_reader/deploy/cpp`下的`meter_reader`文件夹和`CMakeList.txt`拷贝至`PaddleX/deploy/cpp`目录下,拷贝之前可以将`PaddleX/deploy/cpp`下原本的`CMakeList.txt`做好备份。
+step 2. 将`PaddleX/examples/meter_reader/deploy/cpp`下的`meter_reader`文件夹和`CMakeList.txt`拷贝至`PaddleX/deploy/cpp`目录下,拷贝之前可以将`PaddleX/deploy/cpp`下原本的`CMakeList.txt`做好备份。
 
-3. 按照[Nvidia Jetson开发板部署](../deploy/nvidia-jetson.md)中的Step2至Step3完成C++预测代码的编译。
+step 3. 按照[Nvidia Jetson开发板部署](../deploy/nvidia-jetson.md)中的Step2至Step3完成C++预测代码的编译。
 
-4. 编译成功后,可执行程为`build/meter_reader/meter_reader`,其主要命令参数说明如下:
+step 4. 编译成功后,可执行程序为`build/meter_reader/meter_reader`,其主要命令参数说明如下:
 
   | 参数    | 说明   |
   | ---- | ---- |
@@ -204,8 +204,6 @@ git clone https://github.com/PaddlePaddle/PaddleX
   | use_gpu	| 是否使用 GPU 预测, 支持值为0或1(默认值为0)|
   | gpu_id	| GPU 设备ID, 默认值为0 |
   | save_dir	| 保存可视化结果的路径, 默认值为"output"|
-  | det_key	| 检测模型加密过程中产生的密钥信息,默认值为""表示加载的是未加密的检测模型 |
-  | seg_key	| 分割模型加密过程中产生的密钥信息,默认值为""表示加载的是未加密的分割模型 |
   | seg_batch_size | 分割的批量大小,默认为2 |
   | thread_num	| 分割预测的线程数,默认为cpu处理器个数 |
   | use_camera | 是否使用摄像头采集图片,支持值为0或1(默认值为0) |
@@ -214,7 +212,7 @@ git clone https://github.com/PaddlePaddle/PaddleX
   | erode_kernel | 图像腐蚀操作时的卷积核大小,默认值为4 |
   | score_threshold | 检测模型输出结果中,预测得分低于该阈值的框将被滤除,默认值为0.5|
 
-5. 推理预测:
+step 5. 推理预测:
 
   用于部署推理的模型应为inference格式,本案例提供的预训练模型均为inference格式,如若是重新训练的模型,需参考[部署模型导出](../deploy/export_model.md)将模型导出为inference格式。
 
@@ -236,15 +234,6 @@ git clone https://github.com/PaddlePaddle/PaddleX
   ./build/meter_reader/meter_reader --det_model_dir=/path/to/det_inference_model --seg_model_dir=/path/to/seg_inference_model --use_camera=1 --use_gpu=1 --use_erode=1 --save_dir=output
   ```
 
-  * 使用加密后的模型对单张图片做预测
-
-  如果未对模型进行加密,请参考[加密PaddleX模型](../deploy/server/encryption.html#paddlex)对模型进行加密。例如加密后的检测模型所在目录为`/path/to/encrypted_det_inference_model`,密钥为`yEBLDiBOdlj+5EsNNrABhfDuQGkdcreYcHcncqwdbx0=`;加密后的分割模型所在目录为`/path/to/encrypted_seg_inference_model`,密钥为`DbVS64I9pFRo5XmQ8MNV2kSGsfEr4FKA6OH9OUhRrsY=`
-
-  ```shell
-  ./build/meter_reader/meter_reader --det_model_dir=/path/to/encrypted_det_inference_model --seg_model_dir=/path/to/encrypted_seg_inference_model --image=/path/to/test.jpg --use_gpu=1 --use_erode=1 --save_dir=output --det_key yEBLDiBOdlj+5EsNNrABhfDuQGkdcreYcHcncqwdbx0= --seg_key DbVS64I9pFRo5XmQ8MNV2kSGsfEr4FKA6OH9OUhRrsY=
-  ```
-
-
 ## 模型训练
 
 

+ 321 - 0
examples/human_segmentation/deploy/cpp/CMakeLists.txt

@@ -0,0 +1,321 @@
+cmake_minimum_required(VERSION 3.0)
+project(PaddleX CXX C)
+
+option(WITH_MKL        "Compile human_segmenter with MKL/OpenBlas support, default use MKL."          ON)
+option(WITH_GPU        "Compile human_segmenter with GPU/CPU, default use CPU."                    ON)
+if (NOT WIN32)
+    option(WITH_STATIC_LIB "Compile human_segmenter with static/shared library, default use static."   OFF)
+else()
+    option(WITH_STATIC_LIB "Compile human_segmenter with static/shared library, default use static."   ON)
+endif()
+option(WITH_TENSORRT "Compile human_segmenter with TensorRT."   OFF)
+option(WITH_ENCRYPTION "Compile human_segmenter with encryption tool."   OFF)
+
+SET(TENSORRT_DIR "" CACHE PATH "Location of libraries")
+SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
+SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
+SET(ENCRYPTION_DIR "" CACHE PATH "Location of libraries")
+SET(CUDA_LIB "" CACHE PATH "Location of libraries")
+
+if (NOT WIN32)
+    set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+    set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+else()
+    set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+    set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+    set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+endif()
+
+if (NOT WIN32)
+    SET(YAML_BUILD_TYPE ON CACHE BOOL "yaml build shared library.")
+else()
+    SET(YAML_BUILD_TYPE OFF CACHE BOOL "yaml build shared library.")
+endif()
+include(cmake/yaml-cpp.cmake)
+
+include_directories("${CMAKE_SOURCE_DIR}/")
+include_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/src/ext-yaml-cpp/include")
+link_directories("${CMAKE_CURRENT_BINARY_DIR}/ext/yaml-cpp/lib")
+
+macro(safe_set_static_flag)
+    foreach(flag_var
+        CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
+        CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
+      if(${flag_var} MATCHES "/MD")
+        string(REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
+      endif(${flag_var} MATCHES "/MD")
+    endforeach(flag_var)
+endmacro()
+
+
+if (WITH_ENCRYPTION)
+add_definitions( -DWITH_ENCRYPTION=${WITH_ENCRYPTION})
+endif()
+
+if (WITH_MKL)
+    ADD_DEFINITIONS(-DUSE_MKL)
+endif()
+
+if (NOT DEFINED PADDLE_DIR OR ${PADDLE_DIR} STREQUAL "")
+    message(FATAL_ERROR "please set PADDLE_DIR with -DPADDLE_DIR=/path/paddle_influence_dir")
+endif()
+
+if (NOT (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64"))
+  if (NOT DEFINED OPENCV_DIR OR ${OPENCV_DIR} STREQUAL "")
+    message(FATAL_ERROR "please set OPENCV_DIR with -DOPENCV_DIR=/path/opencv")
+  endif()
+endif()
+
+include_directories("${CMAKE_SOURCE_DIR}/")
+include_directories("${PADDLE_DIR}/")
+include_directories("${PADDLE_DIR}/third_party/install/protobuf/include")
+include_directories("${PADDLE_DIR}/third_party/install/glog/include")
+include_directories("${PADDLE_DIR}/third_party/install/gflags/include")
+include_directories("${PADDLE_DIR}/third_party/install/xxhash/include")
+if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/include")
+    include_directories("${PADDLE_DIR}/third_party/install/snappy/include")
+endif()
+if(EXISTS "${PADDLE_DIR}/third_party/install/snappystream/include")
+    include_directories("${PADDLE_DIR}/third_party/install/snappystream/include")
+endif()
+# zlib does not exist in 1.8.1
+if (EXISTS "${PADDLE_DIR}/third_party/install/zlib/include")
+    include_directories("${PADDLE_DIR}/third_party/install/zlib/include")
+endif()
+
+include_directories("${PADDLE_DIR}/third_party/boost")
+include_directories("${PADDLE_DIR}/third_party/eigen3")
+
+if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib")
+    link_directories("${PADDLE_DIR}/third_party/install/snappy/lib")
+endif()
+if(EXISTS "${PADDLE_DIR}/third_party/install/snappystream/lib")
+    link_directories("${PADDLE_DIR}/third_party/install/snappystream/lib")
+endif()
+
+if (EXISTS "${PADDLE_DIR}/third_party/install/zlib/lib")
+    link_directories("${PADDLE_DIR}/third_party/install/zlib/lib")
+endif()
+
+link_directories("${PADDLE_DIR}/third_party/install/protobuf/lib")
+link_directories("${PADDLE_DIR}/third_party/install/glog/lib")
+link_directories("${PADDLE_DIR}/third_party/install/gflags/lib")
+link_directories("${PADDLE_DIR}/third_party/install/xxhash/lib")
+link_directories("${PADDLE_DIR}/paddle/lib/")
+link_directories("${CMAKE_CURRENT_BINARY_DIR}")
+
+if (WIN32)
+  include_directories("${PADDLE_DIR}/paddle/fluid/inference")
+  include_directories("${PADDLE_DIR}/paddle/include")
+  link_directories("${PADDLE_DIR}/paddle/fluid/inference")
+  find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/build/ NO_DEFAULT_PATH)
+  unset(OpenCV_DIR CACHE)
+else ()
+  if (${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64") # x86_64 aarch64
+    set(OpenCV_INCLUDE_DIRS "/usr/include/opencv4")
+    file(GLOB OpenCV_LIBS /usr/lib/aarch64-linux-gnu/libopencv_*${CMAKE_SHARED_LIBRARY_SUFFIX})
+    message("OpenCV libs: ${OpenCV_LIBS}")
+  else()
+    find_package(OpenCV REQUIRED PATHS ${OPENCV_DIR}/share/OpenCV NO_DEFAULT_PATH)
+  endif()
+  include_directories("${PADDLE_DIR}/paddle/include")
+  link_directories("${PADDLE_DIR}/paddle/lib")
+endif ()
+include_directories(${OpenCV_INCLUDE_DIRS})
+
+if (WIN32)
+    add_definitions("/DGOOGLE_GLOG_DLL_DECL=")
+    find_package(OpenMP REQUIRED)
+    if (OPENMP_FOUND)
+        message("OPENMP FOUND")
+        set(CMAKE_C_FLAGS_DEBUG   "${CMAKE_C_FLAGS_DEBUG} ${OpenMP_C_FLAGS}")
+        set(CMAKE_C_FLAGS_RELEASE  "${CMAKE_C_FLAGS_RELEASE} ${OpenMP_C_FLAGS}")
+        set(CMAKE_CXX_FLAGS_DEBUG  "${CMAKE_CXX_FLAGS_DEBUG} ${OpenMP_CXX_FLAGS}")
+        set(CMAKE_CXX_FLAGS_RELEASE   "${CMAKE_CXX_FLAGS_RELEASE} ${OpenMP_CXX_FLAGS}")
+    endif()
+    set(CMAKE_C_FLAGS_DEBUG   "${CMAKE_C_FLAGS_DEBUG} /bigobj /MTd")
+    set(CMAKE_C_FLAGS_RELEASE  "${CMAKE_C_FLAGS_RELEASE} /bigobj /MT")
+    set(CMAKE_CXX_FLAGS_DEBUG  "${CMAKE_CXX_FLAGS_DEBUG} /bigobj /MTd")
+    set(CMAKE_CXX_FLAGS_RELEASE   "${CMAKE_CXX_FLAGS_RELEASE} /bigobj /MT")
+    if (WITH_STATIC_LIB)
+        safe_set_static_flag()
+        add_definitions(-DSTATIC_LIB)
+    endif()
+else()
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O2 -fopenmp -std=c++11")
+    set(CMAKE_STATIC_LIBRARY_PREFIX "")
+endif()
+
+if (WITH_GPU)
+    if (NOT DEFINED CUDA_LIB OR ${CUDA_LIB} STREQUAL "")
+        message(FATAL_ERROR "please set CUDA_LIB with -DCUDA_LIB=/path/cuda/lib64")
+    endif()
+    if (NOT WIN32)
+        if (NOT DEFINED CUDNN_LIB)
+            message(FATAL_ERROR "please set CUDNN_LIB with -DCUDNN_LIB=/path/cudnn/")
+        endif()
+    endif(NOT WIN32)
+endif()
+
+
+if (NOT WIN32)
+  if (WITH_TENSORRT AND WITH_GPU)
+      include_directories("${TENSORRT_DIR}/include")
+      link_directories("${TENSORRT_DIR}/lib")
+  endif()
+endif(NOT WIN32)
+
+if (NOT WIN32)
+    set(NGRAPH_PATH "${PADDLE_DIR}/third_party/install/ngraph")
+    if(EXISTS ${NGRAPH_PATH})
+        include(GNUInstallDirs)
+        include_directories("${NGRAPH_PATH}/include")
+        link_directories("${NGRAPH_PATH}/${CMAKE_INSTALL_LIBDIR}")
+        set(NGRAPH_LIB ${NGRAPH_PATH}/${CMAKE_INSTALL_LIBDIR}/libngraph${CMAKE_SHARED_LIBRARY_SUFFIX})
+    endif()
+endif()
+
+if(WITH_MKL)
+  include_directories("${PADDLE_DIR}/third_party/install/mklml/include")
+  if (WIN32)
+    set(MATH_LIB ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.lib
+            ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.lib)
+  else ()
+    set(MATH_LIB ${PADDLE_DIR}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX}
+            ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5${CMAKE_SHARED_LIBRARY_SUFFIX})
+    execute_process(COMMAND cp -r ${PADDLE_DIR}/third_party/install/mklml/lib/libmklml_intel${CMAKE_SHARED_LIBRARY_SUFFIX} /usr/lib)
+  endif ()
+  set(MKLDNN_PATH "${PADDLE_DIR}/third_party/install/mkldnn")
+  if(EXISTS ${MKLDNN_PATH})
+    include_directories("${MKLDNN_PATH}/include")
+    if (WIN32)
+      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/mkldnn.lib)
+    else ()
+      set(MKLDNN_LIB ${MKLDNN_PATH}/lib/libmkldnn.so.0)
+    endif ()
+  endif()
+else()
+  set(MATH_LIB ${PADDLE_DIR}/third_party/install/openblas/lib/libopenblas${CMAKE_STATIC_LIBRARY_SUFFIX})
+endif()
+
+if (WIN32)
+    if(EXISTS "${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX}")
+        set(DEPS
+            ${PADDLE_DIR}/paddle/fluid/inference/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+    else()
+        set(DEPS
+            ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+    endif()
+endif()
+
+if(WITH_STATIC_LIB)
+    set(DEPS
+        ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_STATIC_LIBRARY_SUFFIX})
+else()
+    if (NOT WIN32)
+      set(DEPS
+          ${PADDLE_DIR}/paddle/lib/libpaddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+    else()
+      set(DEPS
+          ${PADDLE_DIR}/paddle/lib/paddle_fluid${CMAKE_SHARED_LIBRARY_SUFFIX})
+    endif()
+endif()
+
+if (NOT WIN32)
+    set(DEPS ${DEPS}
+        ${MATH_LIB} ${MKLDNN_LIB}
+        glog gflags protobuf z xxhash yaml-cpp
+        )
+    if(EXISTS "${PADDLE_DIR}/third_party/install/snappystream/lib")
+        set(DEPS ${DEPS} snappystream)
+    endif()
+    if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib")
+        set(DEPS ${DEPS} snappy)
+    endif()
+else()
+    set(DEPS ${DEPS}
+        ${MATH_LIB} ${MKLDNN_LIB}
+        glog gflags_static libprotobuf xxhash libyaml-cppmt)
+
+    if (EXISTS "${PADDLE_DIR}/third_party/install/zlib/lib")
+      set(DEPS ${DEPS} zlibstatic)
+    endif()
+    set(DEPS ${DEPS} libcmt shlwapi)
+    if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib")
+        set(DEPS ${DEPS} snappy)
+    endif()
+    if (EXISTS "${PADDLE_DIR}/third_party/install/snappystream/lib")
+        set(DEPS ${DEPS} snappystream)
+    endif()
+endif(NOT WIN32)
+
+if(WITH_GPU)
+  if(NOT WIN32)
+    if (WITH_TENSORRT)
+      set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer${CMAKE_SHARED_LIBRARY_SUFFIX})
+      set(DEPS ${DEPS} ${TENSORRT_DIR}/lib/libnvinfer_plugin${CMAKE_SHARED_LIBRARY_SUFFIX})
+    endif()
+    set(DEPS ${DEPS} ${CUDA_LIB}/libcudart${CMAKE_SHARED_LIBRARY_SUFFIX})
+    set(DEPS ${DEPS} ${CUDNN_LIB}/libcudnn${CMAKE_SHARED_LIBRARY_SUFFIX})
+  else()
+    set(DEPS ${DEPS} ${CUDA_LIB}/cudart${CMAKE_STATIC_LIBRARY_SUFFIX} )
+    set(DEPS ${DEPS} ${CUDA_LIB}/cublas${CMAKE_STATIC_LIBRARY_SUFFIX} )
+    set(DEPS ${DEPS} ${CUDA_LIB}/cudnn${CMAKE_STATIC_LIBRARY_SUFFIX})
+  endif()
+endif()
+
+if(WITH_ENCRYPTION)
+  if(NOT WIN32)
+      include_directories("${ENCRYPTION_DIR}/include")
+      link_directories("${ENCRYPTION_DIR}/lib")
+      set(DEPS ${DEPS} ${ENCRYPTION_DIR}/lib/libpmodel-decrypt${CMAKE_SHARED_LIBRARY_SUFFIX})
+  else()
+      include_directories("${ENCRYPTION_DIR}/include")
+      link_directories("${ENCRYPTION_DIR}/lib")
+      set(DEPS ${DEPS} ${ENCRYPTION_DIR}/lib/pmodel-decrypt${CMAKE_STATIC_LIBRARY_SUFFIX})
+  endif()
+endif()
+
+if (NOT WIN32)
+    set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
+    set(DEPS ${DEPS} ${EXTERNAL_LIB})
+endif()
+
+set(DEPS ${DEPS} ${OpenCV_LIBS})
+add_library(paddlex_inference SHARED src/visualize.cpp src/transforms.cpp src/paddlex.cpp)
+ADD_DEPENDENCIES(paddlex_inference ext-yaml-cpp)
+target_link_libraries(paddlex_inference ${DEPS})
+
+add_executable(human_segmenter human_segmenter.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+ADD_DEPENDENCIES(human_segmenter ext-yaml-cpp)
+target_link_libraries(human_segmenter ${DEPS})
+
+
+if (WIN32 AND WITH_MKL)
+    add_custom_command(TARGET human_segmenter POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./libiomp5md.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./mkldnn.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ./release/mklml.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mklml/lib/libiomp5md.dll ./release/libiomp5md.dll
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different ${PADDLE_DIR}/third_party/install/mkldnn/lib/mkldnn.dll ./release/mkldnn.dll
+    )
+    # for encryption
+    if (EXISTS "${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll")
+        add_custom_command(TARGET human_segmenter POST_BUILD
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll ./pmodel-decrypt.dll
+            COMMAND ${CMAKE_COMMAND} -E copy_if_different ${ENCRYPTION_DIR}/lib/pmodel-decrypt.dll ./release/pmodel-decrypt.dll
+        )
+    endif()
+endif()
+
+file(COPY  "${CMAKE_SOURCE_DIR}/include/paddlex/visualize.h"
+DESTINATION  "${CMAKE_BINARY_DIR}/include/"  )
+file(COPY  "${CMAKE_SOURCE_DIR}/include/paddlex/config_parser.h"
+DESTINATION  "${CMAKE_BINARY_DIR}/include/"  )
+file(COPY  "${CMAKE_SOURCE_DIR}/include/paddlex/transforms.h"
+DESTINATION  "${CMAKE_BINARY_DIR}/include/"  )
+file(COPY  "${CMAKE_SOURCE_DIR}/include/paddlex/results.h"
+DESTINATION  "${CMAKE_BINARY_DIR}/include/"  )
+file(COPY  "${CMAKE_SOURCE_DIR}/include/paddlex/paddlex.h"
+DESTINATION  "${CMAKE_BINARY_DIR}/include/"  )

+ 208 - 0
examples/human_segmentation/deploy/cpp/human_segmenter.cpp

@@ -0,0 +1,208 @@
+//   Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <glog/logging.h>
+#include <omp.h>
+
+#include <algorithm>
+#include <chrono>  // NOLINT
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <vector>
+#include <utility>
+#include <ctime>
+#include "include/paddlex/paddlex.h"
+#include "include/paddlex/visualize.h"
+
+#if defined(__arm__) || defined(__aarch64__)
+#include <opencv2/videoio/legacy/constants_c.h>
+#endif
+
+using namespace std::chrono;  // NOLINT
+
+DEFINE_string(model_dir, "", "Path of inference model");
+DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
+DEFINE_bool(use_trt, false, "Infering with TensorRT");
+DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "key of encryption");
+DEFINE_string(image, "", "Path of test image file");
+DEFINE_bool(use_camera, false, "Infering with Camera");
+DEFINE_int32(camera_id, 0, "Camera id");
+DEFINE_string(video_path, "", "Path of input video");
+DEFINE_bool(show_result, false, "show the result of each frame with a window");
+DEFINE_bool(save_result, true, "save the result of each frame to a video");
+DEFINE_string(save_dir, "output", "Path to save visualized image");
+
+int main(int argc, char** argv) {
+  // 解析命令行参数
+  google::ParseCommandLineFlags(&argc, &argv, true);
+
+  if (FLAGS_model_dir == "") {
+    std::cerr << "--model_dir need to be defined" << std::endl;
+    return -1;
+  }
+  if (FLAGS_image == "" && FLAGS_video_path == ""
+      && FLAGS_use_camera == false) {
+    std::cerr << "--image or --video_path or --use_camera need to be defined"
+              << std::endl;
+    return -1;
+  }
+
+  // 加载模型
+  PaddleX::Model model;
+  model.Init(FLAGS_model_dir,
+             FLAGS_use_gpu,
+             FLAGS_use_trt,
+             FLAGS_gpu_id,
+             FLAGS_key);
+  if (FLAGS_use_camera || FLAGS_video_path != "") {
+    // 打开视频流
+    cv::VideoCapture capture;
+    if (FLAGS_use_camera) {
+      capture.open(FLAGS_camera_id);
+      if (!capture.isOpened()) {
+        std::cout << "Can not open the camera "
+                  << FLAGS_camera_id << "."
+                  << std::endl;
+        return -1;
+      }
+    } else {
+      capture.open(FLAGS_video_path);
+      if (!capture.isOpened()) {
+        std::cout << "Can not open the video "
+                  << FLAGS_video_path << "."
+                  << std::endl;
+        return -1;
+      }
+    }
+
+    // 创建VideoWriter
+    cv::VideoWriter video_out;
+    std::string video_out_path;
+    if (FLAGS_save_result) {
+      // 获取视频流信息: 分辨率, 帧率
+      int video_width = static_cast<int>(capture.get(CV_CAP_PROP_FRAME_WIDTH));
+      int video_height =
+        static_cast<int>(capture.get(CV_CAP_PROP_FRAME_HEIGHT));
+      int video_fps = static_cast<int>(capture.get(CV_CAP_PROP_FPS));
+      int video_fourcc;
+      if (FLAGS_use_camera) {
+        video_fourcc = 828601953;
+      } else {
+        video_fourcc = static_cast<int>(capture.get(CV_CAP_PROP_FOURCC));
+      }
+      if (FLAGS_use_camera) {
+        time_t now = time(0);
+        video_out_path =
+          PaddleX::generate_save_path(FLAGS_save_dir,
+                                      std::to_string(now) + ".mp4");
+      } else {
+        video_out_path =
+          PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_video_path);
+      }
+      video_out.open(video_out_path.c_str(),
+                     video_fourcc,
+                     video_fps,
+                     cv::Size(video_width, video_height),
+                     true);
+      if (!video_out.isOpened()) {
+        std::cout << "Create video writer failed!" << std::endl;
+        return -1;
+      }
+    }
+
+    PaddleX::SegResult result;
+    cv::Mat frame;
+    int key;
+    while (capture.read(frame)) {
+      if (FLAGS_show_result || FLAGS_use_camera) {
+       key = cv::waitKey(1);
+       // 按下ESC退出整个程序,保存视频文件到磁盘
+       if (key == 27) {
+         break;
+       }
+      } else if (frame.empty()) {
+        break;
+      }
+      // 开始预测
+      model.predict(frame, &result);
+      // 可视化
+      std::vector<uint8_t> label_map(result.label_map.data.begin(),
+                                     result.label_map.data.end());
+      cv::Mat mask(result.label_map.shape[0],
+                   result.label_map.shape[1],
+                   CV_8UC1,
+                   label_map.data());
+      int rows = result.label_map.shape[0];
+      int cols = result.label_map.shape[1];
+      cv::Mat vis_img = frame.clone();
+      for (int i = 0; i < rows; i++) {
+        for (int j = 0; j < cols; j++) {
+          int category_id = static_cast<int>(mask.at<uchar>(i, j));
+          if (category_id == 0) {
+            vis_img.at<cv::Vec3b>(i, j)[0] = 255;
+            vis_img.at<cv::Vec3b>(i, j)[1] = 255;
+            vis_img.at<cv::Vec3b>(i, j)[2] = 255;
+          }
+        }
+      }
+      if (FLAGS_show_result || FLAGS_use_camera) {
+        cv::imshow("human_seg", vis_img);
+      }
+      if (FLAGS_save_result) {
+        video_out.write(vis_img);
+      }
+      result.clear();
+    }
+    capture.release();
+    if (FLAGS_save_result) {
+      video_out.release();
+      std::cout << "Visualized output saved as " << video_out_path << std::endl;
+    }
+    if (FLAGS_show_result || FLAGS_use_camera) {
+      cv::destroyAllWindows();
+    }
+  } else {
+    PaddleX::SegResult result;
+    cv::Mat im = cv::imread(FLAGS_image, 1);
+    model.predict(im, &result);
+    // 可视化
+    std::vector<uint8_t> label_map(result.label_map.data.begin(),
+                                   result.label_map.data.end());
+    cv::Mat mask(result.label_map.shape[0],
+                 result.label_map.shape[1],
+                 CV_8UC1,
+                 label_map.data());
+    int rows = result.label_map.shape[0];
+    int cols = result.label_map.shape[1];
+    cv::Mat vis_img = im.clone();
+    for (int i = 0; i < rows; i++) {
+      for (int j = 0; j < cols; j++) {
+        int category_id = static_cast<int>(mask.at<uchar>(i, j));
+        if (category_id == 0) {
+          vis_img.at<cv::Vec3b>(i, j)[0] = 255;
+          vis_img.at<cv::Vec3b>(i, j)[1] = 255;
+          vis_img.at<cv::Vec3b>(i, j)[2] = 255;
+        }
+      }
+    }
+    std::string save_path =
+        PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
+    cv::imwrite(save_path, vis_img);
+    result.clear();
+    std::cout << "Visualized output saved as " << save_path << std::endl;
+  }
+  return 0;
+}

+ 0 - 11
examples/meter_reader/README.md

@@ -213,8 +213,6 @@ git clone https://github.com/PaddlePaddle/PaddleX
   | use_gpu	| 是否使用 GPU 预测, 支持值为0或1(默认值为0)|
   | gpu_id	| GPU 设备ID, 默认值为0 |
   | save_dir	| 保存可视化结果的路径, 默认值为"output"|
-  | det_key	| 检测模型加密过程中产生的密钥信息,默认值为""表示加载的是未加密的检测模型 |
-  | seg_key	| 分割模型加密过程中产生的密钥信息,默认值为""表示加载的是未加密的分割模型 |
   | seg_batch_size | 分割的批量大小,默认为2 |
   | thread_num	| 分割预测的线程数,默认为cpu处理器个数 |
   | use_camera | 是否使用摄像头采集图片,支持值为0或1(默认值为0) |
@@ -245,15 +243,6 @@ git clone https://github.com/PaddlePaddle/PaddleX
   ./build/meter_reader/meter_reader --det_model_dir=/path/to/det_inference_model --seg_model_dir=/path/to/seg_inference_model --use_camera=1 --use_gpu=1 --use_erode=1 --save_dir=output
   ```
 
-  * 使用加密后的模型对单张图片做预测
-
-  如果未对模型进行加密,请参考[加密PaddleX模型](../../docs/deploy/server/encryption.md#13-加密paddlex模型)对模型进行加密。例如加密后的检测模型所在目录为`/path/to/encrypted_det_inference_model`,密钥为`yEBLDiBOdlj+5EsNNrABhfDuQGkdcreYcHcncqwdbx0=`;加密后的分割模型所在目录为`/path/to/encrypted_seg_inference_model`,密钥为`DbVS64I9pFRo5XmQ8MNV2kSGsfEr4FKA6OH9OUhRrsY=`
-
-  ```shell
-  ./build/meter_reader/meter_reader --det_model_dir=/path/to/encrypted_det_inference_model --seg_model_dir=/path/to/encrypted_seg_inference_model --image=/path/to/test.jpg --use_gpu=1 --use_erode=1 --save_dir=output --det_key yEBLDiBOdlj+5EsNNrABhfDuQGkdcreYcHcncqwdbx0= --seg_key DbVS64I9pFRo5XmQ8MNV2kSGsfEr4FKA6OH9OUhRrsY=
-  ```
-
-
 ## <h2 id="5">模型训练</h2>