
Merge pull request #60 from jiangjiajun/secure_cpp

Secure cpp
Jason 5 years ago
parent
commit
a8d497a3ce

+ 52 - 4
deploy/cpp/CMakeLists.txt

@@ -5,12 +5,29 @@ option(WITH_MKL        "Compile demo with MKL/OpenBlas support, default use MKL."
 option(WITH_GPU        "Compile demo with GPU/CPU, default use CPU."                    ON)
 option(WITH_STATIC_LIB "Compile demo with static/shared library, default use static."   OFF)
 option(WITH_TENSORRT "Compile demo with TensorRT."   OFF)
+option(WITH_ENCRYPTION "Compile demo with encryption tool."   OFF)
 
-SET(TENSORRT_DIR "" CACHE PATH "Compile demo with TensorRT")
+SET(TENSORRT_DIR "" CACHE PATH "Location of libraries")
 SET(PADDLE_DIR "" CACHE PATH "Location of libraries")
 SET(OPENCV_DIR "" CACHE PATH "Location of libraries")
+SET(ENCRYPTION_DIR"" CACHE PATH "Location of libraries")
 SET(CUDA_LIB "" CACHE PATH "Location of libraries")
 
+if (NOT WIN32)
+    set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+    set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/lib)
+    set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/demo)
+else()
+    set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+    set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+    set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/paddlex_inference)
+endif()
+
+if (NOT WIN32)
+    SET(YAML_BUILD_TYPE ON CACHE BOOL "Build yaml-cpp as a shared library.")
+else()
+    SET(YAML_BUILD_TYPE OFF CACHE BOOL "Build yaml-cpp as a shared library.")
+endif()
 include(cmake/yaml-cpp.cmake)
 
 include_directories("${CMAKE_SOURCE_DIR}/")
@@ -27,6 +44,11 @@ macro(safe_set_static_flag)
     endforeach(flag_var)
 endmacro()
 
+
+if (WITH_ENCRYPTION)
+    add_definitions(-DWITH_ENCRYPTION=${WITH_ENCRYPTION})
+endif()
+
 if (WITH_MKL)
     ADD_DEFINITIONS(-DUSE_MKL)
 endif()
@@ -183,6 +205,7 @@ else()
     set(DEPS ${DEPS}
         ${MATH_LIB} ${MKLDNN_LIB}
         glog gflags_static libprotobuf zlibstatic xxhash libyaml-cppmt)
+
     set(DEPS ${DEPS} libcmt shlwapi)
     if (EXISTS "${PADDLE_DIR}/third_party/install/snappy/lib")
         set(DEPS ${DEPS} snappy)
@@ -207,21 +230,35 @@ if(WITH_GPU)
   endif()
 endif()
 
+if(WITH_ENCRYPTION)
+    if(NOT WIN32)
+        include_directories("${ENCRYPTION_DIR}/include")
+        link_directories("${ENCRYPTION_DIR}/lib")
+        set(DEPS ${DEPS} ${ENCRYPTION_DIR}/lib/libpmodel-decrypt${CMAKE_SHARED_LIBRARY_SUFFIX})
+    else()
+        message(FATAL_ERROR "The encryption tool does not support Windows")
+    endif()
+endif()
+
 if (NOT WIN32)
     set(EXTERNAL_LIB "-ldl -lrt -lgomp -lz -lm -lpthread")
     set(DEPS ${DEPS} ${EXTERNAL_LIB})
 endif()
 
 set(DEPS ${DEPS} ${OpenCV_LIBS})
-add_executable(classifier src/classifier.cpp src/transforms.cpp src/paddlex.cpp)
+add_library(paddlex_inference SHARED src/visualize.cpp src/transforms.cpp src/paddlex.cpp)
+ADD_DEPENDENCIES(paddlex_inference ext-yaml-cpp)
+target_link_libraries(paddlex_inference ${DEPS})
+
+add_executable(classifier demo/classifier.cpp src/transforms.cpp src/paddlex.cpp)
 ADD_DEPENDENCIES(classifier ext-yaml-cpp)
 target_link_libraries(classifier ${DEPS})
 
-add_executable(detector src/detector.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+add_executable(detector demo/detector.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
 ADD_DEPENDENCIES(detector ext-yaml-cpp)
 target_link_libraries(detector ${DEPS})
 
-add_executable(segmenter src/segmenter.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
+add_executable(segmenter demo/segmenter.cpp src/transforms.cpp src/paddlex.cpp src/visualize.cpp)
 ADD_DEPENDENCIES(segmenter ext-yaml-cpp)
 target_link_libraries(segmenter ${DEPS})
 
@@ -252,3 +289,14 @@ if (WIN32 AND WITH_MKL)
     )
 
 endif()
+
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/visualize.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/config_parser.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/transforms.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/results.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
+file(COPY "${CMAKE_SOURCE_DIR}/include/paddlex/paddlex.h"
+     DESTINATION "${CMAKE_BINARY_DIR}/include/")
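Taken together, the new `paddlex_inference` shared library target and the header copies above give downstream projects a `build/include` + `build/lib` layout to compile against. A minimal sketch of such a consumer, assuming a Linux build where the library lands in `${CMAKE_BINARY_DIR}/lib` per the output-directory settings above (the file name, paths, and compile command are illustrative, not part of this PR):

```cpp
// my_app.cpp -- hypothetical consumer of the exported library; compile
// roughly as (plus the Paddle/OpenCV include and link flags your build needs):
//   g++ my_app.cpp -I/path/to/build/include -L/path/to/build/lib \
//       -lpaddlex_inference -o my_app
#include "paddlex.h"  // exported by the file(COPY ...) rules above

int main() {
  PaddleX::Model model;
  // Default arguments: CPU, no TensorRT, GPU id 0, empty encryption key.
  model.Init("/path/to/inference_model");
  return 0;
}
```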

+ 1 - 1
deploy/cpp/cmake/yaml-cpp.cmake

@@ -14,7 +14,7 @@ ExternalProject_Add(
         -DYAML_CPP_INSTALL=OFF
         -DYAML_CPP_BUILD_CONTRIB=OFF
         -DMSVC_SHARED_RT=OFF
-        -DBUILD_SHARED_LIBS=OFF
+        -DBUILD_SHARED_LIBS=${YAML_BUILD_TYPE}
         -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
         -DCMAKE_CXX_FLAGS=${CMAKE_CXX_FLAGS}
         -DCMAKE_CXX_FLAGS_DEBUG=${CMAKE_CXX_FLAGS_DEBUG}

+ 2 - 1
deploy/cpp/src/classifier.cpp → deploy/cpp/demo/classifier.cpp

@@ -25,6 +25,7 @@ DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
 DEFINE_bool(use_trt, false, "Inferring with TensorRT");
 DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "Key of the encrypted model");
 DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
 
@@ -43,7 +44,7 @@ int main(int argc, char** argv) {
 
  // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id);
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
 
  // Run prediction
   if (FLAGS_image_list != "") {

+ 4 - 3
deploy/cpp/src/detector.cpp → deploy/cpp/demo/detector.cpp

@@ -26,6 +26,7 @@ DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
 DEFINE_bool(use_trt, false, "Inferring with TensorRT");
 DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "Key of the encrypted model");
 DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
 DEFINE_string(save_dir, "output", "Path to save visualized image");
@@ -45,7 +46,7 @@ int main(int argc, char** argv) {
 
  // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id);
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
 
   auto colormap = PaddleX::GenerateColorMap(model.labels.size());
   std::string save_dir = "output";
@@ -74,7 +75,7 @@ int main(int argc, char** argv) {
 
      // Visualization
       cv::Mat vis_img =
-          PaddleX::VisualizeDet(im, result, model.labels, colormap, 0.5);
+          PaddleX::Visualize(im, result, model.labels, colormap, 0.5);
       std::string save_path =
           PaddleX::generate_save_path(FLAGS_save_dir, image_path);
       cv::imwrite(save_path, vis_img);
@@ -97,7 +98,7 @@ int main(int argc, char** argv) {
 
    // Visualization
     cv::Mat vis_img =
-        PaddleX::VisualizeDet(im, result, model.labels, colormap, 0.5);
+        PaddleX::Visualize(im, result, model.labels, colormap, 0.5);
     std::string save_path =
         PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
     cv::imwrite(save_path, vis_img);

+ 4 - 3
deploy/cpp/src/segmenter.cpp → deploy/cpp/demo/segmenter.cpp

@@ -26,6 +26,7 @@ DEFINE_string(model_dir, "", "Path of inference model");
 DEFINE_bool(use_gpu, false, "Inferring with GPU or CPU");
 DEFINE_bool(use_trt, false, "Inferring with TensorRT");
 DEFINE_int32(gpu_id, 0, "GPU card id");
+DEFINE_string(key, "", "Key of the encrypted model");
 DEFINE_string(image, "", "Path of test image file");
 DEFINE_string(image_list, "", "Path of test image list file");
 DEFINE_string(save_dir, "output", "Path to save visualized image");
@@ -45,7 +46,7 @@ int main(int argc, char** argv) {
 
  // Load the model
   PaddleX::Model model;
-  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id);
+  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id, FLAGS_key);
 
   auto colormap = PaddleX::GenerateColorMap(model.labels.size());
  // Run prediction
@@ -62,7 +63,7 @@ int main(int argc, char** argv) {
       model.predict(im, &result);
      // Visualization
       cv::Mat vis_img =
-          PaddleX::VisualizeSeg(im, result, model.labels, colormap);
+          PaddleX::Visualize(im, result, model.labels, colormap);
       std::string save_path =
           PaddleX::generate_save_path(FLAGS_save_dir, image_path);
       cv::imwrite(save_path, vis_img);
@@ -74,7 +75,7 @@ int main(int argc, char** argv) {
     cv::Mat im = cv::imread(FLAGS_image, 1);
     model.predict(im, &result);
    // Visualization
-    cv::Mat vis_img = PaddleX::VisualizeSeg(im, result, model.labels, colormap);
+    cv::Mat vis_img = PaddleX::Visualize(im, result, model.labels, colormap);
     std::string save_path =
         PaddleX::generate_save_path(FLAGS_save_dir, FLAGS_image);
     cv::imwrite(save_path, vis_img);

+ 13 - 6
deploy/cpp/include/paddlex/paddlex.h

@@ -28,9 +28,14 @@
 
 #include "paddle_inference_api.h"  // NOLINT
 
-#include "include/paddlex/config_parser.h"
-#include "include/paddlex/results.h"
-#include "include/paddlex/transforms.h"
+#include "config_parser.h"
+#include "results.h"
+#include "transforms.h"
+
+#ifdef WITH_ENCRYPTION
+#include "paddle_model_decrypt.h"
+#include "model_code.h"
+#endif
 
 namespace PaddleX {
 
@@ -39,14 +44,16 @@ class Model {
   void Init(const std::string& model_dir,
             bool use_gpu = false,
             bool use_trt = false,
-            int gpu_id = 0) {
-    create_predictor(model_dir, use_gpu, use_trt, gpu_id);
+            int gpu_id = 0,
+            std::string key = "") {
+    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key);
   }
 
   void create_predictor(const std::string& model_dir,
                         bool use_gpu = false,
                         bool use_trt = false,
-                        int gpu_id = 0);
+                        int gpu_id = 0,
+                        std::string key = "");
 
   bool load_config(const std::string& model_dir);
 

+ 2 - 2
deploy/cpp/include/paddlex/visualize.h

@@ -46,13 +46,13 @@ namespace PaddleX {
 // Generate visualization colormap for each class
 std::vector<int> GenerateColorMap(int num_class);
 
-cv::Mat VisualizeDet(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const DetResult& results,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap,
                      float threshold = 0.5);
 
-cv::Mat VisualizeSeg(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const SegResult& result,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap);

+ 7 - 0
deploy/cpp/scripts/build.sh

@@ -16,6 +16,11 @@ CUDA_LIB=/path/to/cuda/lib/
 # Path to the CUDNN lib directory
 CUDNN_LIB=/path/to/cudnn/lib/
 
+# Whether to load an encrypted model
+WITH_ENCRYPTION=OFF
+# Path to the encryption tool
+ENCRYPTION_DIR=/path/to/encryption_tool/
+
 # OpenCV path; no need to change it when using the bundled prebuilt version
 OPENCV_DIR=$(pwd)/deps/opencv3gcc4.8/
 sh $(pwd)/scripts/bootstrap.sh
@@ -28,10 +33,12 @@ cmake .. \
     -DWITH_GPU=${WITH_GPU} \
     -DWITH_MKL=${WITH_MKL} \
     -DWITH_TENSORRT=${WITH_TENSORRT} \
+    -DWITH_ENCRYPTION=${WITH_ENCRYPTION} \
     -DTENSORRT_DIR=${TENSORRT_DIR} \
     -DPADDLE_DIR=${PADDLE_DIR} \
     -DWITH_STATIC_LIB=${WITH_STATIC_LIB} \
     -DCUDA_LIB=${CUDA_LIB} \
     -DCUDNN_LIB=${CUDNN_LIB} \
+    -DENCRYPTION_DIR=${ENCRYPTION_DIR} \
     -DOPENCV_DIR=${OPENCV_DIR}
 make

+ 10 - 2
deploy/cpp/src/paddlex.cpp

@@ -19,7 +19,8 @@ namespace PaddleX {
 void Model::create_predictor(const std::string& model_dir,
                              bool use_gpu,
                              bool use_trt,
-                             int gpu_id) {
+                             int gpu_id,
+                             std::string key) {
  // Read the config file
   if (!load_config(model_dir)) {
     std::cerr << "Parse file 'model.yml' failed!" << std::endl;
@@ -28,7 +29,14 @@ void Model::create_predictor(const std::string& model_dir,
   paddle::AnalysisConfig config;
   std::string model_file = model_dir + OS_PATH_SEP + "__model__";
   std::string params_file = model_dir + OS_PATH_SEP + "__params__";
-  config.SetModel(model_file, params_file);
+#ifdef WITH_ENCRYPTION
+  if (key != "") {
+    paddle_security_load_model(&config, key.c_str(), model_file.c_str(), params_file.c_str());
+  }
+#endif
+  if (key == "") {
+    config.SetModel(model_file, params_file);
+  }
   if (use_gpu) {
     config.EnableUseGpu(100, gpu_id);
   } else {
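One subtlety in the branch above: the decrypting call is compiled only under `WITH_ENCRYPTION`, while the plain `SetModel` path is gated on an empty key at runtime. A condensed sketch of that selection as a hypothetical helper (`set_model` is not part of this PR), which also flags the corner case where a key is passed to a binary built without encryption support:

```cpp
#include <string>
#include "paddle_inference_api.h"  // paddle::AnalysisConfig
#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"  // paddle_security_load_model
#endif

// Hypothetical helper condensing the load-path selection in
// Model::create_predictor above; not a drop-in replacement.
void set_model(paddle::AnalysisConfig* config,
               const std::string& model_file,
               const std::string& params_file,
               const std::string& key) {
#ifdef WITH_ENCRYPTION
  if (key != "") {
    // Decrypts __model__/__params__ with `key` and configures `config`.
    paddle_security_load_model(config, key.c_str(),
                               model_file.c_str(), params_file.c_str());
    return;
  }
#endif
  if (key == "") {
    config->SetModel(model_file, params_file);
  }
  // Caveat: a non-empty key in a binary built without WITH_ENCRYPTION
  // falls through both branches, leaving `config` with no model set.
}
```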

+ 2 - 2
deploy/cpp/src/visualize.cpp

@@ -31,7 +31,7 @@ std::vector<int> GenerateColorMap(int num_class) {
   return colormap;
 }
 
-cv::Mat VisualizeDet(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const DetResult& result,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap,
@@ -105,7 +105,7 @@ cv::Mat VisualizeDet(const cv::Mat& img,
   return vis_img;
 }
 
-cv::Mat VisualizeSeg(const cv::Mat& img,
+cv::Mat Visualize(const cv::Mat& img,
                      const SegResult& result,
                      const std::map<int, std::string>& labels,
                      const std::vector<int>& colormap) {

+ 3 - 3
docs/tutorials/deploy/deploy_cpp_linux.md

@@ -95,7 +95,7 @@ make
  ```
 
 ### Step5: Prediction and Visualization
-After the build succeeds, the prediction demo executables are `build/detector`, `build/classifier`, and `build/segmenter`; choose the one matching your model type. The main command-line parameters are described below:
+After the build succeeds, the prediction demo executables are `build/demo/detector`, `build/demo/classifier`, and `build/demo/segmenter`; choose the one matching your model type. The main command-line parameters are described below:
 
 | Parameter | Description |
 |  ----  | ----  |
@@ -116,7 +116,7 @@ make
 Test the image `/path/to/xiaoduxiong.jpeg` without using the `GPU`:
 
 ```shell
-./build/detector --model_dir=/path/to/inference_model --image=/path/to/xiaoduxiong.jpeg --save_dir=output
+./build/demo/detector --model_dir=/path/to/inference_model --image=/path/to/xiaoduxiong.jpeg --save_dir=output
 ```
 The `visualized prediction result` of the image file is saved under the directory set by the `save_dir` parameter.
 
@@ -131,6 +131,6 @@ make
 /path/to/images/xiaoduxiongn.jpeg
 ```
 ```shell
-./build/detector --model_dir=/path/to/models/inference_model --image_list=/root/projects/images_list.txt --use_gpu=1 --save_dir=output
+./build/demo/detector --model_dir=/path/to/models/inference_model --image_list=/root/projects/images_list.txt --use_gpu=1 --save_dir=output
 ```
 The `visualized prediction result` of the image file is saved under the directory set by the `save_dir` parameter.

+ 1 - 1
docs/tutorials/deploy/deploy_cpp_win_vs2019.md

@@ -106,7 +106,7 @@ d:
 cd D:\projects\PaddleX\deploy\cpp\out\build\x64-Release
 ```
 
-After the build succeeds, the prediction demo entry programs are `detector`, `classifier`, and `segmenter`; choose the one matching your model type. The main command-line parameters are described below:
+After the build succeeds, the prediction demo entry programs are `demo\detector`, `demo\classifier`, and `demo\segmenter`; choose the one matching your model type. The main command-line parameters are described below:
 
 | Parameter | Description |
 |  ----  | ----  |