Develop encryption (#934)

* modify decrypt

* modify decrypt

* modify decrypt
heliqi, 4 years ago
parent commit ac2750910e

+ 0 - 7
dygraph/deploy/cpp/CMakeLists.txt

@@ -266,13 +266,6 @@ if (WITH_TENSORRT)
   target_link_libraries(tensorrt_infer ${DEPS})
 endif()
 
-if (WITH_ENCRYPTION)
-  add_executable(decrypt_infer ${PROJECT_ROOT_DIR}/demo/decrypt_infer.cpp ${SRC}
-                               ${ENGINE_SRC} ${DETECTOR_SRC} ${ENCRYPTION_SRC})
-  ADD_DEPENDENCIES(decrypt_infer ext-yaml-cpp)
-  target_link_libraries(decrypt_infer ${DEPS})
-endif()
-
 if(WIN32)
   add_custom_command(TARGET model_infer POST_BUILD
     COMMAND ${CMAKE_COMMAND} -E copy ${PADDLE_DIR}/third_party/install/mklml/lib/mklml.dll ${CMAKE_BINARY_DIR}/paddle_deploy

+ 0 - 69
dygraph/deploy/cpp/demo/decrypt_infer.cpp

@@ -1,69 +0,0 @@
-// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#include <gflags/gflags.h>
-#include <string>
-#include <vector>
-
-#include "model_deploy/common/include/paddle_deploy.h"
-
-DEFINE_string(model_filename, "", "Path of det inference model");
-DEFINE_string(params_filename, "", "Path of det inference params");
-DEFINE_string(cfg_file, "", "Path of yaml file");
-DEFINE_string(model_type, "", "model type");
-DEFINE_string(image, "", "Path of test image file");
-DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
-DEFINE_int32(gpu_id, 0, "GPU card id");
-DEFINE_string(key, "", "encrypt key");
-
-int main(int argc, char** argv) {
-  // Parsing command-line
-  google::ParseCommandLineFlags(&argc, &argv, true);
-
-  // create model
-  PaddleDeploy::Model* model = PaddleDeploy::CreateModel(FLAGS_model_type);
-
-  // model init
-  model->Init(FLAGS_cfg_file, FLAGS_key);
-
-
-  // inference engine init
-  PaddleDeploy::PaddleEngineConfig engine_config;
-  // encryption
-  if ("" != FLAGS_key) {
-    engine_config.key = FLAGS_key;
-    engine_config.model_filename = decrypt_file(FLAGS_model_filename.c_str(),
-                                                FLAGS_key.c_str());
-    engine_config.params_filename = decrypt_file(FLAGS_params_filename.c_str(),
-                                                 FLAGS_key.c_str());
-  } else {
-    engine_config.model_filename = FLAGS_model_filename;
-    engine_config.params_filename = FLAGS_params_filename;
-  }
-  engine_config.use_gpu = FLAGS_use_gpu;
-  engine_config.gpu_id = FLAGS_gpu_id;
-  model->PaddleEngineInit(engine_config);
-
-  // prepare data
-  std::vector<cv::Mat> imgs;
-  imgs.push_back(std::move(cv::imread(FLAGS_image)));
-
-  // predict
-  std::vector<PaddleDeploy::Result> results;
-  model->Predict(imgs, &results, 1);
-
-  std::cout << results[0] << std::endl;
-  delete model;
-  return 0;
-}

+ 1 - 0
dygraph/deploy/cpp/demo/model_infer.cpp

@@ -43,6 +43,7 @@ int main(int argc, char** argv) {
   engine_config.params_filename = FLAGS_params_filename;
   engine_config.use_gpu = FLAGS_use_gpu;
   engine_config.gpu_id = FLAGS_gpu_id;
+  engine_config.key = FLAGS_key;
   model->PaddleEngineInit(engine_config);
 
   // prepare data
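
This hunk is where the deleted `decrypt_infer` demo folds into `model_infer`: the key now rides along in `PaddleEngineConfig`, and decryption happens inside the engine. Below is a minimal sketch of the resulting flow, assuming a `det` model type and reusing the paths and example key from the docs in this commit (in the real demo these values come from gflags, as in the deleted `decrypt_infer.cpp`):

```C++
#include <iostream>
#include <string>
#include <vector>

#include <opencv2/opencv.hpp>

#include "model_deploy/common/include/paddle_deploy.h"

int main() {
  // Create the model wrapper; "det" is an assumed model_type value.
  PaddleDeploy::Model* model = PaddleDeploy::CreateModel("det");

  // An empty key loads a plain model; a non-empty key decrypts first.
  std::string key = "2DTPfe+K+I/hkHlDMDAoXdVzotbC8UCF9Ti0rwWd+KU=";
  model->Init("yolov3_encryption/model/encrypted.yml", key);

  PaddleDeploy::PaddleEngineConfig engine_config;
  engine_config.model_filename = "yolov3_encryption/encrypted.pdmodel";
  engine_config.params_filename = "yolov3_encryption/encrypted.pdparams";
  engine_config.key = key;  // decryption now happens inside the engine
  engine_config.use_gpu = false;
  model->PaddleEngineInit(engine_config);

  // Run a single-image prediction, as in the deleted demo.
  std::vector<cv::Mat> imgs;
  imgs.push_back(cv::imread("yolov3_mbv1/images/000000010583.jpg"));
  std::vector<PaddleDeploy::Result> results;
  model->Predict(imgs, &results, 1);

  std::cout << results[0] << std::endl;
  delete model;
  return 0;
}
```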

+ 7 - 1
dygraph/deploy/cpp/docs/apis/model.md

@@ -45,7 +45,7 @@ PaddleDeploy::Model*  PaddleDeploy::ModelFactory::CreateObject(const std::string
 ### 2.1 Model pre/post-processing initialization
 
 ```C++
-bool Model::Init(const std::string& cfg_file)
+bool Model::Init(const std::string& cfg_file, const std::string key = "")
 ```
 
 > Reads the model's configuration file and initializes the data pre-processing, post-processing, and related operations used during model prediction
@@ -53,6 +53,8 @@ bool Model::Init(const std::string& cfg_file)
 **Parameters**
 
 > > **cfg_file** Path to the configuration file, e.g. `infer_cfg.yml` in a model exported from PaddleDetection
+>
+> > **key** The model encryption key, used to decrypt the model. Defaults to empty, which loads a plain (unencrypted) model; if non-empty, the model is decrypted with this key and then loaded for deployment
 
 **Return value**
 
@@ -64,6 +66,8 @@ bool Model::Init(const std::string& cfg_file)
 > if (!model->Init("yolov3_mbv1/model/infer_cfg.yml")) {
 >     std::cerr << "Fail to execute model->Init()" << std::endl;
 > }
+> // Decrypt an encrypted model
+> model->Init("yolov3_mbv1/model/infer_cfg.yml", "2DTPfe+K+I/hkHlDMDAoXdVzotbC8UCF9Ti0rwWd+KU=");
 >```
 
 
@@ -331,6 +335,8 @@ bool Model::Postprocess(const std::vector<PaddleDeploy::DataBlob>& outputs,
 
   std::string params_filename = ""; // model parameters
 
+  std::string key = ""; // Model encryption key, used to decrypt an encrypted model. If empty, a plain model is loaded directly for deployment.
+
   bool use_mkl = true; // whether to enable MKL

   int mkl_thread_num = 8; // number of MKL parallel threads
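
Pulling the documented fields together, the subset of `PaddleEngineConfig` this change touches looks roughly like the sketch below (reconstructed from the docs above; the real struct lives in the engine headers and has more members):

```C++
struct PaddleEngineConfig {
  std::string model_filename = "";   // model structure file
  std::string params_filename = "";  // model weights file
  std::string key = "";              // decryption key; empty loads a plain model
  bool use_mkl = true;               // enable MKL on CPU
  int mkl_thread_num = 8;            // MKL parallel threads
  bool use_gpu = false;              // run inference on GPU
  int gpu_id = 0;                    // GPU card id
  // ... other engine options omitted
};
```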

+ 3 - 3
dygraph/deploy/cpp/docs/demo/decrypt_infer.md

@@ -1,6 +1,6 @@
 # Model encryption prediction example
 
-This document explains how to use the compiled `PaddleX/deploy/cpp/demo/decrypt_infer.cpp`. It is provided for reference only; developers can build on this demo to meet their integration needs.
+This document explains how to deploy a model with encryption and decryption. It is provided for reference only; developers can build on this demo to meet their integration needs.
 
 ## Step 1: Compile
 Refer to the compilation guides:
@@ -8,7 +8,7 @@
 - [Compilation guide for Linux](../compile/paddle/linux.md)
 - [Compilation guide for Windows](../compile/paddle/windows.md)
 
-**Note**: enable the encryption switch WITH_ENCRYPTION at compile time and fill in the OpenSSL path
+**Note**: you must enable the encryption switch WITH_ENCRYPTION at compile time and fill in the OpenSSL path
 
 ## Step 2: Prepare the PaddlePaddle deployment model
 Developers can obtain deployment models from the suites below. Note that deployment requires the exported deployment model, which generally consists of three files: `model.pdmodel`, `model.pdiparams`, and `deploy.yml`, representing the model structure, the model weights, and each suite's own configuration, respectively.
@@ -58,7 +58,7 @@ yolov3_encryption
 
 ```sh
 # To use the GPU, add the flag --use_gpu=1
-build/demo/decrypt_infer --model_filename=yolov3_encryption/encrypted.pdmodel \
+build/demo/model_infer --model_filename=yolov3_encryption/encrypted.pdmodel \
                          --params_filename=yolov3_encryption/encrypted.pdparams \
                          --cfg_file=yolov3_encryption/model/encrypted.yml \
                          --image=yolov3_mbv1/images/000000010583.jpg \

+ 13 - 4
dygraph/deploy/cpp/model_deploy/engine/src/ppinference_engine.cpp

@@ -29,10 +29,19 @@ bool PaddleInferenceEngine::Init(const InferenceConfig& infer_config) {
     config.SetModel(engine_config.model_filename,
                   engine_config.params_filename);
   } else {
-    config.SetModelBuffer(engine_config.model_filename.c_str(),
-                          engine_config.model_filename.size(),
-                          engine_config.params_filename.c_str(),
-                          engine_config.params_filename.size());
+#ifdef PADDLEX_DEPLOY_ENCRYPTION
+    std::string model = decrypt_file(engine_config.model_filename.c_str(),
+                                     engine_config.key.c_str());
+    std::string params = decrypt_file(engine_config.params_filename.c_str(),
+                                      engine_config.key.c_str());
+    config.SetModelBuffer(model.c_str(),
+                          model.size(),
+                          params.c_str(),
+                          params.size());
+#else
+    std::cerr << "Encrypted model requires a build with WITH_ENCRYPTION" << std::endl;
+    return false;
+#endif  // PADDLEX_DEPLOY_ENCRYPTION
   }
   if (engine_config.use_mkl && !engine_config.use_gpu) {
     config.EnableMKLDNN();
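
One practical consequence of the `#ifdef PADDLEX_DEPLOY_ENCRYPTION` guard above: passing an encrypted model to a binary built without WITH_ENCRYPTION makes engine initialization fail cleanly rather than crash. A caller-side sketch, assuming `PaddleEngineInit` propagates the engine's `bool` result:

```C++
PaddleDeploy::PaddleEngineConfig engine_config;
engine_config.model_filename = "yolov3_encryption/encrypted.pdmodel";
engine_config.params_filename = "yolov3_encryption/encrypted.pdparams";
engine_config.key = "2DTPfe+K+I/hkHlDMDAoXdVzotbC8UCF9Ti0rwWd+KU=";

// Check the result: this fails when the key is non-empty but the
// binary was compiled without the WITH_ENCRYPTION switch.
if (!model->PaddleEngineInit(engine_config)) {
  std::cerr << "Engine init failed; encrypted models require a build "
            << "compiled with WITH_ENCRYPTION." << std::endl;
  return 1;
}
```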