瀏覽代碼

add_C#_docs (#895)

yzl19940819 4 年之前
父節點
當前提交
bf839594a5

+ 2 - 1
README.md

@@ -16,7 +16,8 @@
 
 
 ## :heart: 重磅功能升级提醒
-* 全新发布Manufacture SDK,提供工业级多端多平台部署加速的预编译飞桨部署开发包(SDK),通过配置业务逻辑流程文件即可以低代码方式快速完成推理部署,[欢迎体验](https://github.com/PaddlePaddle/PaddleX/tree/develop/dygraph/deploy/cpp)。
+* 全新发布Manufacture SDK,提供工业级多端多平台部署加速的预编译飞桨部署开发包(SDK),通过配置业务逻辑流程文件即可以低代码方式快速完成推理部署,[欢迎体验](https://github.com/PaddlePaddle/PaddleX/tree/develop/dygraph/deploy/cpp/docs/manufacture_sdk)。
+
 
 * PaddleX部署全面升级,支持飞桨视觉套件PaddleDetection、PaddleClas、PaddleSeg、PaddleX的统一部署能力,[欢迎体验](https://github.com/PaddlePaddle/PaddleX/tree/develop/dygraph/deploy/cpp)。
 ## :factory: 重要活动提醒

+ 1 - 1
dygraph/README.md

@@ -8,7 +8,7 @@
  <p align= "center"> PaddleX -- 飞桨全流程开发工具,以低代码的形式支持开发者快速实现产业实际项目落地 </p>
 
 ## :heart:重磅功能升级
-* 全新发布Manufacture SDK,提供工业级多端多平台部署加速的预编译飞桨部署开发包(SDK),通过配置业务逻辑流程文件即可以低代码方式快速完成推理部署[欢迎体验](https://github.com/PaddlePaddle/PaddleX/tree/develop/dygraph/deploy/cpp)。
+* 全新发布Manufacture SDK,提供工业级多端多平台部署加速的预编译飞桨部署开发包(SDK),通过配置业务逻辑流程文件即可以低代码方式快速完成推理部署[欢迎体验](https://github.com/PaddlePaddle/PaddleX/tree/develop/dygraph/deploy/cpp/docs/manufacture_sdk)。
 
 * PaddleX部署全面升级,支持飞桨视觉套件PaddleDetection、PaddleClas、PaddleSeg、PaddleX的统一部署能力。[欢迎体验](https://github.com/PaddlePaddle/PaddleX/tree/develop/dygraph/deploy/cpp)。
 

+ 1 - 1
dygraph/deploy/cpp/docs/manufacture_sdk/README.md

@@ -4,7 +4,7 @@ PaddleX-Deploy全面升级,支持飞桨视觉套件PaddleX、PaddleDetection
 
 在工业部署的开发过程中,常常因环境问题导致在部署代码编译环节中耗费较多的时间和人力成本。如果产线上的业务逻辑稍微复杂一点,尤其是串联多个模型时,则需要在模型推理前插入预处理、中间结果处理等操作,如此复杂的逻辑对应的部署代码开发工程量是很大的。
 
-为更进一步地提升部署效率,**:heart:PaddleX部署全新发布Manufacture SDK,提供工业级多端多平台部署加速的预编译飞桨部署开发包(SDK),通过配置业务逻辑流程文件即可以低代码方式快速完成推理部署。:heart:**
+为更进一步地提升部署效率,**:heart: PaddleX部署全新发布Manufacture SDK,提供工业级多端多平台部署加速的预编译飞桨部署开发包(SDK),通过配置业务逻辑流程文件即可以低代码方式快速完成推理部署。:heart:**
 
 
 ## 目录

+ 87 - 0
dygraph/examples/C#_deploy/Program.cs

@@ -0,0 +1,87 @@
+using System;
+using System.Runtime.InteropServices;
+using System.Drawing.Imaging;
+using System.Drawing;
+
namespace ConsoleApp2
{
    /// <summary>
    /// Example driver that calls the native model_infer.dll (built from the
    /// PaddleX C++ deploy code) to run object detection from C#.
    /// </summary>
    class Program
    {
        // Native entry points exported by model_infer.cpp.
        [DllImport("model_infer.dll", EntryPoint = "InitModel")]
        public static extern void InitModel(string model_type, string model_filename, string params_filename, string cfg_file);

        [DllImport("model_infer.dll", EntryPoint = "ModelPredict")]
        public static extern void ModelPredict(byte[] img, int W, int H, int C, IntPtr output, int[] BoxesNum, ref byte label);

        [DllImport("model_infer.dll", EntryPoint = "DestructModel")]
        public static extern void DestructModel();


        static void Main(string[] args)
        {
            string imgfile = "E:\\PaddleX_deploy\\PaddleX\\dygraph\\deploy\\cpp\\out\\paddle_deploy\\1.png";
            string model_type = "det";
            string model_filename = "E:\\PaddleX_deploy\\PaddleX\\dygraph\\deploy\\cpp\\out\\paddle_deploy\\yolov3_darknet53_270e_coco1\\model.pdmodel";
            string params_filename = "E:\\PaddleX_deploy\\PaddleX\\dygraph\\deploy\\cpp\\out\\paddle_deploy\\yolov3_darknet53_270e_coco1\\model.pdiparams";
            string cfg_file = "E:\\PaddleX_deploy\\PaddleX\\dygraph\\deploy\\cpp\\out\\paddle_deploy\\yolov3_darknet53_270e_coco1\\infer_cfg.yml";

            InitModel(model_type, model_filename, params_filename, cfg_file);

            float[] resultlist = new float[600];          // 6 floats per box -> room for 100 boxes
            int[] boxesInfo = new int[1];                 // receives the number of detected boxes
            byte[] labellist = new byte[1000];            // receives NUL-terminated label names

            using (Bitmap bmp = new Bitmap(imgfile))      // dispose GDI+ resources deterministically
            {
                byte[] inputData = GetBGRValues(bmp, out int stride);

                // Derive the channel count from the actual pixel format instead of
                // hard-coding 4, so 24bpp and 32bpp images both work.
                int channels = Image.GetPixelFormatSize(bmp.PixelFormat) / 8;

                // Pin the result buffer only for the duration of the native call and
                // release the handle afterwards (FloatToIntptr pins forever and leaks).
                GCHandle resultHandle = GCHandle.Alloc(resultlist, GCHandleType.Pinned);
                try
                {
                    ModelPredict(inputData, bmp.Width, bmp.Height, channels, resultHandle.AddrOfPinnedObject(), boxesInfo, ref labellist[0]);
                }
                finally
                {
                    resultHandle.Free();
                }
            }

            // Decode only up to the NUL terminator instead of the whole 1000-byte buffer.
            int labelLen = Array.IndexOf(labellist, (byte)0);
            if (labelLen < 0) labelLen = labellist.Length;
            string strGet = System.Text.Encoding.Default.GetString(labellist, 0, labelLen);
            Console.WriteLine("labellist: {0}", strGet);

            // Clamp to the buffer capacity in case the native side reports more
            // boxes than resultlist can hold.
            int boxCount = Math.Min(boxesInfo[0], resultlist.Length / 6);
            for (int i = 0; i < boxCount; i++)
            {
                int labelindex = Convert.ToInt32(resultlist[i * 6 + 0]);
                float score = resultlist[i * 6 + 1];
                float left = resultlist[i * 6 + 2];
                float top = resultlist[i * 6 + 3];
                float width = resultlist[i * 6 + 4];
                float height = resultlist[i * 6 + 5];
                Console.WriteLine("score: {0}", score);
                Console.WriteLine("labelindex: {0}", labelindex);
                Console.WriteLine("boxe: {0} {1} {2} {3}", left, top, width, height);
            }

            DestructModel();
        }

        /// <summary>
        /// Copies the pixel bytes of a Bitmap into a tightly packed byte[]
        /// (row padding stripped). <paramref name="stride"/> returns the
        /// original GDI+ row stride in bytes.
        /// </summary>
        public static byte[] GetBGRValues(Bitmap bmp, out int stride)
        {
            var rect = new Rectangle(0, 0, bmp.Width, bmp.Height);
            var bmpData = bmp.LockBits(rect, ImageLockMode.ReadOnly, bmp.PixelFormat);
            try
            {
                stride = bmpData.Stride;
                // Bytes of real pixel data per row, excluding stride padding.
                var rowBytes = bmpData.Width * Image.GetPixelFormatSize(bmp.PixelFormat) / 8;
                byte[] rgbValues = new byte[bmp.Height * rowBytes];
                IntPtr ptr = bmpData.Scan0;
                for (var i = 0; i < bmp.Height; i++)
                {
                    Marshal.Copy(ptr, rgbValues, i * rowBytes, rowBytes);
                    ptr += bmpData.Stride;   // advance by stride, copy only rowBytes
                }
                return rgbValues;
            }
            finally
            {
                bmp.UnlockBits(bmpData);     // always unlock, even if Copy throws
            }
        }

        /// <summary>
        /// Pins a float[] and returns its address. WARNING: the GCHandle is never
        /// freed, so the array stays pinned for the process lifetime; prefer
        /// allocating a GCHandle at the call site and freeing it (as Main does).
        /// Kept for backward compatibility.
        /// </summary>
        public static IntPtr FloatToIntptr(float[] bytes)
        {
            GCHandle hObject = GCHandle.Alloc(bytes, GCHandleType.Pinned);
            return hObject.AddrOfPinnedObject();
        }
    }
}

+ 97 - 0
dygraph/examples/C#_deploy/README.md

@@ -0,0 +1,97 @@
+# 兼容并包的PaddleX-Inference部署方式
+
+在新版本的PaddleX中,对于CPP的部署代码方式做了非常大的变化:
+* 支持用户将PaddleDetection、PaddleSeg、PaddleClas训练出来的模型通过一套部署代码实现快速部署,实现了多个模型的打通。
+* 对于一个视觉任务里面既有检测,又有分割来说,极大的简化了使用的流程。
+* 提供了更好用的tensorRT加速的使用方式。
+
+
+下面我们具体以Windows系统为例,基于PaddleX的这套CPP,说明一下如何实现工业化的部署(trt加速)
+
+项目使用环境说明:
+
+* CUDA10.2  Cudnn 7.6
+* opencv版本3.4.6
+* PaddleInference 10.2的预测库
+* TensorRT 7.0.0
+* Cmake 3.5
+* VS2019 社区版
+
+
+## 1 环境准备
+
+* 下载[opencv](https://bj.bcebos.com/paddleseg/deploy/opencv-3.4.6-vc14_vc15.exe),并设置系统环境变量
+
+<div align="center">
+<img src="./images/1.png"  width = "800" />              </div>
+
+* 下载Tensorrt,并设置系统环境变量
+在本项目中使用的cuda版本是10.2,下载对应的trt版本
+
+
+<div align="center">
+<img src="./images/13.png"  width = "800" />              </div>
+
+
+
+## 2 代码编译
+下载好PaddleX代码
+
+`git clone https://github.com/PaddlePaddle/PaddleX.git`
+
+使用Cmake进行编译,我们主要对`PaddleX/dygraph/deploy/cpp`中代码进行编译,并创建`out`文件夹用来承接编译生成的内容,
+
+<div align="center">
+<img src="./images/2.png"  width = "800" />              </div>
+
+点击Configure进行选项筛选,并选择X64一项,并点击finish
+<div align="center">
+<img src="./images/3.png"  width = "800" />              </div>
+在运行上述步骤后,出现报错,并在空白区域出现待补充的内容。
+<div align="center">
+<img src="./images/4.png"  width = "800" />              </div>
+
+用户在这里补充opencv tensorrt paddle预测库,cuda的lib库的路径,并且勾选WITH_GPU  WITH_MKL WITH_TENSORRT 几项然后重新进行生成
+<div align="center">
+<img src="./images/5.png"  width = "800" />              </div>
+依次点击修改
+
+<div align="center">
+<img src="./images/6.png"  width = "800" />              </div>
+最终在out文件夹中出现了.sln文件,则表示通过cmake生成成功了解决方案
+<div align="center">
+<img src="./images/7.png"  width = "800" />              </div>
+打开sln文件,会发现在PaddleDeploy目录下生成了7个项目,其中关键的是
+
+`batch_infer` 
+
+`model_infer` 
+
+`multi_gpu_model_infer`
+
+ `tensorrt_infer`
+
+<div align="center">
+<img src="./images/8.png"  width = "800" />              </div>
+
+## 3 生成dll
+### 3.1 修改cmakelists
+<div align="center">
+<img src="./images/17.png"  width = "800" />             </div>
+
+
+### 3.2 修改model_infer.cpp并重新生成dll
+* 该部分代码已经提供
+
+### 3.3 创建一个c#项目并被调用
+* 将生成的dll导入到C#项目中
+<div align="center">
+<img src="./images/16.png"  width = "800" />             </div>
+
+### 3.4 执行C#项目去预测
+<div align="center">
+<img src="./images/15.png"  width = "800" />             </div>
+
+
+-------------
+## 修改后的model_infer.cpp和C#代码已经提供.

二進制
dygraph/examples/C#_deploy/images/1.png


二進制
dygraph/examples/C#_deploy/images/10.png


二進制
dygraph/examples/C#_deploy/images/11.png


二進制
dygraph/examples/C#_deploy/images/12.png


二進制
dygraph/examples/C#_deploy/images/13.png


二進制
dygraph/examples/C#_deploy/images/14.png


二進制
dygraph/examples/C#_deploy/images/15.png


二進制
dygraph/examples/C#_deploy/images/16.png


二進制
dygraph/examples/C#_deploy/images/17.png


二進制
dygraph/examples/C#_deploy/images/2.png


二進制
dygraph/examples/C#_deploy/images/3.png


二進制
dygraph/examples/C#_deploy/images/4.png


二進制
dygraph/examples/C#_deploy/images/5.png


二進制
dygraph/examples/C#_deploy/images/6.png


二進制
dygraph/examples/C#_deploy/images/7.png


二進制
dygraph/examples/C#_deploy/images/8.png


二進制
dygraph/examples/C#_deploy/images/9.png


+ 163 - 0
dygraph/examples/C#_deploy/model_infer.cpp

@@ -0,0 +1,163 @@
+// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <gflags/gflags.h>
+#include <string>
+#include <vector>
+
+#include "model_deploy/common/include/paddle_deploy.h"
+
+PaddleDeploy::Model* model;
+
+extern "C" __declspec(dllexport) void InitModel(const char* model_type, const char* model_filename, const char* params_filename, const char* cfg_file)
+{
+	bool use_gpu = false;
+	int gpu_id = 0;
+
+	// create model
+	model = PaddleDeploy::CreateModel(model_type);  //FLAGS_model_type
+
+	// model init
+	model->Init(cfg_file);
+
+	// inference engine init
+	PaddleDeploy::PaddleEngineConfig engine_config;
+	engine_config.model_filename = model_filename;
+	engine_config.params_filename = params_filename;
+	engine_config.use_gpu = use_gpu;
+	engine_config.gpu_id = gpu_id;
+	bool init = model->PaddleEngineInit(engine_config);
+	if (init)
+	{
+		std::cout << "init model success" << std::endl;
+	}	
+}
+/*
+* img: input for predicting.
+* 
+* nWidth: width of img.
+* 
+* nHeight: height of img.
+* 
+* nChannel: channel of img.
+* 
+* output: result of pridict ,include category_id£¬score£¬coordinate¡£
+* 
+* nBoxesNum£º number of box
+* 
+* LabelList: label list of result
+*/
+extern "C" __declspec(dllexport) void ModelPredict(const unsigned char* img, int nWidth, int nHeight,int nChannel, float* output, int* nBoxesNum, char* LabelList)
+{
+	// prepare data
+	std::vector<cv::Mat> imgs;
+
+	int nType = 0;
+	if (nChannel==1)
+	{
+		nType = CV_8UC1;
+	}
+	else if (nChannel == 2)
+	{
+		nType = CV_8UC2;
+	}
+	else if (nChannel == 3)
+	{
+		nType = CV_8UC3;
+	}
+	else if (nChannel == 4)
+	{
+		nType = CV_8UC4;
+	}
+
+	cv::Mat input = cv::Mat::zeros(cv::Size(nWidth, nHeight), nType);
+	memcpy(input.data, img, nHeight * nWidth * nChannel * sizeof(uchar));
+	//cv::imwrite("./1.png", input);
+	imgs.push_back(std::move(input));
+
+	// predict
+	std::vector<PaddleDeploy::Result> results;
+	bool pre = model->Predict(imgs, &results, 1);
+	if (pre)
+	{
+		std::cout << "model predict success" << std::endl;
+	}
+	nBoxesNum[0] = results.size();
+	std::string label ="";
+	for (int num = 0; num < results.size(); num++)
+	{
+		//std::cout << "res: " << results[num] << std::endl;
+		for (int i = 0; i < results[num].det_result->boxes.size(); i++)
+		{
+			//std::cout << "category: " << results[num].det_result->boxes[i].category << std::endl;
+			label = label + results[num].det_result->boxes[i].category+ " ";
+			// labelindex
+			output[num * 6 + 0] = results[num].det_result->boxes[i].category_id;
+			// score
+			output[num * 6 + 1] = results[num].det_result->boxes[i].score;
+			//// box
+			output[num * 6 + 2] = results[num].det_result->boxes[i].coordinate[0];
+			output[num * 6 + 3] = results[num].det_result->boxes[i].coordinate[1];
+			output[num * 6 + 4] = results[num].det_result->boxes[i].coordinate[2];
+			output[num * 6 + 5] = results[num].det_result->boxes[i].coordinate[3];						
+		}
+	}
+	memcpy(LabelList, label.c_str(), strlen(label.c_str()));
+}
+
+extern "C" __declspec(dllexport) void DestructModel()
+{
+	delete model;
+	std::cout << "destruct model success" << std::endl;
+
+}
+
+//DEFINE_string(model_filename, "", "Path of det inference model");
+//DEFINE_string(params_filename, "", "Path of det inference params");
+//DEFINE_string(cfg_file, "", "Path of yaml file");
+//DEFINE_string(model_type, "", "model type");
+//DEFINE_string(image, "", "Path of test image file");
+//DEFINE_bool(use_gpu, false, "Infering with GPU or CPU");
+//DEFINE_int32(gpu_id, 0, "GPU card id");
+//
+//int main(int argc, char** argv) {
+//  // Parsing command-line
+//  google::ParseCommandLineFlags(&argc, &argv, true);
+//
+//  // create model
+//  PaddleDeploy::Model* model = PaddleDeploy::CreateModel(FLAGS_model_type);
+//
+//  // model init
+//  model->Init(FLAGS_cfg_file);
+//
+//  // inference engine init
+//  PaddleDeploy::PaddleEngineConfig engine_config;
+//  engine_config.model_filename = FLAGS_model_filename;
+//  engine_config.params_filename = FLAGS_params_filename;
+//  engine_config.use_gpu = FLAGS_use_gpu;
+//  engine_config.gpu_id = FLAGS_gpu_id;
+//  model->PaddleEngineInit(engine_config);
+//
+//  // prepare data
+//  std::vector<cv::Mat> imgs;
+//  imgs.push_back(std::move(cv::imread(FLAGS_image)));
+//
+//  // predict
+//  std::vector<PaddleDeploy::Result> results;
+//  model->Predict(imgs, &results, 1);
+//
+//  std::cout << results[0] << std::endl;
+//  delete model;
+//  return 0;
+//}