// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>

#include "yaml-cpp/yaml.h"

#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif

#include "paddle_inference_api.h"  // NOLINT

#include "config_parser.h"  // NOLINT
#include "results.h"  // NOLINT
#include "transforms.h"  // NOLINT

#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"  // NOLINT
#include "model_code.h"  // NOLINT
#endif
namespace PaddleX {

/*
 * @brief
 * This class encapsulates all the necessary steps of model inference:
 * image matrix preprocessing, model prediction, and postprocessing of the
 * predicted results. The entire inference process can be summarized as:
 * 1. preprocess the image matrix (resize, padding, ...)
 * 2. run model inference
 * 3. postprocess the results generated by model inference
 *
 * @example
 * PaddleX::Model cls_model;
 * // initialize model configuration
 * cls_model.Init(cls_model_dir, use_gpu, use_trt, use_mkl,
 *                mkl_thread_num, gpu_id, encryption_key);
 * // define a classification result object
 * PaddleX::ClsResult cls_result;
 * // get image matrix from image file
 * cv::Mat im = cv::imread(image_file_path, 1);
 * cls_model.predict(im, &cls_result);
 * */
class Model {
 public:
  /*
   * @brief
   * This method initializes the model configuration and creates the
   * underlying predictor.
   *
   * @param model_dir: the directory which contains model.yml
   * @param use_gpu: whether to use the GPU for inference
   * @param use_trt: whether to use TensorRT for inference
   * @param use_mkl: whether to use MKL-DNN for inference
   * @param mkl_thread_num: the number of MKL-DNN threads used for inference
   * @param gpu_id: the id of the GPU to use when inferring on GPU
   * @param key: the decryption key when using an encrypted model
   * @param use_ir_optim: whether to enable IR optimization for inference
   * */
  void Init(const std::string& model_dir,
            bool use_gpu = false,
            bool use_trt = false,
            bool use_mkl = true,
            int mkl_thread_num = 4,
            int gpu_id = 0,
            std::string key = "",
            bool use_ir_optim = true) {
    create_predictor(model_dir,
                     use_gpu,
                     use_trt,
                     use_mkl,
                     mkl_thread_num,
                     gpu_id,
                     key,
                     use_ir_optim);
  }
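
  /*
   * A minimal usage sketch for Init. The model directory path below is
   * illustrative only; an empty key means the model is not encrypted:
   *
   *   PaddleX::Model model;
   *   // CPU inference with MKL-DNN on 4 threads, no TensorRT, no GPU
   *   model.Init("./inference_model", false, false, true, 4, 0, "");
   * */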

  void create_predictor(const std::string& model_dir,
                        bool use_gpu = false,
                        bool use_trt = false,
                        bool use_mkl = true,
                        int mkl_thread_num = 4,
                        int gpu_id = 0,
                        std::string key = "",
                        bool use_ir_optim = true);

  /*
   * @brief
   * This method loads the model configuration, which includes the
   * transform steps and the label list.
   *
   * @param yaml_input: the model configuration string
   * @return true if the configuration is loaded successfully
   * */
  bool load_config(const std::string& yaml_input);
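
  /*
   * A minimal sketch of feeding load_config. It assumes model.yml has been
   * read into a std::string first (the path is illustrative; reading the
   * file this way requires <fstream> and <iterator>):
   *
   *   std::ifstream fin("./inference_model/model.yml");
   *   std::string yaml_input((std::istreambuf_iterator<char>(fin)),
   *                          std::istreambuf_iterator<char>());
   *   if (!model.load_config(yaml_input)) {
   *     std::cerr << "failed to parse model configuration" << std::endl;
   *   }
   * */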

  /*
   * @brief
   * This method transforms a single image matrix; the result is returned
   * through the second parameter.
   *
   * @param input_im: the single image matrix to be transformed
   * @param blob: the raw data of the image matrix after transformation
   * @return true if the image matrix is preprocessed successfully
   * */
  bool preprocess(const cv::Mat& input_im, ImageBlob* blob);

  /*
   * @brief
   * This method transforms multiple image matrices; the result is returned
   * through the second parameter.
   *
   * @param input_im_batch: a batch of image matrices to be transformed
   * @param blob_batch: the raw data of the batch after transformation
   * @param thread_num: the number of preprocessing threads; each thread
   *                    runs preprocessing on a single image matrix
   * @return true if the batch of image matrices is preprocessed successfully
   * */
  bool preprocess(const std::vector<cv::Mat>& input_im_batch,
                  std::vector<ImageBlob>* blob_batch,
                  int thread_num = 1);
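
  /*
   * A sketch of batched preprocessing (the image paths and thread count
   * are illustrative):
   *
   *   std::vector<cv::Mat> batch = {cv::imread("a.jpg", 1),
   *                                 cv::imread("b.jpg", 1)};
   *   std::vector<PaddleX::ImageBlob> blobs;
   *   model.preprocess(batch, &blobs, 2);  // one image per worker thread
   * */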

  /*
   * @brief
   * This method runs classification model prediction on a single image
   * matrix; the result is returned through the second parameter.
   *
   * @param im: the single image matrix to be predicted
   * @param result: the postprocessed classification prediction result
   * @return true if the prediction succeeds
   * */
  bool predict(const cv::Mat& im, ClsResult* result);

  /*
   * @brief
   * This method runs classification model prediction on a batch of image
   * matrices; the results are returned through the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: the postprocessed classification prediction results
   * @param thread_num: the number of prediction threads; each thread runs
   *                    prediction on a single image matrix
   * @return true if the prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat>& im_batch,
               std::vector<ClsResult>* results,
               int thread_num = 1);
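
  /*
   * Sketch: batch classification. The per-image fields of ClsResult are
   * declared in results.h; only the declared API is used here:
   *
   *   std::vector<PaddleX::ClsResult> cls_results;
   *   if (model.predict(batch, &cls_results, 2)) {
   *     std::cout << "predicted " << cls_results.size() << " images"
   *               << std::endl;
   *   }
   * */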

  /*
   * @brief
   * This method runs detection or instance segmentation model prediction
   * on a single image matrix; the result is returned through the second
   * parameter.
   *
   * @param im: the single image matrix to be predicted
   * @param result: the postprocessed detection or instance segmentation
   *                prediction result
   * @return true if the prediction succeeds
   * */
  bool predict(const cv::Mat& im, DetResult* result);

  /*
   * @brief
   * This method runs detection or instance segmentation model prediction
   * on a batch of image matrices; the results are returned through the
   * second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: the postprocessed detection or instance segmentation
   *                 prediction results
   * @param thread_num: the number of prediction threads; each thread runs
   *                    prediction on a single image matrix
   * @return true if the prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat>& im_batch,
               std::vector<DetResult>* results,
               int thread_num = 1);
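
  /*
   * Sketch: detection on a batch (the thread count is illustrative). Each
   * DetResult carries the postprocessed detection output declared in
   * results.h:
   *
   *   std::vector<PaddleX::DetResult> det_results;
   *   if (!model.predict(batch, &det_results, 2)) {
   *     std::cerr << "detection prediction failed" << std::endl;
   *   }
   * */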

  /*
   * @brief
   * This method runs segmentation model prediction on a single image
   * matrix; the result is returned through the second parameter.
   *
   * @param im: the single image matrix to be predicted
   * @param result: the postprocessed segmentation prediction result
   * @return true if the prediction succeeds
   * */
  bool predict(const cv::Mat& im, SegResult* result);

  /*
   * @brief
   * This method runs segmentation model prediction on a batch of image
   * matrices; the results are returned through the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: the postprocessed segmentation prediction results
   * @param thread_num: the number of prediction threads; each thread runs
   *                    prediction on a single image matrix
   * @return true if the prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat>& im_batch,
               std::vector<SegResult>* results,
               int thread_num = 1);
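
  /*
   * Sketch: segmentation on a batch. Each SegResult holds the per-image
   * segmentation maps declared in results.h:
   *
   *   std::vector<PaddleX::SegResult> seg_results;
   *   if (!model.predict(batch, &seg_results, 2)) {
   *     std::cerr << "segmentation prediction failed" << std::endl;
   *   }
   * */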

  // model type: one of "classifier", "detector", "segmenter"
  std::string type;
  // model name, e.g. FasterRCNN, YOLOv3
  std::string name;
  // mapping from category id to label name
  std::map<int, std::string> labels;
  // transform (preprocessing) pipeline manager
  Transforms transforms_;
  // preprocessed data of a single input
  ImageBlob inputs_;
  // preprocessed data of a batch of inputs
  std::vector<ImageBlob> inputs_batch_;
  // raw data of the prediction results
  std::vector<float> outputs_;
  // the predictor that runs model inference
  std::unique_ptr<paddle::PaddlePredictor> predictor_;
  // number of input channels
  int input_channel_;
};

}  // namespace PaddleX