// paddlex.h
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>

#include "yaml-cpp/yaml.h"

#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif

#include "paddle_inference_api.h"  // NOLINT

#include "config_parser.h"  // NOLINT
#include "results.h"        // NOLINT
#include "transforms.h"     // NOLINT

#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"  // NOLINT
#include "model_code.h"            // NOLINT
#endif
  36. namespace PaddleX {
  37. /*
  38. * @brief
  39. * This class encapsulates all necessary proccess steps of model infering, which
  40. * include image matrix preprocessing, model predicting and results postprocessing.
  41. * The entire process of model infering can be simplified as below:
  42. * 1. preprocess image matrix (resize, padding, ......)
  43. * 2. model infer
  44. * 3. postprocess the results which generated from model infering
  45. *
  46. * @example
  47. * PaddleX::Model cls_model;
  48. * // initialize model configuration
  49. * cls_model.Init(cls_model_dir, use_gpu, use_trt, gpu_id, encryption_key);
  50. * // define a Classification result object
  51. * PaddleX::ClsResult cls_result;
  52. * // get image matrix from image file
  53. * cv::Mat im = cv::imread(image_file_path, 1);
  54. * cls_model.predict(im, &cls_result);
  55. * */
  56. class Model {
  57. public:
  58. /*
  59. * @brief
  60. * This method aims to initialize the model configuration
  61. *
  62. * @param model_dir: the directory which contains model.yml
  63. * @param use_gpu: use gpu or not when infering
  64. * @param use_trt: use Tensor RT or not when infering
  65. * @param gpu_id: the id of gpu when infering with using gpu
  66. * @param key: the key of encryption when using encrypted model
  67. * @param use_ir_optim: use ir optimization when infering
  68. * */
  69. void Init(const std::string& model_dir,
  70. bool use_gpu = false,
  71. bool use_trt = false,
  72. bool use_mkl = true,
  73. int gpu_id = 0,
  74. std::string key = "",
  75. int thread_num = 1,
  76. bool use_ir_optim = true) {
  77. create_predictor(
  78. model_dir,
  79. use_gpu,
  80. use_trt,
  81. use_mkl,
  82. gpu_id,
  83. key,
  84. thread_num,
  85. use_ir_optim);
  86. }
  87. void create_predictor(const std::string& model_dir,
  88. bool use_gpu = false,
  89. bool use_trt = false,
  90. bool use_mkl = true,
  91. int gpu_id = 0,
  92. std::string key = "",
  93. int thread_num = 1,
  94. bool use_ir_optim = true);
  95. /*
  96. * @brief
  97. * This method aims to load model configurations which include
  98. * transform steps and label list
  99. *
  100. * @param yaml_input: model configuration string
  101. * @return true if load configuration successfully
  102. * */
  103. bool load_config(const std::string& yaml_input);
  104. /*
  105. * @brief
  106. * This method aims to transform single image matrix, the result will be
  107. * returned at second parameter.
  108. *
  109. * @param input_im: single image matrix to be transformed
  110. * @param blob: the raw data of single image matrix after transformed
  111. * @return true if preprocess image matrix successfully
  112. * */
  113. bool preprocess(const cv::Mat& input_im, ImageBlob* blob);
  114. /*
  115. * @brief
  116. * This method aims to transform mutiple image matrixs, the result will be
  117. * returned at second parameter.
  118. *
  119. * @param input_im_batch: a batch of image matrixs to be transformed
  120. * @param blob_blob: raw data of a batch of image matrixs after transformed
  121. * @param thread_num: the number of preprocessing threads,
  122. * each thread run preprocess on single image matrix
  123. * @return true if preprocess a batch of image matrixs successfully
  124. * */
  125. bool preprocess(const std::vector<cv::Mat> &input_im_batch,
  126. std::vector<ImageBlob> *blob_batch,
  127. int thread_num = 1);
  128. /*
  129. * @brief
  130. * This method aims to execute classification model prediction on single image matrix,
  131. * the result will be returned at second parameter.
  132. *
  133. * @param im: single image matrix to be predicted
  134. * @param result: classification prediction result data after postprocessed
  135. * @return true if predict successfully
  136. * */
  137. bool predict(const cv::Mat& im, ClsResult* result);
  138. /*
  139. * @brief
  140. * This method aims to execute classification model prediction on a batch of image matrixs,
  141. * the result will be returned at second parameter.
  142. *
  143. * @param im: a batch of image matrixs to be predicted
  144. * @param results: a batch of classification prediction result data after postprocessed
  145. * @param thread_num: the number of predicting threads, each thread run prediction
  146. * on single image matrix
  147. * @return true if predict successfully
  148. * */
  149. bool predict(const std::vector<cv::Mat> &im_batch,
  150. std::vector<ClsResult> *results,
  151. int thread_num = 1);
  152. /*
  153. * @brief
  154. * This method aims to execute detection or instance segmentation model prediction
  155. * on single image matrix, the result will be returned at second parameter.
  156. *
  157. * @param im: single image matrix to be predicted
  158. * @param result: detection or instance segmentation prediction result data after postprocessed
  159. * @return true if predict successfully
  160. * */
  161. bool predict(const cv::Mat& im, DetResult* result);
  162. /*
  163. * @brief
  164. * This method aims to execute detection or instance segmentation model prediction
  165. * on a batch of image matrixs, the result will be returned at second parameter.
  166. *
  167. * @param im: a batch of image matrix to be predicted
  168. * @param result: detection or instance segmentation prediction result data after postprocessed
  169. * @param thread_num: the number of predicting threads, each thread run prediction
  170. * on single image matrix
  171. * @return true if predict successfully
  172. * */
  173. bool predict(const std::vector<cv::Mat> &im_batch,
  174. std::vector<DetResult> *results,
  175. int thread_num = 1);
  176. /*
  177. * @brief
  178. * This method aims to execute segmentation model prediction on single image matrix,
  179. * the result will be returned at second parameter.
  180. *
  181. * @param im: single image matrix to be predicted
  182. * @param result: segmentation prediction result data after postprocessed
  183. * @return true if predict successfully
  184. * */
  185. bool predict(const cv::Mat& im, SegResult* result);
  186. /*
  187. * @brief
  188. * This method aims to execute segmentation model prediction on a batch of image matrix,
  189. * the result will be returned at second parameter.
  190. *
  191. * @param im: a batch of image matrix to be predicted
  192. * @param result: segmentation prediction result data after postprocessed
  193. * @param thread_num: the number of predicting threads, each thread run prediction
  194. * on single image matrix
  195. * @return true if predict successfully
  196. * */
  197. bool predict(const std::vector<cv::Mat> &im_batch,
  198. std::vector<SegResult> *results,
  199. int thread_num = 1);
  200. // model type, include 3 type: classifier, detector, segmenter
  201. std::string type;
  202. // model name, such as FasterRCNN, YOLOV3 and so on.
  203. std::string name;
  204. std::map<int, std::string> labels;
  205. // transform(preprocessing) pipeline manager
  206. Transforms transforms_;
  207. // single input preprocessed data
  208. ImageBlob inputs_;
  209. // batch input preprocessed data
  210. std::vector<ImageBlob> inputs_batch_;
  211. // raw data of predicting results
  212. std::vector<float> outputs_;
  213. // a predictor which run the model predicting
  214. std::unique_ptr<paddle::PaddlePredictor> predictor_;
  215. };
  216. } // namespace PaddleX