paddlex.h
  1. // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #pragma once
  15. #include <functional>
  16. #include <iostream>
  17. #include <map>
  18. #include <memory>
  19. #include <numeric>
  20. #include <string>
  21. #include <vector>
  22. #include "yaml-cpp/yaml.h"
  23. #ifdef _WIN32
  24. #define OS_PATH_SEP "\\"
  25. #else
  26. #define OS_PATH_SEP "/"
  27. #endif
  28. #include "paddle_inference_api.h" // NOLINT
  29. #include "config_parser.h" // NOLINT
  30. #include "results.h" // NOLINT
  31. #include "transforms.h" // NOLINT
  32. #ifdef WITH_ENCRYPTION
  33. #include "paddle_model_decrypt.h" // NOLINT
  34. #include "model_code.h" // NOLINT
  35. #endif
  36. namespace PaddleX {
/*
 * @brief
 * This class encapsulates all necessary processing steps of model inference,
 * which include image matrix preprocessing, model prediction and result
 * postprocessing. The entire process of model inference can be simplified
 * as below:
 * 1. preprocess the image matrix (resize, padding, ...)
 * 2. run model inference
 * 3. postprocess the results generated by model inference
 *
 * @example
 * PaddleX::Model cls_model;
 * // initialize model configuration
 * cls_model.Init(cls_model_dir, use_gpu, use_trt, gpu_id, encryption_key);
 * // define a Classification result object
 * PaddleX::ClsResult cls_result;
 * // get image matrix from image file
 * cv::Mat im = cv::imread(image_file_path, 1);
 * cls_model.predict(im, &cls_result);
 * */
class Model {
 public:
  /*
   * @brief
   * This method aims to initialize the model configuration. It is a thin
   * convenience wrapper that forwards all arguments to create_predictor.
   *
   * @param model_dir: the directory which contains model.yml
   * @param use_gpu: use gpu or not when inferring
   * @param use_trt: use TensorRT or not when inferring
   * @param gpu_id: the id of the gpu used when inferring with gpu
   * @param key: the key of encryption when using an encrypted model
   * @param batch_size: batch size of inference
   * */
  void Init(const std::string& model_dir,
            bool use_gpu = false,
            bool use_trt = false,
            int gpu_id = 0,
            std::string key = "",
            int batch_size = 1) {
    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, batch_size);
  }

  /*
   * @brief
   * Creates the underlying inference predictor (stored in predictor_).
   * Parameters are identical to Init; the implementation lives in the
   * corresponding .cpp file.
   * */
  void create_predictor(const std::string& model_dir,
                        bool use_gpu = false,
                        bool use_trt = false,
                        int gpu_id = 0,
                        std::string key = "",
                        int batch_size = 1);

  /*
   * @brief
   * This method aims to load model configurations which include
   * transform steps and the label list.
   *
   * @param model_dir: the directory which contains model.yml
   * @return true if the configuration is loaded successfully
   * */
  bool load_config(const std::string& model_dir);

  /*
   * @brief
   * This method aims to transform a single image matrix; the result is
   * returned through the second parameter.
   *
   * @param input_im: single image matrix to be transformed
   * @param blob: the raw data of the single image matrix after transformation
   * @return true if the image matrix is preprocessed successfully
   * */
  bool preprocess(const cv::Mat& input_im, ImageBlob* blob);

  /*
   * @brief
   * This method aims to transform multiple image matrices; the result is
   * returned through the second parameter.
   *
   * @param input_im_batch: a batch of image matrices to be transformed
   * @param blob_batch: raw data of a batch of image matrices after
   *        transformation
   * @param thread_num: the number of preprocessing threads,
   *        each thread runs preprocess on a single image matrix
   * @return true if the batch of image matrices is preprocessed successfully
   * */
  bool preprocess(const std::vector<cv::Mat> &input_im_batch,
                  std::vector<ImageBlob> *blob_batch,
                  int thread_num = 1);

  /*
   * @brief
   * This method aims to execute classification model prediction on a single
   * image matrix; the result is returned through the second parameter.
   *
   * @param im: single image matrix to be predicted
   * @param result: classification prediction result data after postprocessing
   * @return true if prediction succeeds
   * */
  bool predict(const cv::Mat& im, ClsResult* result);

  /*
   * @brief
   * This method aims to execute classification model prediction on a batch
   * of image matrices; the result is returned through the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: a batch of classification prediction result data after
   *        postprocessing
   * @param thread_num: the number of predicting threads, each thread runs
   *        prediction on a single image matrix
   * @return true if prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat> &im_batch,
               std::vector<ClsResult> *results,
               int thread_num = 1);

  /*
   * @brief
   * This method aims to execute detection or instance segmentation model
   * prediction on a single image matrix; the result is returned through the
   * second parameter.
   *
   * @param im: single image matrix to be predicted
   * @param result: detection or instance segmentation prediction result data
   *        after postprocessing
   * @return true if prediction succeeds
   * */
  bool predict(const cv::Mat& im, DetResult* result);

  /*
   * @brief
   * This method aims to execute detection or instance segmentation model
   * prediction on a batch of image matrices; the result is returned through
   * the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param result: detection or instance segmentation prediction result data
   *        after postprocessing
   * @param thread_num: the number of predicting threads, each thread runs
   *        prediction on a single image matrix
   * @return true if prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat> &im_batch,
               std::vector<DetResult> *result,
               int thread_num = 1);

  /*
   * @brief
   * This method aims to execute segmentation model prediction on a single
   * image matrix; the result is returned through the second parameter.
   *
   * @param im: single image matrix to be predicted
   * @param result: segmentation prediction result data after postprocessing
   * @return true if prediction succeeds
   * */
  bool predict(const cv::Mat& im, SegResult* result);

  /*
   * @brief
   * This method aims to execute segmentation model prediction on a batch of
   * image matrices; the result is returned through the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param result: segmentation prediction result data after postprocessing
   * @param thread_num: the number of predicting threads, each thread runs
   *        prediction on a single image matrix
   * @return true if prediction succeeds
   * */
  bool predict(const std::vector<cv::Mat> &im_batch,
               std::vector<SegResult> *result,
               int thread_num = 1);

  // model type, one of 3 kinds: classifier, detector, segmenter
  std::string type;
  // model name, such as FasterRCNN, YOLOV3 and so on
  std::string name;
  // class-id -> label-name mapping (the label list loaded by load_config)
  std::map<int, std::string> labels;
  // transform (preprocessing) pipeline manager
  Transforms transforms_;
  // single-input preprocessed data
  ImageBlob inputs_;
  // batch-input preprocessed data
  std::vector<ImageBlob> inputs_batch_;
  // raw data of prediction results
  std::vector<float> outputs_;
  // the predictor which runs the model inference
  std::unique_ptr<paddle::PaddlePredictor> predictor_;
};
  204. } // namespace PaddleX