paddlex.h 7.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212
  1. // Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
#pragma once
#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>
#include "yaml-cpp/yaml.h"
#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif
#include "paddle_inference_api.h"  // NOLINT
#include "config_parser.h"
#include "results.h"
#include "transforms.h"
#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"
#include "model_code.h"
#endif
namespace PaddleX {

/*
 * @brief
 * This class encapsulates all necessary process steps of model inference,
 * which include image matrix preprocessing, model prediction and results
 * postprocessing.
 * The entire process of model inference can be simplified as below:
 * 1. preprocess image matrix (resize, padding, ......)
 * 2. model inference
 * 3. postprocess the results generated from model inference
 *
 * @example
 * PaddleX::Model cls_model;
 * // initialize model configuration
 * cls_model.Init(cls_model_dir, use_gpu, use_trt, gpu_id, encryption_key);
 * // define a Classification result object
 * PaddleX::ClsResult cls_result;
 * // get image matrix from image file
 * cv::Mat im = cv::imread(image_file_path, 1);
 * cls_model.predict(im, &cls_result);
 * */
class Model {
 public:
  /*
   * @brief
   * This method aims to initialize the model configuration.
   * It simply forwards all arguments to create_predictor().
   *
   * @param model_dir: the directory which contains model.yml
   * @param use_gpu: use gpu or not when inferring
   * @param use_trt: use TensorRT or not when inferring
   * @param gpu_id: the id of the gpu used when inferring on gpu
   * @param key: the key for decryption when using an encrypted model
   * @param batch_size: batch size used for inference
   * */
  void Init(const std::string& model_dir,
            bool use_gpu = false,
            bool use_trt = false,
            int gpu_id = 0,
            std::string key = "",
            int batch_size = 1) {
    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, batch_size);
  }

  /*
   * @brief
   * Builds the underlying paddle::PaddlePredictor (stored in predictor_)
   * from the files in model_dir. Same parameters as Init(); implementation
   * lives in the corresponding .cpp file (not visible here).
   * NOTE(review): presumably also loads model.yml via load_config() —
   * confirm against the .cpp implementation.
   * */
  void create_predictor(const std::string& model_dir,
                        bool use_gpu = false,
                        bool use_trt = false,
                        int gpu_id = 0,
                        std::string key = "",
                        int batch_size = 1);

  /*
   * @brief
   * This method aims to load model configurations which include
   * transform steps and label list.
   *
   * @param model_dir: the directory which contains model.yml
   * @return true if the configuration is loaded successfully
   * */
  bool load_config(const std::string& model_dir);

  /*
   * @brief
   * This method aims to transform a single image matrix; the result is
   * returned through the second parameter.
   *
   * @param input_im: single image matrix to be transformed
   * @param blob: the raw data of the single image matrix after transform
   * @return true if the image matrix is preprocessed successfully
   * */
  bool preprocess(const cv::Mat& input_im, ImageBlob* blob);

  /*
   * @brief
   * This method aims to transform multiple image matrices; the result is
   * returned through the second parameter.
   *
   * @param input_im_batch: a batch of image matrices to be transformed
   * @param blob_batch: raw data of a batch of image matrices after transform
   * @param thread_num: the number of preprocessing threads,
   *                    each thread runs preprocess on a single image matrix
   * @return true if the batch of image matrices is preprocessed successfully
   * */
  bool preprocess(const std::vector<cv::Mat> &input_im_batch, std::vector<ImageBlob> &blob_batch, int thread_num = 1);

  /*
   * @brief
   * This method aims to execute classification model prediction on a single
   * image matrix; the result is returned through the second parameter.
   *
   * @param im: single image matrix to be predicted
   * @param result: classification prediction result data after postprocessing
   * @return true if the prediction is executed successfully
   * */
  bool predict(const cv::Mat& im, ClsResult* result);

  /*
   * @brief
   * This method aims to execute classification model prediction on a batch
   * of image matrices; the result is returned through the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param results: a batch of classification prediction result data after
   *                 postprocessing
   * @param thread_num: the number of predicting threads, each thread runs
   *                    prediction on a single image matrix
   * @return true if the prediction is executed successfully
   * */
  bool predict(const std::vector<cv::Mat> &im_batch, std::vector<ClsResult> &results, int thread_num = 1);

  /*
   * @brief
   * This method aims to execute detection or instance segmentation model
   * prediction on a single image matrix; the result is returned through the
   * second parameter.
   *
   * @param im: single image matrix to be predicted
   * @param result: detection or instance segmentation prediction result data
   *                after postprocessing
   * @return true if the prediction is executed successfully
   * */
  bool predict(const cv::Mat& im, DetResult* result);

  /*
   * @brief
   * This method aims to execute detection or instance segmentation model
   * prediction on a batch of image matrices; the result is returned through
   * the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param result: detection or instance segmentation prediction result data
   *                after postprocessing
   * @param thread_num: the number of predicting threads, each thread runs
   *                    prediction on a single image matrix
   * @return true if the prediction is executed successfully
   * */
  bool predict(const std::vector<cv::Mat> &im_batch, std::vector<DetResult> &result, int thread_num = 1);

  /*
   * @brief
   * This method aims to execute segmentation model prediction on a single
   * image matrix; the result is returned through the second parameter.
   *
   * @param im: single image matrix to be predicted
   * @param result: segmentation prediction result data after postprocessing
   * @return true if the prediction is executed successfully
   * */
  bool predict(const cv::Mat& im, SegResult* result);

  /*
   * @brief
   * This method aims to execute segmentation model prediction on a batch of
   * image matrices; the result is returned through the second parameter.
   *
   * @param im_batch: a batch of image matrices to be predicted
   * @param result: segmentation prediction result data after postprocessing
   * @param thread_num: the number of predicting threads, each thread runs
   *                    prediction on a single image matrix
   * @return true if the prediction is executed successfully
   * */
  bool predict(const std::vector<cv::Mat> &im_batch, std::vector<SegResult> &result, int thread_num = 1);

  // model type, one of 3 types: classifier, detector, segmenter
  std::string type;
  // model name, such as FasterRCNN, YOLOV3 and so on
  std::string name;
  // label map — presumably category id -> label name, loaded by
  // load_config() from model.yml; confirm against the .cpp implementation
  std::map<int, std::string> labels;
  // transform (preprocessing) pipeline manager
  Transforms transforms_;
  // single-input preprocessed data
  ImageBlob inputs_;
  // batch-input preprocessed data
  std::vector<ImageBlob> inputs_batch_;
  // raw data of prediction results
  std::vector<float> outputs_;
  // the predictor which runs the model prediction
  std::unique_ptr<paddle::PaddlePredictor> predictor_;
};
}  // namespace PaddleX