Răsfoiți Sursa

add comment in paddlex.h

jack 5 ani în urmă
părinte
comite
d8fe85c4b0
2 a modificat fișierele cu 129 adăugiri și 8 ștergeri
  1. 129 4
      deploy/cpp/include/paddlex/paddlex.h
  2. 0 4
      deploy/cpp/src/paddlex.cpp

+ 129 - 4
deploy/cpp/include/paddlex/paddlex.h

@@ -39,14 +39,44 @@
 
 namespace PaddleX {
 
+/*
+ * @brief
+ * This class encapsulates all necessary processing steps of model inference,
+ * which include image matrix preprocessing, model prediction and results
+ * postprocessing. The entire process of model inference can be simplified
+ * as below:
+ * 1. preprocess the image matrix (resize, padding, ...)
+ * 2. run model inference
+ * 3. postprocess the results generated by model inference
+ *
+ * @example
+ *  PaddleX::Model cls_model;
+ *  // initialize model configuration
+ *  cls_model.Init(cls_model_dir, use_gpu, use_trt, gpu_id, encryption_key);
+ *  // define a Classification result object
+ *  PaddleX::ClsResult cls_result;
+ *  // get image matrix from image file
+ *  cv::Mat im = cv::imread(image_file_path, 1);
+ *  cls_model.predict(im, &cls_result);
+ * */
 class Model {
  public:
+  /*
+   * @brief
+   * This method aims to initialize the model configuration
+   *
+   * @param model_dir: the directory which contains model.yml
+   * @param use_gpu: use gpu or not when inferring
+   * @param use_trt: use TensorRT or not when inferring
+   * @param gpu_id: the id of the gpu used when inferring with gpu
+   * @param key: the key of encryption when using an encrypted model
+   * @param batch_size: batch size of inference
+   * */
   void Init(const std::string& model_dir,
             bool use_gpu = false,
             bool use_trt = false,
             int gpu_id = 0,
             std::string key = "",
-	    int batch_size = 1) {
+	          int batch_size = 1) {
     create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, batch_size);
   }
 
@@ -55,33 +85,128 @@ class Model {
                         bool use_trt = false,
                         int gpu_id = 0,
                         std::string key = "",
-			int batch_size = 1);
-
+			                  int batch_size = 1);
+ 
+  /*
+   * @brief
+   * This method aims to load model configurations, which include
+   * the transform steps and the label list
+   *
+   * @param model_dir: the directory which contains model.yml
+   * @return true if the configuration is loaded successfully
+   * */
   bool load_config(const std::string& model_dir);
 
+  /*
+   * @brief
+   * This method aims to transform a single image matrix; the result will be
+   * returned in the second parameter.
+   *
+   * @param input_im: single image matrix to be transformed
+   * @param blob: the raw data of the single image matrix after transformation
+   * @return true if preprocessing the image matrix succeeds
+   * */
   bool preprocess(const cv::Mat& input_im, ImageBlob* blob);
   
+  /*
+   * @brief
+   * This method aims to transform multiple image matrices; the result will be
+   * returned in the second parameter.
+   *
+   * @param input_im_batch: a batch of image matrices to be transformed
+   * @param blob_batch: raw data of a batch of image matrices after transformation
+   * @param thread_num: the number of preprocessing threads,
+   *                    each thread runs preprocess on a single image matrix
+   * @return true if preprocessing a batch of image matrices succeeds
+   * */
   bool preprocess(const std::vector<cv::Mat> &input_im_batch, std::vector<ImageBlob> &blob_batch, int thread_num = 1);
 
+  /*
+   * @brief
+   * This method aims to execute classification model prediction on a single
+   * image matrix; the result will be returned in the second parameter.
+   *
+   * @param im: single image matrix to be predicted
+   * @param result: classification prediction result after postprocessing
+   * @return true if prediction succeeds
+   * */
   bool predict(const cv::Mat& im, ClsResult* result);
 
+  /*
+   * @brief
+   * This method aims to execute classification model prediction on a batch of
+   * image matrices; the result will be returned in the second parameter.
+   *
+   * @param im: a batch of image matrices to be predicted
+   * @param results: a batch of classification prediction results after postprocessing
+   * @param thread_num: the number of predicting threads, each thread runs prediction
+   *                    on a single image matrix
+   * @return true if prediction succeeds
+   * */
   bool predict(const std::vector<cv::Mat> &im_batch, std::vector<ClsResult> &results, int thread_num = 1);
 
+  /*
+   * @brief
+   * This method aims to execute detection or instance segmentation model
+   * prediction on a single image matrix; the result will be returned in the
+   * second parameter.
+   *
+   * @param im: single image matrix to be predicted
+   * @param result: detection or instance segmentation prediction result after postprocessing
+   * @return true if prediction succeeds
+   * */
   bool predict(const cv::Mat& im, DetResult* result);
 
+  /*
+   * @brief
+   * This method aims to execute detection or instance segmentation model
+   * prediction on a batch of image matrices; the result will be returned in
+   * the second parameter.
+   *
+   * @param im: a batch of image matrices to be predicted
+   * @param result: detection or instance segmentation prediction results after postprocessing
+   * @param thread_num: the number of predicting threads, each thread runs prediction
+   *                    on a single image matrix
+   * @return true if prediction succeeds
+   * */
   bool predict(const std::vector<cv::Mat> &im_batch, std::vector<DetResult> &result, int thread_num = 1);
   
+  /*
+   * @brief
+   * This method aims to execute segmentation model prediction on a single
+   * image matrix; the result will be returned in the second parameter.
+   *
+   * @param im: single image matrix to be predicted
+   * @param result: segmentation prediction result after postprocessing
+   * @return true if prediction succeeds
+   * */
   bool predict(const cv::Mat& im, SegResult* result);
 
+  /*
+   * @brief
+   * This method aims to execute segmentation model prediction on a batch of
+   * image matrices; the result will be returned in the second parameter.
+   *
+   * @param im: a batch of image matrices to be predicted
+   * @param result: segmentation prediction results after postprocessing
+   * @param thread_num: the number of predicting threads, each thread runs prediction
+   *                    on a single image matrix
+   * @return true if prediction succeeds
+   * */
   bool predict(const std::vector<cv::Mat> &im_batch, std::vector<SegResult> &result, int thread_num = 1);
-  
+ 
+  // model type; one of 3 types: classifier, detector, segmenter
   std::string type;
+  // model name, such as FasterRCNN, YOLOV3 and so on.
   std::string name;
   std::map<int, std::string> labels;
+  // transform(preprocessing) pipeline manager
   Transforms transforms_;
+  // single input preprocessed data
   ImageBlob inputs_;
+  // batch input preprocessed data
   std::vector<ImageBlob> inputs_batch_;
+  // raw data of prediction results
   std::vector<float> outputs_;
+  // a predictor which runs the model prediction
   std::unique_ptr<paddle::PaddlePredictor> predictor_;
 };
 }  // namespace of PaddleX

+ 0 - 4
deploy/cpp/src/paddlex.cpp

@@ -101,8 +101,6 @@ bool Model::load_config(const std::string& model_dir) {
 
 bool Model::preprocess(const cv::Mat& input_im, ImageBlob* blob) {
   cv::Mat im = input_im.clone();
-  int max_h = im.rows;
-  int max_w = im.cols;
   if (!transforms_.Run(&im, blob)) {
     return false;
   }
@@ -113,8 +111,6 @@ bool Model::preprocess(const cv::Mat& input_im, ImageBlob* blob) {
 bool Model::preprocess(const std::vector<cv::Mat> &input_im_batch, std::vector<ImageBlob> &blob_batch, int thread_num) {
   int batch_size = input_im_batch.size();
   bool success = true;
-  int max_h = -1;
-  int max_w = -1;
   thread_num = std::min(thread_num, batch_size);
   #pragma omp parallel for num_threads(thread_num)
   for(int i = 0; i < input_im_batch.size(); ++i) {