
remove batch_size args

Drop the fixed batch_size parameter from Model::Init() and Model::create_predictor(); each batch predict() overload now sizes inputs_batch_ from the incoming image batch instead.

jack committed 5 years ago
parent
commit 23a624408a
2 changed files with 7 additions and 9 deletions
  1. deploy/cpp/include/paddlex/paddlex.h (+3 -6)
  2. deploy/cpp/src/paddlex.cpp (+4 -3)

+ 3 - 6
deploy/cpp/include/paddlex/paddlex.h

@@ -72,23 +72,20 @@ class Model {
   * @param use_trt: whether to use TensorRT when inferring
   * @param gpu_id: the id of the GPU to use when inferring on GPU
   * @param key: the key for decrypting the model when using an encrypted model
-   * @param batch_size: batch size of infering
    * */
   void Init(const std::string& model_dir,
             bool use_gpu = false,
             bool use_trt = false,
             int gpu_id = 0,
-            std::string key = "",
-            int batch_size = 1) {
-    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, batch_size);
+            std::string key = "") {
+    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key);
   }
 
   void create_predictor(const std::string& model_dir,
                         bool use_gpu = false,
                         bool use_trt = false,
                         int gpu_id = 0,
-                        std::string key = "",
-                        int batch_size = 1);
+                        std::string key = "");
 
   /*
    * @brief
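
After this change, Init() takes no batch size. A minimal sketch of an updated call site follows, assuming the PaddleX namespace used elsewhere in deploy/cpp; the model directory path is illustrative:

#include "paddlex/paddlex.h"

int main() {
  PaddleX::Model model;
  // batch_size is no longer an Init() argument; the batch size is taken
  // from the number of images handed to predict() at call time.
  model.Init("./inference_model",  // model_dir (illustrative path)
             false,                // use_gpu
             false,                // use_trt
             0,                    // gpu_id
             "");                  // key (empty: unencrypted model)
  return 0;
}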

+ 4 - 3
deploy/cpp/src/paddlex.cpp

@@ -22,8 +22,7 @@ void Model::create_predictor(const std::string& model_dir,
                              bool use_gpu,
                              bool use_trt,
                              int gpu_id,
-                             std::string key,
-                             int batch_size) {
+                             std::string key) {
   paddle::AnalysisConfig config;
   std::string model_file = model_dir + OS_PATH_SEP + "__model__";
   std::string params_file = model_dir + OS_PATH_SEP + "__params__";
@@ -76,7 +75,6 @@ void Model::create_predictor(const std::string& model_dir,
         false /* use_calib_mode*/);
   }
   predictor_ = std::move(CreatePaddlePredictor(config));
-  inputs_batch_.assign(batch_size, ImageBlob());
 }
 
 bool Model::load_config(const std::string& yaml_input) {
@@ -192,6 +190,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
                  "to function predict()!" << std::endl;
     return false;
   }
+  inputs_batch_.assign(im_batch.size(), ImageBlob());
  // Preprocess the input images
   if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
     std::cerr << "Preprocess failed!" << std::endl;
@@ -356,6 +355,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
     return false;
   }
 
+  inputs_batch_.assign(im_batch.size(), ImageBlob());
   int batch_size = im_batch.size();
  // Preprocess the input images
   if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
@@ -637,6 +637,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
   }
 
  // Preprocess the input images
+  inputs_batch_.assign(im_batch.size(), ImageBlob());
   if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
     std::cerr << "Preprocess failed!" << std::endl;
     return false;
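
Each of the batch predict() overloads now resizes the shared inputs_batch_ buffer from the incoming batch rather than relying on a size fixed at Init(). A self-contained sketch of that pattern, with ImageBlob and the image type as stand-ins rather than the real classes:

#include <iostream>
#include <vector>

struct ImageBlob {};  // stand-in for the real preprocessed-input buffer

class Model {
 public:
  bool predict(const std::vector<int>& im_batch) {
    // Mirror of inputs_batch_.assign(im_batch.size(), ImageBlob()):
    // the reusable buffer is resized to this call's batch size.
    inputs_batch_.assign(im_batch.size(), ImageBlob());
    std::cout << "prepared " << inputs_batch_.size() << " input slots\n";
    return true;
  }

 private:
  std::vector<ImageBlob> inputs_batch_;
};

int main() {
  Model m;
  m.predict({1, 2, 3});  // batch of 3 -> 3 input slots
  m.predict({7});        // batch of 1 -> 1 input slot
  return 0;
}

This keeps the buffer allocation tied to the data actually passed in, so callers no longer have to predict their maximum batch size up front.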