optimize code

Channingss 5 years ago
parent
commit
771c670943

+ 2 - 3
deploy/openvino/include/paddlex/paddlex.h

@@ -56,9 +56,8 @@ class Model {
   std::string name;
   std::vector<std::string> labels;
   Transforms transforms_;
-  ImageBlob inputs_;
-  float* outputs_;
-  ExecutableNetwork executable_network_;
+  Blob::Ptr inputs_;
+  Blob::Ptr output_;
   CNNNetwork network_;
 };
 }  // namespace PaddleX

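Note: with executable_network_ removed here but ie.LoadNetwork(network_, device) now called from predict() (see the paddlex.cpp hunk below), the class presumably still holds the Inference Engine Core and the target device string as members. A minimal sketch of what the remaining declarations might look like; the ie and device members are an assumption, since they sit outside this hunk:

    class Model {
     public:
      bool predict(const cv::Mat& im, ClsResult* result);
      // ...
      std::string name;
      std::vector<std::string> labels;
      Transforms transforms_;
      Blob::Ptr inputs_;
      Blob::Ptr output_;
      CNNNetwork network_;
      Core ie;             // assumed: referenced by LoadNetwork in predict()
      std::string device;  // assumed: e.g. "CPU"
    };
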
+ 4 - 87
deploy/openvino/include/paddlex/transforms.h

@@ -26,32 +26,10 @@
 #include <opencv2/highgui/highgui.hpp>
 #include <opencv2/imgproc/imgproc.hpp>
 
-namespace PaddleX {
+#include <inference_engine.hpp>
+using namespace InferenceEngine;
 
-// Object for storing all preprocessed data
-class ImageBlob {
- public:
-  // Original image height and width
-  std::vector<int> ori_im_size_ = std::vector<int>(2);
-  // Newest image height and width after process
-  std::vector<int> new_im_size_ = std::vector<int>(2);
-  // Image height and width before resize
-  std::vector<std::vector<int>> im_size_before_resize_;
-  // Reshape order
-  std::vector<std::string> reshape_order_;
-  // Resize scale
-  float scale = 1.0;
-  // Buffer for image data after preprocessing
-  std::vector<float> im_data_;
-
-  void clear() {
-    ori_im_size_.clear();
-    new_im_size_.clear();
-    im_size_before_resize_.clear();
-    reshape_order_.clear();
-    im_data_.clear();
-  }
-};
+namespace PaddleX {
 
 // Abstraction of the preprocessing operation class
 class Transform {
@@ -74,34 +52,6 @@ class Normalize : public Transform {
   std::vector<float> std_;
 };
 
-class ResizeByShort : public Transform {
- public:
-  virtual void Init(const YAML::Node& item) {
-    short_size_ = item["short_size"].as<int>();
-    if (item["max_size"].IsDefined()) {
-      max_size_ = item["max_size"].as<int>();
-    } else {
-      max_size_ = -1;
-    }
-  };
-  virtual bool Run(cv::Mat* im, ImageBlob* data);
-
- private:
-  float GenerateScale(const cv::Mat& im);
-  int short_size_;
-  int max_size_;
-};
-
-class ResizeByLong : public Transform {
- public:
-  virtual void Init(const YAML::Node& item) {
-    long_size_ = item["long_size"].as<int>();
-  };
-  virtual bool Run(cv::Mat* im, ImageBlob* data);
-
- private:
-  int long_size_;
-};
 
 class Resize : public Transform {
  public:
@@ -147,44 +97,11 @@ class CenterCrop : public Transform {
   int width_;
 };
 
-class Padding : public Transform {
- public:
-  virtual void Init(const YAML::Node& item) {
-    if (item["coarsest_stride"].IsDefined()) {
-      coarsest_stride_ = item["coarsest_stride"].as<int>();
-      if (coarsest_stride_ < 1) {
-        std::cerr << "[Padding] coarest_stride should greater than 0"
-                  << std::endl;
-        exit(-1);
-      }
-    }
-    if (item["target_size"].IsDefined()) {
-      if (item["target_size"].IsScalar()) {
-        width_ = item["target_size"].as<int>();
-        height_ = item["target_size"].as<int>();
-      } else if (item["target_size"].IsSequence()) {
-        width_ = item["target_size"].as<std::vector<int>>()[0];
-        height_ = item["target_size"].as<std::vector<int>>()[1];
-      }
-    }
-    if (item["im_padding_value"].IsDefined()) {
-      value_ = item["im_padding_value"].as<std::vector<float>>();
-    }
-  }
-  virtual bool Run(cv::Mat* im, ImageBlob* data);
-
- private:
-  int coarsest_stride_ = -1;
-  int width_ = 0;
-  int height_ = 0;
-  std::vector<float> value_;
-};
-
 class Transforms {
  public:
   void Init(const YAML::Node& node, bool to_rgb = true);
   std::shared_ptr<Transform> CreateTransform(const std::string& name);
-  bool Run(cv::Mat* im, ImageBlob* data);
+  bool Run(cv::Mat* im, Blob::Ptr data);
 
  private:
   std::vector<std::shared_ptr<Transform>> transforms_;

+ 19 - 51
deploy/openvino/src/paddlex.cpp

@@ -29,42 +29,8 @@ void Model::create_predictor(const std::string& model_dir,
     input_info->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
     input_info->setLayout(Layout::NCHW);
     input_info->setPrecision(Precision::FP32);
-    executable_network_ = ie.LoadNetwork(network_, device);
-    load_config(cfg_dir);
-}
-
-template <typename T>
-void matU8ToBlob(const cv::Mat& orig_image, InferenceEngine::Blob::Ptr& blob, int batchIndex = 0) {
-    InferenceEngine::SizeVector blobSize = blob->getTensorDesc().getDims();
-    const size_t width = blobSize[3];
-    const size_t height = blobSize[2];
-    const size_t channels = blobSize[1];
-    InferenceEngine::MemoryBlob::Ptr mblob = InferenceEngine::as<InferenceEngine::MemoryBlob>(blob);
-    if (!mblob) {
-        THROW_IE_EXCEPTION << "We expect blob to be inherited from MemoryBlob in matU8ToBlob, "
-            << "but by fact we were not able to cast inputBlob to MemoryBlob";
-    }
-    // locked memory holder should be alive all time while access to its buffer happens
-    auto mblobHolder = mblob->wmap();
-
-    T *blob_data = mblobHolder.as<T *>();
 
-    cv::Mat resized_image(orig_image);
-    if (static_cast<int>(width) != orig_image.size().width ||
-            static_cast<int>(height) != orig_image.size().height) {
-        cv::resize(orig_image, resized_image, cv::Size(width, height));
-    }
-
-    int batchOffset = batchIndex * width * height * channels;
-
-    for (size_t c = 0; c < channels; c++) {
-        for (size_t  h = 0; h < height; h++) {
-            for (size_t w = 0; w < width; w++) {
-                blob_data[batchOffset + c * width * height + h * width + w] =
-                        resized_image.at<cv::Vec3f>(h, w)[c];
-            }
-        }
-    }
+    load_config(cfg_dir);
 }
 
 bool Model::load_config(const std::string& cfg_dir) {
@@ -97,7 +63,7 @@ bool Model::preprocess(cv::Mat* input_im, ImageBlob* blob) {
   return true;
 }
 
-bool Model::predict(cv::Mat* im, ClsResult* result) {
+bool Model::predict(const cv::Mat& im, ClsResult* result) {
-  inputs_.clear();
   if (type == "detector") {
     std::cerr << "Loading model is a 'detector', DetResult should be passed to "
@@ -111,31 +77,33 @@ bool Model::predict(cv::Mat* im, ClsResult* result) {
     return false;
   }
   // Preprocess the input image
-  
-  if (!preprocess(im, &inputs_)) {
+
+  ExecutableNetwork executable_network = ie.LoadNetwork(network_, device);
+  InferRequest infer_request = executable_network.CreateInferRequest();
+  std::string input_name = network_.getInputsInfo().begin()->first;
+  inputs_ = infer_request.GetBlob(input_name);
+
+  auto im_clone = im.clone();
+  if (!preprocess(&im_clone, inputs_)) {
     std::cerr << "Preprocess failed!" << std::endl;
     return false;
   }
-  InferRequest infer_request = executable_network_.CreateInferRequest();
-  std::string input_name = network_.getInputsInfo().begin()->first;
-  //im->convertTo(*im, CV_8UC3);
-  Blob::Ptr input = infer_request.GetBlob(input_name);
-  matU8ToBlob<float>(*im, input, 0);
+
   infer_request.Infer();
+
   std::string output_name = network_.getOutputsInfo().begin()->first;
-  Blob::Ptr output = infer_request.GetBlob(output_name);
+  output_ = infer_request.GetBlob(output_name);
-  MemoryBlob::CPtr moutput = as<MemoryBlob>(output);
+  MemoryBlob::CPtr moutput = as<MemoryBlob>(output_);
   auto moutputHolder = moutput->rmap();
-  outputs_ = moutputHolder.as<float *>();
-  
-  std::cout << sizeof(outputs_) << std::endl;
+  float* outputs_data = moutputHolder.as<float *>();
+
   // Post-process the model output
-  auto ptr = std::max_element(outputs_, outputs_+sizeof(outputs_));
-  result->category_id = std::distance(outputs_, ptr);
+  auto ptr = std::max_element(outputs_data, outputs_data + moutput->size());
+  result->category_id = std::distance(outputs_data, ptr);
   result->score = *ptr;
   result->category = labels[result->category_id];
-  //for (int i=0;i<sizeof(outputs_);i++){
-  //    std::cout <<  labels[i] << std::endl; 
+  //for (int i=0;i<sizeof(outputs_data);i++){
+  //    std::cout <<  labels[i] << std::endl;
   //    std::cout <<  outputs_[i] << std::endl;
   //    }
+  return true;
 }

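For reference, the reworked Blob-based flow in Model::predict() can be exercised end to end with the plain Inference Engine API of this era. The sketch below is hypothetical: the model.xml/model.bin paths and the "CPU" device are assumptions, and the preprocessing step that fills the input blob is elided.

    #include <inference_engine.hpp>
    #include <algorithm>
    #include <iostream>
    #include <string>
    using namespace InferenceEngine;

    int main() {
      Core ie;
      // Read and configure the IR model (paths are placeholders).
      CNNNetwork network = ie.ReadNetwork("model.xml", "model.bin");
      InputInfo::Ptr input_info = network.getInputsInfo().begin()->second;
      input_info->setLayout(Layout::NCHW);
      input_info->setPrecision(Precision::FP32);

      ExecutableNetwork executable_network = ie.LoadNetwork(network, "CPU");
      InferRequest infer_request = executable_network.CreateInferRequest();

      std::string input_name = network.getInputsInfo().begin()->first;
      Blob::Ptr input = infer_request.GetBlob(input_name);
      // ... fill `input` here, e.g. via Transforms::Run(&im, input) ...

      infer_request.Infer();

      std::string output_name = network.getOutputsInfo().begin()->first;
      Blob::Ptr output = infer_request.GetBlob(output_name);
      MemoryBlob::CPtr moutput = as<MemoryBlob>(output);
      auto holder = moutput->rmap();
      const float* scores = holder.as<const float*>();

      // Arg-max over all class scores (moutput->size() is the element count).
      const float* best = std::max_element(scores, scores + moutput->size());
      std::cout << "category_id: " << std::distance(scores, best)
                << "  score: " << *best << std::endl;
      return 0;
    }
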
+ 17 - 98
deploy/openvino/src/transforms.cpp

@@ -40,36 +40,6 @@ bool Normalize::Run(cv::Mat* im, ImageBlob* data) {
   return true;
 }
 
-float ResizeByShort::GenerateScale(const cv::Mat& im) {
-  int origin_w = im.cols;
-  int origin_h = im.rows;
-  int im_size_max = std::max(origin_w, origin_h);
-  int im_size_min = std::min(origin_w, origin_h);
-  float scale =
-      static_cast<float>(short_size_) / static_cast<float>(im_size_min);
-  if (max_size_ > 0) {
-    if (round(scale * im_size_max) > max_size_) {
-      scale = static_cast<float>(max_size_) / static_cast<float>(im_size_max);
-    }
-  }
-  return scale;
-}
-
-bool ResizeByShort::Run(cv::Mat* im, ImageBlob* data) {
-  data->im_size_before_resize_.push_back({im->rows, im->cols});
-  data->reshape_order_.push_back("resize");
-
-  float scale = GenerateScale(*im);
-  int width = static_cast<int>(scale * im->cols);
-  int height = static_cast<int>(scale * im->rows);
-  cv::resize(*im, *im, cv::Size(width, height), 0, 0, cv::INTER_LINEAR);
-
-  data->new_im_size_[0] = im->rows;
-  data->new_im_size_[1] = im->cols;
-  data->scale = scale;
-  return true;
-}
-
 bool CenterCrop::Run(cv::Mat* im, ImageBlob* data) {
   int height = static_cast<int>(im->rows);
   int width = static_cast<int>(im->cols);
@@ -86,55 +56,6 @@ bool CenterCrop::Run(cv::Mat* im, ImageBlob* data) {
   return true;
 }
 
-bool Padding::Run(cv::Mat* im, ImageBlob* data) {
-  data->im_size_before_resize_.push_back({im->rows, im->cols});
-  data->reshape_order_.push_back("padding");
-
-  int padding_w = 0;
-  int padding_h = 0;
-  if (width_ > 1 & height_ > 1) {
-    padding_w = width_ - im->cols;
-    padding_h = height_ - im->rows;
-  } else if (coarsest_stride_ > 1) {
-    padding_h =
-        ceil(im->rows * 1.0 / coarsest_stride_) * coarsest_stride_ - im->rows;
-    padding_w =
-        ceil(im->cols * 1.0 / coarsest_stride_) * coarsest_stride_ - im->cols;
-  }
-
-  if (padding_h < 0 || padding_w < 0) {
-    std::cerr << "[Padding] Computed padding_h=" << padding_h
-              << ", padding_w=" << padding_w
-              << ", but they should be greater than 0." << std::endl;
-    return false;
-  }
-  cv::copyMakeBorder(
-      *im, *im, 0, padding_h, 0, padding_w, cv::BORDER_CONSTANT, cv::Scalar(0));
-  data->new_im_size_[0] = im->rows;
-  data->new_im_size_[1] = im->cols;
-  return true;
-}
-
-bool ResizeByLong::Run(cv::Mat* im, ImageBlob* data) {
-  if (long_size_ <= 0) {
-    std::cerr << "[ResizeByLong] long_size should be greater than 0"
-              << std::endl;
-    return false;
-  }
-  data->im_size_before_resize_.push_back({im->rows, im->cols});
-  data->reshape_order_.push_back("resize");
-  int origin_w = im->cols;
-  int origin_h = im->rows;
-
-  int im_size_max = std::max(origin_w, origin_h);
-  float scale =
-      static_cast<float>(long_size_) / static_cast<float>(im_size_max);
-  cv::resize(*im, *im, cv::Size(), scale, scale, cv::INTER_NEAREST);
-  data->new_im_size_[0] = im->rows;
-  data->new_im_size_[1] = im->cols;
-  data->scale = scale;
-  return true;
-}
 
 bool Resize::Run(cv::Mat* im, ImageBlob* data) {
   if (width_ <= 0 || height_ <= 0) {
@@ -173,16 +94,10 @@ std::shared_ptr<Transform> Transforms::CreateTransform(
     const std::string& transform_name) {
   if (transform_name == "Normalize") {
     return std::make_shared<Normalize>();
-  } else if (transform_name == "ResizeByShort") {
-    return std::make_shared<ResizeByShort>();
   } else if (transform_name == "CenterCrop") {
     return std::make_shared<CenterCrop>();
   } else if (transform_name == "Resize") {
     return std::make_shared<Resize>();
-  } else if (transform_name == "Padding") {
-    return std::make_shared<Padding>();
-  } else if (transform_name == "ResizeByLong") {
-    return std::make_shared<ResizeByLong>();
   } else {
     std::cerr << "There's unexpected transform(name='" << transform_name
               << "')." << std::endl;
@@ -190,16 +105,13 @@ std::shared_ptr<Transform> Transforms::CreateTransform(
   }
 }
 
-bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
+bool Transforms::Run(cv::Mat* im, Blob::Ptr data) {
   // Apply the preprocessing operators in the order given in transforms
   if (to_rgb_) {
     cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
   }
   (*im).convertTo(*im, CV_32FC3);
-  data->ori_im_size_[0] = im->rows;
-  data->ori_im_size_[1] = im->cols;
-  data->new_im_size_[0] = im->rows;
-  data->new_im_size_[1] = im->cols;
+
   for (int i = 0; i < transforms_.size(); ++i) {
     if (!transforms_[i]->Run(im, data)) {
       std::cerr << "Apply transforms to image failed!" << std::endl;
@@ -208,14 +120,21 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
   }
 
   // Convert the image from NHWC to NCHW layout
-  // and store it as a contiguous buffer in the ImageBlob
-  int h = im->rows;
-  int w = im->cols;
-  int c = im->channels();
-  (data->im_data_).resize(c * h * w);
-  float* ptr = (data->im_data_).data();
-  for (int i = 0; i < c; ++i) {
-    cv::extractChannel(*im, cv::Mat(h, w, CV_32FC1, ptr + i * h * w), i);
+  // and store it as a contiguous buffer in the Blob
+  SizeVector blobSize = data->getTensorDesc().getDims();
+  const size_t width = blobSize[3];
+  const size_t height = blobSize[2];
+  const size_t channels = blobSize[1];
+  MemoryBlob::Ptr mblob = InferenceEngine::as<MemoryBlob>(data);
+  auto mblobHolder = mblob->wmap();
+  float* blob_data = mblobHolder.as<float*>();
+  for (size_t c = 0; c < channels; c++) {
+    for (size_t h = 0; h < height; h++) {
+      for (size_t w = 0; w < width; w++) {
+        blob_data[c * width * height + h * width + w] =
+            im->at<cv::Vec3f>(h, w)[c];
+      }
+    }
   }
   return true;
 }
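
The index arithmetic in the new copy loop is the usual HWC-to-CHW flattening: element (c, h, w) of the NCHW blob lives at offset c*H*W + h*W + w. A small self-contained illustration of the same copy into a plain std::vector (the image size and pixel values are arbitrary):

    #include <opencv2/core/core.hpp>
    #include <cassert>
    #include <vector>

    int main() {
      // 4x5 image with 3 float channels, every pixel set to (0.1, 0.2, 0.3).
      cv::Mat im(4, 5, CV_32FC3, cv::Scalar(0.1f, 0.2f, 0.3f));
      const int channels = im.channels(), height = im.rows, width = im.cols;

      std::vector<float> chw(channels * height * width);
      for (int c = 0; c < channels; c++) {
        for (int h = 0; h < height; h++) {
          for (int w = 0; w < width; w++) {
            // Destination is channel-major: all of channel 0, then 1, then 2.
            chw[c * width * height + h * width + w] = im.at<cv::Vec3f>(h, w)[c];
          }
        }
      }
      // Channel 1 of pixel (h=1, w=2) lands at 1*20 + 1*5 + 2 = 27.
      assert(chw[27] == im.at<cv::Vec3f>(1, 2)[1]);
      return 0;
    }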