浏览代码

change chinese annotation to english

syyxsxx 5 年之前
父节点
当前提交
151ae40bed

+ 34 - 34
deploy/cpp/src/paddlex.cpp

@@ -40,7 +40,7 @@ void Model::create_predictor(const std::string& model_dir,
   }
 #endif
   if (yaml_input == "") {
-    // 读取配置文件
+    // read yaml file
     std::ifstream yaml_fin(yaml_file);
     yaml_fin.seekg(0, std::ios::end);
     size_t yaml_file_size = yaml_fin.tellg();
@@ -48,7 +48,7 @@ void Model::create_predictor(const std::string& model_dir,
     yaml_fin.seekg(0);
     yaml_fin.read(&yaml_input[0], yaml_file_size);
   }
-  // 读取配置文件内容
+  // load yaml file
   if (!load_config(yaml_input)) {
     std::cerr << "Parse file 'model.yml' failed!" << std::endl;
     exit(-1);
@@ -64,13 +64,13 @@ void Model::create_predictor(const std::string& model_dir,
   }
   config.SwitchUseFeedFetchOps(false);
   config.SwitchSpecifyInputNames(true);
-  // 开启图优化
+  // enable graph optimization
 #if defined(__arm__) || defined(__aarch64__)
   config.SwitchIrOptim(false);
 #else
   config.SwitchIrOptim(use_ir_optim);
 #endif
-  // 开启内存优化
+  // enable memory optimization
   config.EnableMemoryOptim();
   if (use_trt) {
     config.EnableTensorRtEngine(
@@ -108,9 +108,9 @@ bool Model::load_config(const std::string& yaml_input) {
       return false;
     }
   }
-  // 构建数据处理流
+  // build data preprocess stream
   transforms_.Init(config["Transforms"], to_rgb);
-  // 读入label list
+  // read label list
   labels.clear();
   for (const auto& item : config["_Attributes"]["labels"]) {
     int index = labels.size();
@@ -152,19 +152,19 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
                  "to function predict()!" << std::endl;
     return false;
   }
-  // 处理输入图像
+  // im preprocess
   if (!preprocess(im, &inputs_)) {
     std::cerr << "Preprocess failed!" << std::endl;
     return false;
   }
-  // 使用加载的模型进行预测
+  // predict
   auto in_tensor = predictor_->GetInputTensor("image");
   int h = inputs_.new_im_size_[0];
   int w = inputs_.new_im_size_[1];
   in_tensor->Reshape({1, 3, h, w});
   in_tensor->copy_from_cpu(inputs_.im_data_.data());
   predictor_->ZeroCopyRun();
-  // 取出模型的输出结果
+  // get result
   auto output_names = predictor_->GetOutputNames();
   auto output_tensor = predictor_->GetOutputTensor(output_names[0]);
   std::vector<int> output_shape = output_tensor->shape();
@@ -174,7 +174,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
   }
   outputs_.resize(size);
   output_tensor->copy_to_cpu(outputs_.data());
-  // 对模型输出结果进行后处理
+  // postprocess
   auto ptr = std::max_element(std::begin(outputs_), std::end(outputs_));
   result->category_id = std::distance(std::begin(outputs_), ptr);
   result->score = *ptr;
@@ -198,12 +198,12 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
     return false;
   }
   inputs_batch_.assign(im_batch.size(), ImageBlob());
-  // 处理输入图像
+  // preprocess
   if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
     std::cerr << "Preprocess failed!" << std::endl;
     return false;
   }
-  // 使用加载的模型进行预测
+  // predict
   int batch_size = im_batch.size();
   auto in_tensor = predictor_->GetInputTensor("image");
   int h = inputs_batch_[0].new_im_size_[0];
@@ -218,7 +218,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
   in_tensor->copy_from_cpu(inputs_data.data());
   // in_tensor->copy_from_cpu(inputs_.im_data_.data());
   predictor_->ZeroCopyRun();
-  // 取出模型的输出结果
+  // get result
   auto output_names = predictor_->GetOutputNames();
   auto output_tensor = predictor_->GetOutputTensor(output_names[0]);
   std::vector<int> output_shape = output_tensor->shape();
@@ -228,7 +228,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
   }
   outputs_.resize(size);
   output_tensor->copy_to_cpu(outputs_.data());
-  // 对模型输出结果进行后处理
+  // postprocess
   (*results).clear();
   (*results).resize(batch_size);
   int single_batch_size = size / batch_size;
@@ -258,7 +258,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
     return false;
   }
 
-  // 处理输入图像
+  // preprocess
   if (!preprocess(im, &inputs_)) {
     std::cerr << "Preprocess failed!" << std::endl;
     return false;
@@ -288,7 +288,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
     im_info_tensor->copy_from_cpu(im_info);
     im_shape_tensor->copy_from_cpu(im_shape);
   }
-  // 使用加载的模型进行预测
+  // predict
   predictor_->ZeroCopyRun();
 
   std::vector<float> output_box;
@@ -306,7 +306,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
     return true;
   }
   int num_boxes = size / 6;
-  // 解析预测框box
+  // box postprocess
   for (int i = 0; i < num_boxes; ++i) {
     Box box;
     box.category_id = static_cast<int>(round(output_box[i * 6]));
@@ -321,7 +321,7 @@ bool Model::predict(const cv::Mat& im, DetResult* result) {
     box.coordinate = {xmin, ymin, w, h};
     result->boxes.push_back(std::move(box));
   }
-  // 实例分割需解析mask
+  // mask postprocess
   if (name == "MaskRCNN") {
     std::vector<float> output_mask;
     auto output_mask_tensor = predictor_->GetOutputTensor(output_names[1]);
@@ -366,12 +366,12 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
 
   inputs_batch_.assign(im_batch.size(), ImageBlob());
   int batch_size = im_batch.size();
-  // 处理输入图像
+  // preprocess
   if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
     std::cerr << "Preprocess failed!" << std::endl;
     return false;
   }
-  // 对RCNN类模型做批量padding
+  // RCNN model padding
   if (batch_size > 1) {
     if (name == "FasterRCNN" || name == "MaskRCNN") {
       int max_h = -1;
@@ -452,10 +452,10 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
     im_info_tensor->copy_from_cpu(im_info.data());
     im_shape_tensor->copy_from_cpu(im_shape.data());
   }
-  // 使用加载的模型进行预测
+  // predict
   predictor_->ZeroCopyRun();
 
-  // 读取所有box
+  // get all box
   std::vector<float> output_box;
   auto output_names = predictor_->GetOutputNames();
   auto output_box_tensor = predictor_->GetOutputTensor(output_names[0]);
@@ -472,7 +472,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
   }
   auto lod_vector = output_box_tensor->lod();
   int num_boxes = size / 6;
-  // 解析预测框box
+  // box postprocess
   (*results).clear();
   (*results).resize(batch_size);
   for (int i = 0; i < lod_vector[0].size() - 1; ++i) {
@@ -492,7 +492,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
     }
   }
 
-  // 实例分割需解析mask
+  // mask postprocess
   if (name == "MaskRCNN") {
     std::vector<float> output_mask;
     auto output_mask_tensor = predictor_->GetOutputTensor(output_names[1]);
@@ -537,7 +537,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
     return false;
   }
 
-  // 处理输入图像
+  // preprocess
   if (!preprocess(im, &inputs_)) {
     std::cerr << "Preprocess failed!" << std::endl;
     return false;
@@ -549,10 +549,10 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
   im_tensor->Reshape({1, 3, h, w});
   im_tensor->copy_from_cpu(inputs_.im_data_.data());
 
-  // 使用加载的模型进行预测
+  // predict
   predictor_->ZeroCopyRun();
 
-  // 获取预测置信度,经过argmax后的labelmap
+  // get labelmap
   auto output_names = predictor_->GetOutputNames();
   auto output_label_tensor = predictor_->GetOutputTensor(output_names[0]);
   std::vector<int> output_label_shape = output_label_tensor->shape();
@@ -565,7 +565,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
   result->label_map.data.resize(size);
   output_label_tensor->copy_to_cpu(result->label_map.data.data());
 
-  // 获取预测置信度scoremap
+  // get scoremap
   auto output_score_tensor = predictor_->GetOutputTensor(output_names[1]);
   std::vector<int> output_score_shape = output_score_tensor->shape();
   size = 1;
@@ -577,7 +577,7 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
   result->score_map.data.resize(size);
   output_score_tensor->copy_to_cpu(result->score_map.data.data());
 
-  // 解析输出结果到原图大小
+  // get origin image result
   std::vector<uint8_t> label_map(result->label_map.data.begin(),
                                  result->label_map.data.end());
   cv::Mat mask_label(result->label_map.shape[1],
@@ -647,7 +647,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
     return false;
   }
 
-  // 处理输入图像
+  // preprocess
   inputs_batch_.assign(im_batch.size(), ImageBlob());
   if (!preprocess(im_batch, &inputs_batch_, thread_num)) {
     std::cerr << "Preprocess failed!" << std::endl;
@@ -670,10 +670,10 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
   im_tensor->copy_from_cpu(inputs_data.data());
   // im_tensor->copy_from_cpu(inputs_.im_data_.data());
 
-  // 使用加载的模型进行预测
+  // predict
   predictor_->ZeroCopyRun();
 
-  // 获取预测置信度,经过argmax后的labelmap
+  // get labelmap
   auto output_names = predictor_->GetOutputNames();
   auto output_label_tensor = predictor_->GetOutputTensor(output_names[0]);
   std::vector<int> output_label_shape = output_label_tensor->shape();
@@ -698,7 +698,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
               (*results)[i].label_map.data.data());
   }
 
-  // 获取预测置信度scoremap
+  // get scoremap
   auto output_score_tensor = predictor_->GetOutputTensor(output_names[1]);
   std::vector<int> output_score_shape = output_score_tensor->shape();
   size = 1;
@@ -722,7 +722,7 @@ bool Model::predict(const std::vector<cv::Mat>& im_batch,
               (*results)[i].score_map.data.data());
   }
 
-  // 解析输出结果到原图大小
+  // get origin image result
   for (int i = 0; i < batch_size; ++i) {
     std::vector<uint8_t> label_map((*results)[i].label_map.data.begin(),
                                    (*results)[i].label_map.data.end());

+ 7 - 5
deploy/cpp/src/transforms.cpp

@@ -12,12 +12,14 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+#include "include/paddlex/transforms.h"
+
+#include <math.h>
+
 #include <iostream>
 #include <string>
 #include <vector>
-#include <math.h>
 
-#include "include/paddlex/transforms.h"
 
 namespace PaddleX {
 
@@ -195,7 +197,7 @@ std::shared_ptr<Transform> Transforms::CreateTransform(
 }
 
 bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
-  // 按照transforms中预处理算子顺序处理图像
+  // do all preprocess ops in order
   if (to_rgb_) {
     cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
   }
@@ -211,8 +213,8 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
     }
   }
 
-  // 将图像由NHWC转为NCHW格式
-  // 同时转为连续的内存块存储到ImageBlob
+  // data format NHWC to NCHW
+  // store image data in ImageBlob
   int h = im->rows;
   int w = im->cols;
   int c = im->channels();

+ 4 - 4
deploy/cpp/src/visualize.cpp

@@ -47,7 +47,7 @@ cv::Mat Visualize(const cv::Mat& img,
                             boxes[i].coordinate[2],
                             boxes[i].coordinate[3]);
 
-    // 生成预测框和标题
+    // draw box and title
     std::string text = boxes[i].category;
     int c1 = colormap[3 * boxes[i].category_id + 0];
     int c2 = colormap[3 * boxes[i].category_id + 1];
@@ -63,13 +63,13 @@ cv::Mat Visualize(const cv::Mat& img,
     origin.x = roi.x;
     origin.y = roi.y;
 
-    // 生成预测框标题的背景
+    // background
     cv::Rect text_back = cv::Rect(boxes[i].coordinate[0],
                                   boxes[i].coordinate[1] - text_size.height,
                                   text_size.width,
                                   text_size.height);
 
-    // 绘图和文字
+    // draw
     cv::rectangle(vis_img, roi, roi_color, 2);
     cv::rectangle(vis_img, text_back, roi_color, -1);
     cv::putText(vis_img,
@@ -80,7 +80,7 @@ cv::Mat Visualize(const cv::Mat& img,
                 cv::Scalar(255, 255, 255),
                 thickness);
 
-    // 生成实例分割mask
+    // mask
     if (boxes[i].mask.data.size() == 0) {
       continue;
     }

+ 2 - 2
deploy/openvino/demo/classifier.cpp

@@ -44,11 +44,11 @@ int main(int argc, char** argv) {
     return -1;
   }
 
-  // 加载模型
+  // load model
   PaddleX::Model model;
   model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device);
 
-  // 进行预测
+  // predict
   if (FLAGS_image_list != "") {
     std::ifstream inf(FLAGS_image_list);
     if (!inf) {

+ 3 - 3
deploy/openvino/demo/detector.cpp

@@ -54,13 +54,13 @@ int main(int argc, char** argv) {
     return -1;
   }
 
-  //
+  // load model
   PaddleX::Model model;
   model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device);
 
   int imgs = 1;
   auto colormap = PaddleX::GenerateColorMap(model.labels.size());
-  // 进行预测
+  // predict
   if (FLAGS_image_list != "") {
     std::ifstream inf(FLAGS_image_list);
     if (!inf) {
@@ -96,7 +96,7 @@ int main(int argc, char** argv) {
                 << result.boxes[i].coordinate[3] << ")" << std::endl;
     }
     if (FLAGS_save_dir != "") {
-    // 可视化
+    // visualize
       cv::Mat vis_img = PaddleX::Visualize(
         im, result, model.labels, colormap, FLAGS_threshold);
       std::string save_path =

+ 1 - 3
deploy/openvino/demo/segmenter.cpp

@@ -48,11 +48,9 @@ int main(int argc, char** argv) {
     return -1;
   }
 
-  //
-  std::cout << "init start" << std::endl;
+  // load model
   PaddleX::Model model;
   model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_device);
-  std::cout << "init done" << std::endl;
   int imgs = 1;
   auto colormap = PaddleX::GenerateColorMap(model.labels.size());
 

+ 8 - 7
deploy/openvino/src/paddlex.cpp

@@ -67,9 +67,9 @@ bool Model::load_config(const std::string& cfg_file) {
       return false;
     }
   }
-  // 构建数据处理流
+  // init preprocess ops
   transforms_.Init(config["Transforms"], type, to_rgb);
-  // 读入label lis
+  // read label list
   for (const auto& item : config["_Attributes"]["labels"]) {
     int index = labels.size();
     labels[index] = item.as<std::string>();
@@ -98,7 +98,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
               << std::endl;
     return false;
   }
-  // 处理输入图像
+  // preprocess
   InferenceEngine::InferRequest infer_request =
     executable_network_.CreateInferRequest();
   std::string input_name = network_.getInputsInfo().begin()->first;
@@ -109,6 +109,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
     return false;
   }
 
+  // predict
   infer_request.Infer();
 
   std::string output_name = network_.getOutputsInfo().begin()->first;
@@ -118,7 +119,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
   auto moutputHolder = moutput->rmap();
   float* outputs_data = moutputHolder.as<float *>();
 
-  // 对模型输出结果进行后处理
+  // postprocess
   auto ptr = std::max_element(outputs_data, outputs_data+sizeof(outputs_data));
   result->category_id = std::distance(outputs_data, ptr);
   result->score = *ptr;
@@ -206,20 +207,20 @@ bool Model::predict(const cv::Mat& im, SegResult* result) {
                  "function predict()!" << std::endl;
     return false;
   }
-  //
+  // init infer
   InferenceEngine::InferRequest infer_request =
     executable_network_.CreateInferRequest();
   std::string input_name = network_.getInputsInfo().begin()->first;
   inputs_.blob = infer_request.GetBlob(input_name);
 
-  //
+  // preprocess
   cv::Mat im_clone = im.clone();
   if (!preprocess(&im_clone, &inputs_)) {
     std::cerr << "Preprocess failed!" << std::endl;
     return false;
   }
 
-  //
+  // predict
   infer_request.Infer();
 
   InferenceEngine::OutputsDataMap out_map = network_.getOutputsInfo();

+ 3 - 3
deploy/openvino/src/transforms.cpp

@@ -201,7 +201,7 @@ std::shared_ptr<Transform> Transforms::CreateTransform(
 }
 
 bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
-  // 按照transforms中预处理算子顺序处理图像
+  // preprocess in order
   if (to_rgb_) {
     cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
   }
@@ -224,8 +224,8 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
     }
   }
 
-  // 将图像由NHWC转为NCHW格式
-  // 同时转为连续的内存块存储到Blob
+  // image format NHWC to NCHW
+  // store image data in ImageBlob
   InferenceEngine::SizeVector blobSize = data->blob->getTensorDesc().getDims();
   const size_t width = blobSize[3];
   const size_t height = blobSize[2];

+ 4 - 4
deploy/openvino/src/visualize.cpp

@@ -47,7 +47,7 @@ cv::Mat Visualize(const cv::Mat& img,
                             boxes[i].coordinate[2],
                             boxes[i].coordinate[3]);
 
-    // 生成预测框和标题
+    // draw box and title
     std::string text = boxes[i].category;
     int c1 = colormap[3 * boxes[i].category_id + 0];
     int c2 = colormap[3 * boxes[i].category_id + 1];
@@ -63,13 +63,13 @@ cv::Mat Visualize(const cv::Mat& img,
     origin.x = roi.x;
     origin.y = roi.y;
 
-    // 生成预测框标题的背景
+    // background
     cv::Rect text_back = cv::Rect(boxes[i].coordinate[0],
                                   boxes[i].coordinate[1] - text_size.height,
                                   text_size.width,
                                   text_size.height);
 
-    // 绘图和文字
+    // draw
     cv::rectangle(vis_img, roi, roi_color, 2);
     cv::rectangle(vis_img, text_back, roi_color, -1);
     cv::putText(vis_img,
@@ -80,7 +80,7 @@ cv::Mat Visualize(const cv::Mat& img,
                 cv::Scalar(255, 255, 255),
                 thickness);
 
-    // 生成实例分割mask
+    // mask
     if (boxes[i].mask.data.size() == 0) {
       continue;
     }

+ 2 - 2
deploy/raspberry/demo/classifier.cpp

@@ -44,11 +44,11 @@ int main(int argc, char** argv) {
     return -1;
   }
 
-  // 加载模型
+  // load model
   PaddleX::Model model;
   model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_thread_num);
   std::cout << "init is done" << std::endl;
-  // 进行预测
+  // predict
   if (FLAGS_image_list != "") {
     std::ifstream inf(FLAGS_image_list);
     if (!inf) {

+ 3 - 3
deploy/raspberry/demo/detector.cpp

@@ -54,13 +54,13 @@ int main(int argc, char** argv) {
     return -1;
   }
 
-  //
+  // load model
   PaddleX::Model model;
   model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_thread_num);
 
   int imgs = 1;
   auto colormap = PaddleX::GenerateColorMap(model.labels.size());
-  // 进行预测
+  // predict
   if (FLAGS_image_list != "") {
     std::ifstream inf(FLAGS_image_list);
     if (!inf) {
@@ -97,7 +97,7 @@ int main(int argc, char** argv) {
                 << result.boxes[i].coordinate[3] << ")" << std::endl;
     }
     if (FLAGS_save_dir != "") {
-    // 可视化
+    // visualize
       cv::Mat vis_img = PaddleX::Visualize(
         im, result, model.labels, colormap, FLAGS_threshold);
       std::string save_path =

+ 1 - 1
deploy/raspberry/demo/segmenter.cpp

@@ -47,7 +47,7 @@ int main(int argc, char** argv) {
     return -1;
   }
 
-  //
+  // load model
   std::cout << "init start" << std::endl;
   PaddleX::Model model;
   model.Init(FLAGS_model_dir, FLAGS_cfg_file, FLAGS_thread_num);

+ 5 - 5
deploy/raspberry/src/paddlex.cpp

@@ -46,9 +46,9 @@ bool Model::load_config(const std::string& cfg_file) {
       return false;
     }
   }
-  // 构建数据处理流
+  // init preprocess ops
   transforms_.Init(config["Transforms"], to_rgb);
-  // 读入label lis
+  // read label list
   for (const auto& item : config["_Attributes"]["labels"]) {
     int index = labels.size();
     labels[index] = item.as<std::string>();
@@ -77,14 +77,14 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
               << std::endl;
     return false;
   }
-  // 处理输入图像
+  // preprocess
   inputs_.input_tensor_ = std::move(predictor_->GetInput(0));
   cv::Mat im_clone = im.clone();
   if (!preprocess(&im_clone, &inputs_)) {
     std::cerr << "Preprocess failed!" << std::endl;
     return false;
   }
-
+  // predict
   predictor_->Run();
 
   std::unique_ptr<const paddle::lite_api::Tensor> output_tensor(
@@ -92,7 +92,7 @@ bool Model::predict(const cv::Mat& im, ClsResult* result) {
   const float *outputs_data = output_tensor->mutable_data<float>();
 
 
-  // 对模型输出结果进行后处理
+  // postprocess
   auto ptr = std::max_element(outputs_data, outputs_data+sizeof(outputs_data));
   result->category_id = std::distance(outputs_data, ptr);
   result->score = *ptr;

+ 3 - 3
deploy/raspberry/src/transforms.cpp

@@ -201,7 +201,7 @@ std::shared_ptr<Transform> Transforms::CreateTransform(
 }
 
 bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
-  // 按照transforms中预处理算子顺序处理图像
+  // preprocess in order
   if (to_rgb_) {
     cv::cvtColor(*im, *im, cv::COLOR_BGR2RGB);
   }
@@ -218,8 +218,8 @@ bool Transforms::Run(cv::Mat* im, ImageBlob* data) {
     }
   }
 
-  // 将图像由NHWC转为NCHW格式
-  // 同时转为连续的内存块存储到Blob
+  // image format NHWC to NCHW
+  // store image data in ImageBlob
   int height = im->rows;
   int width = im->cols;
   int channels = im->channels();