classifier.cpp

// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <gflags/gflags.h>
#include <glog/logging.h>
#include <omp.h>  // the OpenMP pragma below needs -fopenmp on GCC/Clang

#include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

#include <opencv2/opencv.hpp>

#include "include/paddlex/paddlex.h"

using namespace std::chrono;

DEFINE_string(model_dir, "", "Path of inference model");
DEFINE_bool(use_gpu, false, "Whether to run inference on GPU instead of CPU");
DEFINE_bool(use_trt, false, "Whether to run inference with TensorRT");
DEFINE_int32(gpu_id, 0, "GPU card id");
DEFINE_string(key, "", "Key of encryption");
DEFINE_string(image, "", "Path of test image file");
DEFINE_string(image_list, "", "Path of test image list file");
DEFINE_int32(batch_size, 1, "Batch size of inference");
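
// Example invocations (the binary name and all paths are placeholders):
//   ./classifier --model_dir=/path/to/inference_model --image=/path/to/img.jpg
//   ./classifier --model_dir=/path/to/inference_model \
//                --image_list=/path/to/list.txt --use_gpu=true --batch_size=4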

int main(int argc, char** argv) {
  // Parse command-line flags
  google::ParseCommandLineFlags(&argc, &argv, true);
  if (FLAGS_model_dir == "") {
    std::cerr << "--model_dir needs to be defined" << std::endl;
    return -1;
  }
  if (FLAGS_image == "" && FLAGS_image_list == "") {
    std::cerr << "--image or --image_list needs to be defined" << std::endl;
    return -1;
  }
  // Load the model
  PaddleX::Model model;
  model.Init(FLAGS_model_dir, FLAGS_use_gpu, FLAGS_use_trt, FLAGS_gpu_id,
             FLAGS_key, FLAGS_batch_size);
  // Run prediction
  double total_running_time_s = 0.0;
  double total_imread_time_s = 0.0;
  int imgs = 1;
  if (FLAGS_image_list != "") {
    std::ifstream inf(FLAGS_image_list);
    if (!inf) {
      std::cerr << "Failed to open file " << FLAGS_image_list << std::endl;
      return -1;
    }
    // Batch prediction over an image list
    std::string image_path;
    std::vector<std::string> image_paths;
    while (getline(inf, image_path)) {
      image_paths.push_back(image_path);
    }
    imgs = static_cast<int>(image_paths.size());
    for (int i = 0; i < imgs; i += FLAGS_batch_size) {
      auto start = system_clock::now();
      // Read this batch of images in parallel; note that im_vec_size is the
      // exclusive end index of the batch, not its size.
      int im_vec_size = std::min(imgs, i + FLAGS_batch_size);
      std::vector<cv::Mat> im_vec(im_vec_size - i);
      std::vector<PaddleX::ClsResult> results(im_vec_size - i,
                                              PaddleX::ClsResult());
      #pragma omp parallel for num_threads(im_vec_size - i)
      for (int j = i; j < im_vec_size; ++j) {
        im_vec[j - i] = cv::imread(image_paths[j], 1);
      }
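      // Design note: num_threads(im_vec_size - i) launches one thread per
      // image in the batch; for large --batch_size values it may be better
      // to cap this at omp_get_max_threads().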
      auto imread_end = system_clock::now();
      // Pass results by pointer, consistent with the single-image call below
      model.predict(im_vec, &results);
      auto imread_duration = duration_cast<microseconds>(imread_end - start);
      total_imread_time_s += double(imread_duration.count()) *
                             microseconds::period::num /
                             microseconds::period::den;
      auto end = system_clock::now();
      auto duration = duration_cast<microseconds>(end - start);
      total_running_time_s += double(duration.count()) *
                              microseconds::period::num /
                              microseconds::period::den;
      for (int j = i; j < im_vec_size; ++j) {
        std::cout << "Path:" << image_paths[j]
                  << ", predict label: " << results[j - i].category
                  << ", label_id:" << results[j - i].category_id
                  << ", score: " << results[j - i].score << std::endl;
      }
    }
  } else {
    // Single-image prediction
    auto start = system_clock::now();
    PaddleX::ClsResult result;
    cv::Mat im = cv::imread(FLAGS_image, 1);
    if (im.empty()) {
      std::cerr << "Failed to read image " << FLAGS_image << std::endl;
      return -1;
    }
    model.predict(im, &result);
    auto end = system_clock::now();
    auto duration = duration_cast<microseconds>(end - start);
    total_running_time_s += double(duration.count()) *
                            microseconds::period::num /
                            microseconds::period::den;
    std::cout << "Predict label: " << result.category
              << ", label_id:" << result.category_id
              << ", score: " << result.score << std::endl;
  }
  std::cout << "Total running time: " << total_running_time_s
            << " s, average running time: " << total_running_time_s / imgs
            << " s/img, total read img time: " << total_imread_time_s
            << " s, average read time: " << total_imread_time_s / imgs
            << " s/img, batch_size = " << FLAGS_batch_size << std::endl;
  return 0;
}
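
// Note: linking requires OpenCV, gflags, glog, and the PaddleX deploy
// library (see the surrounding deploy project's build configuration).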