model_infer.cpp

// Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <gflags/gflags.h>
#include <glog/logging.h>
#include <omp.h>

#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include "model_deploy/common/include/paddle_deploy.h"
DEFINE_string(model_file, "", "Path of inference model");
DEFINE_string(cfg_file, "", "Path of yaml file");
DEFINE_string(model_type, "", "model type");
DEFINE_string(image, "", "Path of test image file");
DEFINE_string(image_list, "", "Path of test image list file");
DEFINE_string(trt_cache_file, "", "Cache path to store optimized trt file");
DEFINE_bool(save_engine, false, "Save Trt Engine");
DEFINE_int32(gpu_id, 0, "GPU card id");
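
// Example invocation (binary name, model_type value, and all paths below are
// illustrative placeholders, not prescribed by this file):
//   ./model_infer --model_type=det \
//       --model_file=/path/to/model.pdmodel \
//       --cfg_file=/path/to/deploy.yaml \
//       --image=/path/to/test.jpg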
int main(int argc, char** argv) {
  // Parse command-line flags
  google::ParseCommandLineFlags(&argc, &argv, true);
  std::cout << "ParseCommandLineFlags: FLAGS_model_type="
            << FLAGS_model_type << " model_file="
            << FLAGS_model_file << std::endl;

  // Create a model object for the requested model type
  std::shared_ptr<PaddleDeploy::Model> model =
      PaddleDeploy::CreateModel(FLAGS_model_type);
  if (!model) {
    std::cerr << "Failed to create model for model_type: "
              << FLAGS_model_type << std::endl;
    return -1;
  }
  std::cout << "start model init" << std::endl;
  // Model init: read the yaml config
  model->Init(FLAGS_cfg_file);

  std::cout << "start engine init" << std::endl;
  // Inference engine init (TensorRT backend)
  model->TensorRTInit(FLAGS_model_file,
                      FLAGS_cfg_file,
                      FLAGS_gpu_id,
                      FLAGS_save_engine,
                      FLAGS_trt_cache_file);
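  // Note: judging from the flag descriptions above, when --save_engine is set
  // the optimized TensorRT engine is cached at --trt_cache_file so later runs
  // can reuse it instead of rebuilding; that behavior lives inside
  // TensorRTInit, not in this file.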
  // Prepare input data: either a list file with one image path per line,
  // or a single image path
  std::vector<std::string> image_paths;
  if (FLAGS_image_list != "") {
    std::ifstream inf(FLAGS_image_list);
    if (!inf) {
      std::cerr << "Fail to open file " << FLAGS_image_list << std::endl;
      return -1;
    }
    std::string image_path;
    while (std::getline(inf, image_path)) {
      image_paths.push_back(image_path);
    }
  } else if (FLAGS_image != "") {
    image_paths.push_back(FLAGS_image);
  } else {
    std::cerr << "Either --image_list or --image must be specified"
              << std::endl;
    return -1;
  }
  std::cout << "start model predict " << image_paths.size() << std::endl;
  // Run inference one image at a time (batch size 1)
  std::vector<PaddleDeploy::Result> results;
  std::vector<cv::Mat> imgs;
  cv::Mat img;
  for (std::size_t i = 0; i < image_paths.size(); ++i) {
    img = cv::imread(image_paths[i]);
    if (img.empty()) {
      std::cerr << "Fail to read image: " << image_paths[i] << std::endl;
      return -1;
    }
    imgs.clear();
    imgs.push_back(std::move(img));
    model->Predict(imgs, &results);
    std::cout << "image: " << image_paths[i] << std::endl;
    for (std::size_t j = 0; j < results.size(); ++j) {
      std::cout << "Result for sample " << j << std::endl;
      std::cout << results[j] << std::endl;
    }
  }
  return 0;
}