base.cc

#include "ultra_infer/vision/detection/ppdet/base.h"
#include "ultra_infer/utils/unique_ptr.h"
#include "ultra_infer/vision/utils/utils.h"
#include "yaml-cpp/yaml.h"

namespace ultra_infer {
namespace vision {
namespace detection {

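// Constructs the detector: the preprocessor is configured from the exported
// deployment config YAML, the postprocessor is chosen from the architecture
// ("arch") recorded in that config, and the runtime options are stored for
// later initialization.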
PPDetBase::PPDetBase(const std::string &model_file,
                     const std::string &params_file,
                     const std::string &config_file,
                     const RuntimeOption &custom_option,
                     const ModelFormat &model_format)
    : preprocessor_(config_file), postprocessor_(preprocessor_.GetArch()) {
  runtime_option = custom_option;
  runtime_option.model_format = model_format;
  runtime_option.model_file = model_file;
  runtime_option.params_file = params_file;
}

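// Creates a copy of this model with its own cloned runtime, so the copy can
// run predictions independently of the original instance.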
std::unique_ptr<PPDetBase> PPDetBase::Clone() const {
  std::unique_ptr<PPDetBase> clone_model =
      ultra_infer::utils::make_unique<PPDetBase>(PPDetBase(*this));
  clone_model->SetRuntime(clone_model->CloneRuntime());
  return clone_model;
}

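// Creates the inference backend from the stored runtime_option.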
bool PPDetBase::Initialize() {
  if (!InitRuntime()) {
    FDERROR << "Failed to initialize ultra_infer backend." << std::endl;
    return false;
  }
  return true;
}

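// Single-image prediction: both overloads forward to BatchPredict() with a
// batch of one image and unpack the first result.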
bool PPDetBase::Predict(cv::Mat *im, DetectionResult *result) {
  return Predict(*im, result);
}

bool PPDetBase::Predict(const cv::Mat &im, DetectionResult *result) {
  std::vector<DetectionResult> results;
  if (!BatchPredict({im}, &results)) {
    return false;
  }
  *result = std::move(results[0]);
  return true;
}

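// Batch prediction pipeline: preprocess, name the input tensors, run the
// runtime, then postprocess the outputs into DetectionResult.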
bool PPDetBase::BatchPredict(const std::vector<cv::Mat> &imgs,
                             std::vector<DetectionResult> *results) {
  std::vector<FDMat> fd_images = WrapMat(imgs);
  if (!preprocessor_.Run(&fd_images, &reused_input_tensors_)) {
    FDERROR << "Failed to preprocess the input image." << std::endl;
    return false;
  }

  // Name the three tensors produced by the preprocessor to match the
  // exported model's input names.
  reused_input_tensors_[0].name = "image";
  reused_input_tensors_[1].name = "scale_factor";
  reused_input_tensors_[2].name = "im_shape";

  // If the model only takes the image tensor, hand the scale factor to the
  // postprocessor so boxes can still be mapped back to the original image.
  if (NumInputsOfRuntime() == 1) {
    auto scale_factor = static_cast<float *>(reused_input_tensors_[1].Data());
    postprocessor_.SetScaleFactor({scale_factor[0], scale_factor[1]});
  }

  // Some models don't need scale_factor and im_shape as input
  while (reused_input_tensors_.size() != NumInputsOfRuntime()) {
    reused_input_tensors_.pop_back();
  }

  if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
    FDERROR << "Failed to run inference with the runtime." << std::endl;
    return false;
  }
  if (!postprocessor_.Run(reused_output_tensors_, results)) {
    FDERROR << "Failed to postprocess the inference results." << std::endl;
    return false;
  }
  return true;
}

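// Checks that the architecture declared in the config is one of the
// architectures this base class supports.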
bool PPDetBase::CheckArch() {
  // "PicoDet" is kept in the list for backward compatibility with older
  // ppdet models, such as the PicoDet model used by the PaddleClas
  // PP-ShiTuV2 pipeline.
  std::vector<std::string> archs = {
      "SOLOv2", "YOLO",   "SSD",    "RetinaNet", "RCNN",   "Face",
      "GFL",    "YOLOX",  "YOLOv5", "YOLOv6",    "YOLOv7", "RTMDet",
      "FCOS",   "TTFNet", "TOOD",   "DETR",      "PicoDet"};
  auto arch = preprocessor_.GetArch();
  for (const auto &item : archs) {
    if (arch == item) {
      return true;
    }
  }
  FDWARNING << "Unsupported model arch, please set one of: SOLOv2, YOLO, "
            << "SSD, RetinaNet, RCNN, Face, GFL, YOLOX, YOLOv5, YOLOv6, "
            << "YOLOv7, RTMDet, FCOS, TTFNet, TOOD, DETR, PicoDet."
            << std::endl;
  return false;
}

} // namespace detection
} // namespace vision
} // namespace ultra_infer
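
// ---------------------------------------------------------------------------
// Usage sketch (illustration only, not part of this file): roughly how a
// detector built on PPDetBase is typically driven. The concrete model class
// (PPYOLOE), the umbrella header path, and the file paths below are
// assumptions for illustration and may differ in the actual ultra_infer
// package.
//
//   #include <iostream>
//   #include "ultra_infer/vision.h"  // assumed umbrella header
//
//   int main() {
//     ultra_infer::RuntimeOption option;
//     option.UseCpu();
//     auto model = ultra_infer::vision::detection::PPYOLOE(
//         "model/model.pdmodel", "model/model.pdiparams",
//         "model/infer_cfg.yml", option);
//     if (!model.Initialized()) {
//       std::cerr << "Failed to initialize the model." << std::endl;
//       return -1;
//     }
//     cv::Mat im = cv::imread("test.jpg");
//     ultra_infer::vision::DetectionResult res;
//     if (!model.Predict(im, &res)) {
//       std::cerr << "Failed to predict." << std::endl;
//       return -1;
//     }
//     std::cout << res.Str() << std::endl;
//     return 0;
//   }
// ---------------------------------------------------------------------------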