// petr.cc — PETR multi-camera 3D perception model (Paddle3D).
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. #include "ultra_infer/vision/perception/paddle3d/petr/petr.h"
  15. namespace ultra_infer {
  16. namespace vision {
  17. namespace perception {
  18. Petr::Petr(const std::string &model_file, const std::string &params_file,
  19. const std::string &config_file, const RuntimeOption &custom_option,
  20. const ModelFormat &model_format)
  21. : preprocessor_(config_file) {
  22. valid_cpu_backends = {Backend::PDINFER};
  23. valid_gpu_backends = {Backend::PDINFER};
  24. runtime_option = custom_option;
  25. runtime_option.model_format = model_format;
  26. runtime_option.model_file = model_file;
  27. runtime_option.params_file = params_file;
  28. runtime_option.paddle_infer_option.enable_mkldnn = false;
  29. initialized = Initialize();
  30. }
  31. bool Petr::Initialize() {
  32. if (!InitRuntime()) {
  33. FDERROR << "Failed to initialize ultra_infer backend." << std::endl;
  34. return false;
  35. }
  36. return true;
  37. }
  38. bool Petr::Predict(const cv::Mat &images, PerceptionResult *results) {
  39. FDERROR << "Petr inference only support 6(V1) or 12(V2) images" << std::endl;
  40. return false;
  41. }
  42. bool Petr::BatchPredict(const std::vector<cv::Mat> &images,
  43. std::vector<PerceptionResult> *results) {
  44. if ((images.size() != 6) && (images.size() != 12)) {
  45. FDERROR << "Petr only support 6(V1) or 12(V2) images";
  46. return false;
  47. }
  48. std::vector<FDMat> fd_images = WrapMat(images);
  49. if (!preprocessor_.Run(&fd_images, &reused_input_tensors_)) {
  50. FDERROR << "Failed to preprocess the input image." << std::endl;
  51. return false;
  52. }
  53. // Note: un-commented the codes below to show the debug info.
  54. // reused_input_tensors_[0].PrintInfo();
  55. // reused_input_tensors_[1].PrintInfo();
  56. // reused_input_tensors_[2].PrintInfo();
  57. reused_input_tensors_[0].name = InputInfoOfRuntime(0).name;
  58. reused_input_tensors_[1].name = InputInfoOfRuntime(1).name;
  59. if (images.size() == 12) {
  60. // for Petr V2 timestamp
  61. reused_input_tensors_[2].name = InputInfoOfRuntime(2).name;
  62. } else {
  63. // for Petr V1
  64. reused_input_tensors_.pop_back();
  65. }
  66. if (!Infer(reused_input_tensors_, &reused_output_tensors_)) {
  67. FDERROR << "Failed to inference by runtime." << std::endl;
  68. return false;
  69. }
  70. if (!postprocessor_.Run(reused_output_tensors_, results)) {
  71. FDERROR << "Failed to postprocess the inference results by runtime."
  72. << std::endl;
  73. return false;
  74. }
  75. return true;
  76. }
  77. } // namespace perception
  78. } // namespace vision
  79. } // namespace ultra_infer