  1. // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #pragma once
  15. #include "ultra_infer/vision/common/processors/transform.h"
  16. #include "ultra_infer/vision/common/result.h"
  17. #include "ultra_infer/vision/detection/ppdet/multiclass_nms.h"
  18. #include "ultra_infer/vision/detection/ppdet/multiclass_nms_rotated.h"
  19. namespace ultra_infer {
  20. namespace vision {
  21. namespace detection {
/*! @brief Postprocessor object for PaddleDet series model.
 */
class ULTRAINFER_DECL PaddleDetPostprocessor {
public:
  PaddleDetPostprocessor() {
    // There may be no NMS config in the yaml file,
    // so we need to give an initial value to multi_class_nms_.
    multi_class_nms_.SetNMSOption(NMSOption());
    multi_class_nms_rotated_.SetNMSRotatedOption(NMSRotatedOption());
  }

  /** \brief Create a postprocessor instance for PaddleDet series model
   *
   * \param[in] arch Architecture name of the model, used to differentiate
   * how the output tensors are processed (stored in arch_)
   */
  explicit PaddleDetPostprocessor(const std::string &arch) {
    // Used to differentiate models
    arch_ = arch;
    // There may be no NMS config in the yaml file,
    // so we need to give an initial value to multi_class_nms_.
    multi_class_nms_.SetNMSOption(NMSOption());
    multi_class_nms_rotated_.SetNMSRotatedOption(NMSRotatedOption());
  }

  /** \brief Process the result of runtime and fill to DetectionResult structure
   *
   * \param[in] tensors The inference result from runtime
   * \param[out] result The output result of detection
   * \return true if the postprocess succeeded, otherwise false
   */
  bool Run(const std::vector<FDTensor> &tensors,
           std::vector<DetectionResult> *result);

  /// Apply box decoding and nms step for the outputs of the model. This is
  /// only available for those models exported without box decoding and nms.
  /// (Sets with_nms_ to false, signalling that this postprocessor must
  /// perform the decoding/NMS itself.)
  void ApplyNMS() { with_nms_ = false; }

  /// If you do not want to modify the Yaml configuration file,
  /// you can use this function to set rotated NMS parameters.
  void SetNMSRotatedOption(const NMSRotatedOption &option) {
    multi_class_nms_rotated_.SetNMSRotatedOption(option);
  }

  /// If you do not want to modify the Yaml configuration file,
  /// you can use this function to set NMS parameters.
  void SetNMSOption(const NMSOption &option) {
    multi_class_nms_.SetNMSOption(option);
  }

  // Set scale_factor_ value. This is only available for those models exported
  // without nms.
  void SetScaleFactor(const std::vector<float> &scale_factor_value) {
    scale_factor_ = scale_factor_value;
  }

private:
  // Scale factor used when this postprocessor performs box decoding itself
  // (model exported without nms). Two entries; presumably (h, w) order —
  // TODO(review): confirm against the caller that sets it.
  std::vector<float> scale_factor_{0.0, 0.0};
  std::vector<float> GetScaleFactor() { return scale_factor_; }
  // True when the exported model already contains the NMS step;
  // flipped to false by ApplyNMS() for models exported without it.
  bool with_nms_ = true;
  // Used to differentiate models (set from the `arch` ctor argument).
  std::string arch_;
  PaddleMultiClassNMS multi_class_nms_{};
  PaddleMultiClassNMSRotated multi_class_nms_rotated_{};
  // Process for general tensor layout, model exported without nms.
  bool ProcessWithoutNMS(const std::vector<FDTensor> &tensors,
                         std::vector<DetectionResult> *results);
  // Process for general tensor layout, model exported with nms.
  bool ProcessWithNMS(const std::vector<FDTensor> &tensors,
                      std::vector<DetectionResult> *results);
  // Process SOLOv2 model outputs.
  bool ProcessSolov2(const std::vector<FDTensor> &tensors,
                     std::vector<DetectionResult> *results);
  // Process PPYOLOER model outputs.
  bool ProcessPPYOLOER(const std::vector<FDTensor> &tensors,
                       std::vector<DetectionResult> *results);
  // Process mask tensor for MaskRCNN.
  bool ProcessMask(const FDTensor &tensor,
                   std::vector<DetectionResult> *results);
};
  96. } // namespace detection
  97. } // namespace vision
  98. } // namespace ultra_infer