// preprocessor.h
  1. // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #pragma once
  15. #include "ultra_infer/vision/common/processors/transform.h"
  16. #include "ultra_infer/vision/common/result.h"
  17. namespace ultra_infer {
  18. namespace vision {
  19. namespace detection {
  20. /*! @brief Preprocessor object for YOLOv7 serials model.
  21. */
  22. class ULTRAINFER_DECL YOLOv7Preprocessor {
  23. public:
  24. /** \brief Create a preprocessor instance for YOLOv7 serials model
  25. */
  26. YOLOv7Preprocessor();
  27. /** \brief Process the input image and prepare input tensors for runtime
  28. *
  29. * \param[in] images The input image data list, all the elements are returned
  30. * by cv::imread() \param[in] outputs The output tensors which will feed in
  31. * runtime \param[in] ims_info The shape info list, record input_shape and
  32. * output_shape \return true if the preprocess succeeded, otherwise false
  33. */
  34. bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
  35. std::vector<std::map<std::string, std::array<float, 2>>> *ims_info);
  36. /// Set target size, tuple of (width, height), default size = {640, 640}
  37. void SetSize(const std::vector<int> &size) { size_ = size; }
  38. /// Get target size, tuple of (width, height), default size = {640, 640}
  39. std::vector<int> GetSize() const { return size_; }
  40. /// Set padding value, size should be the same as channels
  41. void SetPaddingValue(const std::vector<float> &padding_value) {
  42. padding_value_ = padding_value;
  43. }
  44. /// Get padding value, size should be the same as channels
  45. std::vector<float> GetPaddingValue() const { return padding_value_; }
  46. /// Set is_scale_up, if is_scale_up is false, the input image only
  47. /// can be zoom out, the maximum resize scale cannot exceed 1.0, default true
  48. void SetScaleUp(bool is_scale_up) { is_scale_up_ = is_scale_up; }
  49. /// Get is_scale_up, default true
  50. bool GetScaleUp() const { return is_scale_up_; }
  51. protected:
  52. bool Preprocess(FDMat *mat, FDTensor *output,
  53. std::map<std::string, std::array<float, 2>> *im_info);
  54. void LetterBox(FDMat *mat);
  55. // target size, tuple of (width, height), default size = {640, 640}
  56. std::vector<int> size_;
  57. // padding value, size should be the same as channels
  58. std::vector<float> padding_value_;
  59. // only pad to the minimum rectangle which height and width is times of stride
  60. bool is_mini_pad_;
  61. // while is_mini_pad = false and is_no_pad = true,
  62. // will resize the image to the set size
  63. bool is_no_pad_;
  64. // if is_scale_up is false, the input image only can be zoom out,
  65. // the maximum resize scale cannot exceed 1.0
  66. bool is_scale_up_;
  67. // padding stride, for is_mini_pad
  68. int stride_;
  69. // for offsetting the boxes by classes when using NMS
  70. float max_wh_;
  71. };
  72. } // namespace detection
  73. } // namespace vision
  74. } // namespace ultra_infer