normalize.h

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "ultra_infer/vision/common/processors/base.h"

namespace ultra_infer {
namespace vision {
/*! @brief Processor to normalize images with the given mean and std parameters.
 */
class ULTRAINFER_DECL Normalize : public Processor {
 public:
  Normalize(const std::vector<float> &mean, const std::vector<float> &std,
            bool is_scale = true,
            const std::vector<float> &min = std::vector<float>(),
            const std::vector<float> &max = std::vector<float>(),
            bool swap_rb = false);
  bool ImplByOpenCV(Mat *mat);
#ifdef ENABLE_FLYCV
  bool ImplByFlyCV(Mat *mat);
#endif
#ifdef WITH_GPU
  bool ImplByCuda(FDMat *mat);
  bool ImplByCuda(FDMatBatch *mat_batch);
#endif
#ifdef ENABLE_CVCUDA
  bool ImplByCvCuda(FDMat *mat);
  bool ImplByCvCuda(FDMatBatch *mat_batch);
#endif
  std::string Name() { return "Normalize"; }
  // When normalizing repeatedly, it is recommended not to use this static
  // function: every call has to compute result = ((mat / 255) - mean) / std
  // from scratch. If we instead use the following:
  // ```
  // auto norm = Normalize(...);
  // norm(mat);
  // ```
  // the constructor precomputes the coefficients once, and `norm(mat)` only
  // needs to compute result = mat * alpha + beta, which saves a lot of time.
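  // As an illustrative sketch (not the exact implementation), with the default
  // is_scale = true and min/max left empty (so the input range is taken to be
  // [0, 255]), the precomputed per-channel coefficients would be:
  // ```
  // // alpha[c] = 1 / (255 * std[c]),  beta[c] = -mean[c] / std[c],
  // // so that mat * alpha + beta == ((mat / 255) - mean) / std
  // std::vector<float> alpha(mean.size()), beta(mean.size());
  // for (size_t c = 0; c < mean.size(); ++c) {
  //   alpha[c] = 1.0f / (255.0f * std[c]);
  //   beta[c] = -mean[c] / std[c];
  // }
  // ```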
  /** \brief Process the input images
   *
   * \param[in] mat The input image data, `result = mat * alpha + beta`
   * \param[in] mean target mean vector of the output images
   * \param[in] std target std vector of the output images
   * \param[in] is_scale whether to scale the pixel values by the min/max range
   * before applying mean/std
   * \param[in] min per-channel minimum values used when scaling (optional)
   * \param[in] max per-channel maximum values used when scaling (optional)
   * \param[in] lib the processing library to use (OpenCV, FlyCV, or CVCUDA)
   * \param[in] swap_rb whether to swap the R and B channel order
   * \return true if the process succeeded, otherwise false
   */
  static bool Run(Mat *mat, const std::vector<float> &mean,
                  const std::vector<float> &std, bool is_scale = true,
                  const std::vector<float> &min = std::vector<float>(),
                  const std::vector<float> &max = std::vector<float>(),
                  ProcLib lib = ProcLib::DEFAULT, bool swap_rb = false);
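  // Illustrative usage (a sketch; the mean/std values below are just the
  // common ImageNet statistics, not required by this API). Given an already
  // prepared 3-channel `Mat *mat`:
  // ```
  // std::vector<float> mean = {0.485f, 0.456f, 0.406f};
  // std::vector<float> stdv = {0.229f, 0.224f, 0.225f};
  // Normalize::Run(mat, mean, stdv, /*is_scale=*/true);  // one-shot path
  // // Reusable path: precompute alpha/beta once, then apply per image.
  // Normalize norm(mean, stdv, /*is_scale=*/true);
  // norm(mat);
  // ```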
  std::vector<float> GetAlpha() const { return alpha_; }
  std::vector<float> GetBeta() const { return beta_; }
  bool GetSwapRB() { return swap_rb_; }

  /** \brief Set whether to swap the R and B channel order
   *
   * \param[in] swap_rb the new value of the swap_rb parameter
   */
  void SetSwapRB(bool swap_rb) { swap_rb_ = swap_rb; }
 private:
  std::vector<float> alpha_;
  std::vector<float> beta_;
  FDTensor gpu_alpha_;
  FDTensor gpu_beta_;
  bool swap_rb_;
};
} // namespace vision
} // namespace ultra_infer