cls_preprocessor.h

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include "ultra_infer/vision/common/processors/manager.h"
#include "ultra_infer/vision/common/processors/transform.h"
#include "ultra_infer/vision/common/result.h"

namespace ultra_infer {
namespace vision {
namespace ocr {
/*! @brief Preprocessor object for the Classifier series model.
 */
class ULTRAINFER_DECL ClassifierPreprocessor : public ProcessorManager {
 public:
  ClassifierPreprocessor();
  using ProcessorManager::Run;
  /** \brief Process the input images and prepare input tensors for runtime.
   *
   * \param[in] images The input data list, all the elements are FDMat
   * \param[in] outputs The output tensors which will be fed into runtime
   * \param[in] start_index The start index of the images to be processed
   * \param[in] end_index The end index (exclusive) of the images to be processed
   * \return true if the preprocess succeeded, otherwise false
   */
  bool Run(std::vector<FDMat> *images, std::vector<FDTensor> *outputs,
           size_t start_index, size_t end_index);
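  // Illustrative call patterns (not part of the upstream header), assuming
  // `images` and `outputs` are caller-owned vectors and `preprocessor` is an
  // instance of this class:
  //   preprocessor.Run(&images, &outputs, 0, images.size());  // whole batch
  //   preprocessor.Run(&images, &outputs, i, i + 1);          // single image i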
  /** \brief Implement the virtual function of ProcessorManager. Apply() is the
   * body of Run(): it contains the main preprocessing logic, while Run() is
   * the entry point called by users to execute preprocessing.
   *
   * \param[in] image_batch The input image batch
   * \param[in] outputs The output tensors which will be fed into runtime
   * \return true if the preprocess succeeded, otherwise false
   */
  virtual bool Apply(FDMatBatch *image_batch, std::vector<FDTensor> *outputs);
  /// Set the normalization parameters for preprocessing. Call this API to
  /// customize the parameters; otherwise the default normalization parameters
  /// are used.
  void SetNormalize(const std::vector<float> &mean,
                    const std::vector<float> &std, bool is_scale) {
    normalize_op_ = std::make_shared<Normalize>(mean, std, is_scale);
  }
  /// Set cls_image_shape for the classification preprocess
  void SetClsImageShape(const std::vector<int> &cls_image_shape) {
    cls_image_shape_ = cls_image_shape;
  }

  /// Get cls_image_shape for the classification preprocess
  std::vector<int> GetClsImageShape() const { return cls_image_shape_; }
  /// This function will disable normalize in preprocessing step.
  void DisableNormalize() { disable_normalize_ = true; }
  /// This function will disable hwc2chw in preprocessing step.
  void DisablePermute() { disable_permute_ = true; }
 private:
  void OcrClassifierResizeImage(FDMat *mat,
                                const std::vector<int> &cls_image_shape);
  // Records whether the hwc2chw (permute) step is disabled.
  bool disable_permute_ = false;
  // Records whether the normalize step is disabled.
  bool disable_normalize_ = false;
  std::vector<int> cls_image_shape_ = {3, 48, 192};
  std::shared_ptr<Resize> resize_op_;
  std::shared_ptr<Pad> pad_op_;
  std::shared_ptr<Normalize> normalize_op_;
  std::shared_ptr<HWC2CHW> hwc2chw_op_;
};
}  // namespace ocr
}  // namespace vision
}  // namespace ultra_infer
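
// Usage sketch (illustrative addition, not part of the upstream header). It
// shows how this preprocessor might be driven on its own, assuming the caller
// has already decoded its input images into FDMat objects; image decoding and
// the runtime inference step are out of scope. The include path, the function
// name `PreprocessClsBatch`, and the mean/std values are assumptions for this
// example, and the namespace qualifications (FDMat in ultra_infer::vision,
// FDTensor in ultra_infer) are inferred from this header.
//
//   #include <vector>
//
//   #include "ultra_infer/vision/ocr/ppocr/cls_preprocessor.h"  // path assumed
//
//   bool PreprocessClsBatch(std::vector<ultra_infer::vision::FDMat> *images,
//                           std::vector<ultra_infer::FDTensor> *outputs) {
//     ultra_infer::vision::ocr::ClassifierPreprocessor preprocessor;
//     // Optional: override the default normalization and input shape.
//     preprocessor.SetNormalize({0.5f, 0.5f, 0.5f}, {0.5f, 0.5f, 0.5f},
//                               /*is_scale=*/true);
//     preprocessor.SetClsImageShape({3, 48, 192});
//     // Preprocess the whole batch: indices [0, images->size()).
//     return preprocessor.Run(images, outputs, 0, images->size());
//   }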