// preprocessor.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "ultra_infer/vision/common/processors/manager.h"
#include "ultra_infer/vision/common/processors/transform.h"
#include "ultra_infer/vision/common/result.h"
  18. namespace ultra_infer {
  19. namespace vision {
  20. namespace classification {
  21. /*! @brief Preprocessor object for PaddleClas serials model.
  22. */
  23. class ULTRAINFER_DECL PaddleClasPreprocessor : public ProcessorManager {
  24. public:
  25. /** \brief Create a preprocessor instance for PaddleClas serials model
  26. *
  27. * \param[in] config_file Path of configuration file for deployment, e.g
  28. * resnet/infer_cfg.yml
  29. */
  30. explicit PaddleClasPreprocessor(const std::string &config_file);
  31. /** \brief Implement the virtual function of ProcessorManager, Apply() is the
  32. * body of Run(). Apply() contains the main logic of preprocessing, Run() is
  33. * called by users to execute preprocessing
  34. *
  35. * \param[in] image_batch The input image batch
  36. * \param[in] outputs The output tensors which will feed in runtime
  37. * \return true if the preprocess succeeded, otherwise false
  38. */
  39. virtual bool Apply(FDMatBatch *image_batch, std::vector<FDTensor> *outputs);
  40. /// This function will disable normalize in preprocessing step.
  41. void DisableNormalize();
  42. /// This function will disable hwc2chw in preprocessing step.
  43. void DisablePermute();
  44. /** \brief When the initial operator is Resize, and input image size is large,
  45. * maybe it's better to run resize on CPU, because the HostToDevice memcpy
  46. * is time consuming. Set this true to run the initial resize on CPU.
  47. *
  48. * \param[in] v true or false
  49. */
  50. void InitialResizeOnCpu(bool v) { initial_resize_on_cpu_ = v; }
  51. private:
  52. bool BuildPreprocessPipelineFromConfig();
  53. bool initialized_ = false;
  54. std::vector<std::shared_ptr<Processor>> processors_;
  55. // for recording the switch of hwc2chw
  56. bool disable_permute_ = false;
  57. // for recording the switch of normalize
  58. bool disable_normalize_ = false;
  59. // read config file
  60. std::string config_file_;
  61. bool initial_resize_on_cpu_ = false;
  62. };
  63. } // namespace classification
  64. } // namespace vision
  65. } // namespace ultra_infer