preprocessor.h

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include "ultra_infer/vision/common/processors/manager.h"
#include "ultra_infer/vision/common/processors/transform.h"
#include "ultra_infer/vision/common/result.h"

namespace ultra_infer {
namespace vision {
namespace detection {

/*! @brief Preprocessor object for PaddleDet series models.
 */
class ULTRAINFER_DECL PaddleDetPreprocessor : public ProcessorManager {
 public:
  PaddleDetPreprocessor() = default;
  /** \brief Create a preprocessor instance for PaddleDet series models
   *
   * \param[in] config_file Path of the configuration file for deployment, e.g.
   * ppyoloe/infer_cfg.yml
   */
  explicit PaddleDetPreprocessor(const std::string &config_file);

  /** \brief Implement the virtual function of ProcessorManager. Apply() is the
   * body of Run(): Apply() contains the main preprocessing logic, while Run()
   * is called by users to execute preprocessing.
   *
   * \param[in] image_batch The input image batch
   * \param[in] outputs The output tensors which will be fed into the runtime
   * \return true if the preprocess succeeded, otherwise false
   */
  virtual bool Apply(FDMatBatch *image_batch, std::vector<FDTensor> *outputs);

  /// This function will disable normalize in the preprocessing step.
  void DisableNormalize();
  /// This function will disable hwc2chw in the preprocessing step.
  void DisablePermute();

  std::string GetArch() { return arch_; }

 private:
  bool BuildPreprocessPipelineFromConfig();
  std::vector<std::shared_ptr<Processor>> processors_;
  std::shared_ptr<PadToSize> pad_op_ =
      std::make_shared<PadToSize>(0, 0, std::vector<float>(3, 0));
  bool initialized_ = false;
  // for recording whether hwc2chw is disabled
  bool disable_permute_ = false;
  // for recording whether normalize is disabled
  bool disable_normalize_ = false;
  // path of the config file
  std::string config_file_;
  // arch_ read from the config file, used by the postprocessor
  std::string arch_;
};

}  // namespace detection
}  // namespace vision
}  // namespace ultra_infer
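
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of preprocessor.h; it would live in a
// separate .cc file). It shows one way to drive PaddleDetPreprocessor from a
// decoded image. The Run(std::vector<FDMat>*, std::vector<FDTensor>*) entry
// point and the WrapMat() helper are assumptions based on the FastDeploy-style
// ProcessorManager API referenced in manager.h, and the include path and
// "ppyoloe/infer_cfg.yml" / "test.jpg" inputs are placeholders; check the
// actual signatures before relying on this.

#include <iostream>
#include <vector>

#include "opencv2/opencv.hpp"
#include "ultra_infer/vision/detection/ppdet/preprocessor.h"  // assumed path

int main() {
  namespace det = ultra_infer::vision::detection;

  // Build the preprocessing pipeline from the deployment config file.
  det::PaddleDetPreprocessor preprocessor("ppyoloe/infer_cfg.yml");

  // Optional switches declared above: skip Normalize / hwc2chw if the
  // exported model already performs them.
  // preprocessor.DisableNormalize();
  // preprocessor.DisablePermute();

  cv::Mat image = cv::imread("test.jpg");
  std::vector<ultra_infer::vision::FDMat> mats = {
      ultra_infer::vision::WrapMat(image)};  // assumed helper for cv::Mat
  std::vector<ultra_infer::FDTensor> tensors;

  // Run() (inherited from ProcessorManager) wraps the Apply() declared above.
  if (!preprocessor.Run(&mats, &tensors)) {
    std::cerr << "Preprocessing failed." << std::endl;
    return -1;
  }

  std::cout << "arch: " << preprocessor.GetArch()
            << ", output tensors: " << tensors.size() << std::endl;
  return 0;
}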