// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "ultra_infer/ultra_infer_model.h"
#include "ultra_infer/vision/classification/ppshitu/ppshituv2_rec_postprocessor.h"
#include "ultra_infer/vision/classification/ppshitu/ppshituv2_rec_preprocessor.h"
namespace ultra_infer {
namespace vision {
namespace classification {

/*! @brief PPShiTuV2Recognizer model object, used to load a recognition
 * (feature-extraction) model exported by PP-ShiTuV2 Rec, e.g. a PPLCNet
 * backbone. Prediction fills the `feature` field of ClassifyResult.
 */
class ULTRAINFER_DECL PPShiTuV2Recognizer : public UltraInferModel {
public:
  /** \brief Set path of model file and configuration file, and the
   * configuration of runtime
   *
   * \param[in] model_file Path of model file, e.g PPLCNet/inference.pdmodel
   * \param[in] params_file Path of parameter file, e.g
   * PPLCNet/inference.pdiparams; if the model format is ONNX, this
   * parameter will be ignored
   * \param[in] config_file Path of configuration file for deployment, e.g
   * PPLCNet/inference_cls.yml
   * \param[in] custom_option RuntimeOption for inference, the default will
   * use cpu, and choose the backend defined in `valid_cpu_backends`
   * \param[in] model_format Model format of the loaded model, default is
   * Paddle format
   */
  PPShiTuV2Recognizer(const std::string &model_file,
                      const std::string &params_file,
                      const std::string &config_file,
                      const RuntimeOption &custom_option = RuntimeOption(),
                      const ModelFormat &model_format = ModelFormat::PADDLE);

  /** \brief Clone a new PPShiTuV2Recognizer with less memory usage when
   * multiple instances of the same model are created
   *
   * \return new PPShiTuV2Recognizer* type unique pointer
   */
  virtual std::unique_ptr<PPShiTuV2Recognizer> Clone() const;

  /// Get model's name
  virtual std::string ModelName() const { return "PPShiTuV2Recognizer"; }

  /** \brief DEPRECATED Predict the feature vector result for an input image
   * (mutable-pointer overload); to be removed at version 1.0 — prefer the
   * const-reference overload below
   *
   * \param[in] im The input image data, comes from cv::imread()
   * \param[in] result The output feature vector result will be written to
   * this structure
   * \return true if the prediction succeeded, otherwise false
   */
  virtual bool Predict(cv::Mat *im, ClassifyResult *result);

  /** \brief Predict the feature vector result for an input image
   *
   * \param[in] img The input image data, comes from cv::imread()
   * \param[in] result The output feature vector result
   * \return true if the prediction succeeded, otherwise false
   */
  virtual bool Predict(const cv::Mat &img, ClassifyResult *result);

  /** \brief Predict the feature vector results for a batch of input images
   *
   * \param[in] imgs The input image list, each element comes from
   * cv::imread()
   * \param[in] results The output feature vector (namely
   * ClassifyResult.feature) result list
   * \return true if the prediction succeeded, otherwise false
   */
  virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
                            std::vector<ClassifyResult> *results);

  /** \brief Predict the feature vector result for an input image wrapped as
   * an FDMat
   *
   * \param[in] mat The input mat
   * \param[in] result The output feature vector result
   * \return true if the prediction succeeded, otherwise false
   */
  virtual bool Predict(const FDMat &mat, ClassifyResult *result);

  /** \brief Predict the feature vector results for a batch of input images
   * wrapped as FDMats
   *
   * \param[in] mats The input mat list
   * \param[in] results The output feature vector result list
   * \return true if the prediction succeeded, otherwise false
   */
  virtual bool BatchPredict(const std::vector<FDMat> &mats,
                            std::vector<ClassifyResult> *results);

  /// Get preprocessor reference of PPShiTuV2Recognizer
  virtual PPShiTuV2RecognizerPreprocessor &GetPreprocessor() {
    return preprocessor_;
  }

  /// Get postprocessor reference of PPShiTuV2Recognizer
  virtual PPShiTuV2RecognizerPostprocessor &GetPostprocessor() {
    return postprocessor_;
  }

protected:
  // One-time runtime setup called by the constructor path; returns false on
  // failure (exact behavior defined in the .cc — not visible here).
  bool Initialize();
  PPShiTuV2RecognizerPreprocessor preprocessor_;
  PPShiTuV2RecognizerPostprocessor postprocessor_;
};

} // namespace classification
} // namespace vision
} // namespace ultra_infer