// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

#include <memory>
#include <string>
#include <vector>

#include "ultra_infer/ultra_infer_model.h"
#include "ultra_infer/vision/classification/ppcls/postprocessor.h"
#include "ultra_infer/vision/classification/ppcls/preprocessor.h"
  18. namespace ultra_infer {
  19. namespace vision {
  20. /** \brief All classification model APIs are defined inside this namespace
  21. *
  22. */
  23. namespace classification {
  24. /*! @brief PaddleClas serials model object used when to load a PaddleClas model
  25. * exported by PaddleClas repository
  26. */
  27. class ULTRAINFER_DECL PaddleClasModel : public UltraInferModel {
  28. public:
  29. /** \brief Set path of model file and configuration file, and the
  30. * configuration of runtime
  31. *
  32. * \param[in] model_file Path of model file, e.g resnet/model.pdmodel
  33. * \param[in] params_file Path of parameter file, e.g resnet/model.pdiparams,
  34. * if the model format is ONNX, this parameter will be ignored \param[in]
  35. * config_file Path of configuration file for deployment, e.g
  36. * resnet/infer_cfg.yml \param[in] custom_option RuntimeOption for inference,
  37. * the default will use cpu, and choose the backend defined in
  38. * `valid_cpu_backends` \param[in] model_format Model format of the loaded
  39. * model, default is Paddle format
  40. */
  41. PaddleClasModel(const std::string &model_file, const std::string &params_file,
  42. const std::string &config_file,
  43. const RuntimeOption &custom_option = RuntimeOption(),
  44. const ModelFormat &model_format = ModelFormat::PADDLE);
  45. /** \brief Clone a new PaddleClasModel with less memory usage when multiple
  46. * instances of the same model are created
  47. *
  48. * \return new PaddleClasModel* type unique pointer
  49. */
  50. virtual std::unique_ptr<PaddleClasModel> Clone() const;
  51. /// Get model's name
  52. virtual std::string ModelName() const { return "PaddleClas/Model"; }
  53. /** \brief DEPRECATED Predict the classification result for an input image,
  54. * remove at 1.0 version
  55. *
  56. * \param[in] im The input image data, comes from cv::imread()
  57. * \param[in] result The output classification result will be written to this
  58. * structure \return true if the prediction succeeded, otherwise false
  59. */
  60. virtual bool Predict(cv::Mat *im, ClassifyResult *result, int topk = 1);
  61. /** \brief Predict the classification result for an input image
  62. *
  63. * \param[in] img The input image data, comes from cv::imread()
  64. * \param[in] result The output classification result
  65. * \return true if the prediction succeeded, otherwise false
  66. */
  67. virtual bool Predict(const cv::Mat &img, ClassifyResult *result);
  68. /** \brief Predict the classification results for a batch of input images
  69. *
  70. * \param[in] imgs, The input image list, each element comes from cv::imread()
  71. * \param[in] results The output classification result list
  72. * \return true if the prediction succeeded, otherwise false
  73. */
  74. virtual bool BatchPredict(const std::vector<cv::Mat> &imgs,
  75. std::vector<ClassifyResult> *results);
  76. /** \brief Predict the classification result for an input image
  77. *
  78. * \param[in] mat The input mat
  79. * \param[in] result The output classification result
  80. * \return true if the prediction succeeded, otherwise false
  81. */
  82. virtual bool Predict(const FDMat &mat, ClassifyResult *result);
  83. /** \brief Predict the classification results for a batch of input images
  84. *
  85. * \param[in] mats, The input mat list
  86. * \param[in] results The output classification result list
  87. * \return true if the prediction succeeded, otherwise false
  88. */
  89. virtual bool BatchPredict(const std::vector<FDMat> &mats,
  90. std::vector<ClassifyResult> *results);
  91. /// Get preprocessor reference of PaddleClasModel
  92. virtual PaddleClasPreprocessor &GetPreprocessor() { return preprocessor_; }
  93. /// Get postprocessor reference of PaddleClasModel
  94. virtual PaddleClasPostprocessor &GetPostprocessor() { return postprocessor_; }
  95. protected:
  96. bool Initialize();
  97. PaddleClasPreprocessor preprocessor_;
  98. PaddleClasPostprocessor postprocessor_;
  99. };
  100. typedef PaddleClasModel PPLCNet;
  101. typedef PaddleClasModel PPLCNetv2;
  102. typedef PaddleClasModel EfficientNet;
  103. typedef PaddleClasModel GhostNet;
  104. typedef PaddleClasModel MobileNetv1;
  105. typedef PaddleClasModel MobileNetv2;
  106. typedef PaddleClasModel MobileNetv3;
  107. typedef PaddleClasModel ShuffleNetv2;
  108. typedef PaddleClasModel SqueezeNet;
  109. typedef PaddleClasModel Inceptionv3;
  110. typedef PaddleClasModel PPHGNet;
  111. typedef PaddleClasModel ResNet50vd;
  112. typedef PaddleClasModel SwinTransformer;
  113. } // namespace classification
  114. } // namespace vision
  115. } // namespace ultra_infer