// animegan.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
  14. #pragma once
  15. #include "ultra_infer/ultra_infer_model.h"
  16. #include "ultra_infer/vision/common/processors/transform.h"
  17. #include "ultra_infer/vision/generation/contrib/postprocessor.h"
  18. #include "ultra_infer/vision/generation/contrib/preprocessor.h"
  19. namespace ultra_infer {
  20. namespace vision {
  21. namespace generation {
  22. /*! @brief AnimeGAN model object is used when load a AnimeGAN model.
  23. */
  24. class ULTRAINFER_DECL AnimeGAN : public UltraInferModel {
  25. public:
  26. /** \brief Set path of model file and the configuration of runtime.
  27. *
  28. * \param[in] model_file Path of model file, e.g ./model.pdmodel
  29. * \param[in] params_file Path of parameter file, e.g ./model.pdiparams, if
  30. * the model format is ONNX, this parameter will be ignored \param[in]
  31. * custom_option RuntimeOption for inference, the default will use cpu, and
  32. * choose the backend defined in "valid_cpu_backends" \param[in] model_format
  33. * Model format of the loaded model, default is PADDLE format
  34. */
  35. AnimeGAN(const std::string &model_file, const std::string &params_file = "",
  36. const RuntimeOption &custom_option = RuntimeOption(),
  37. const ModelFormat &model_format = ModelFormat::PADDLE);
  38. std::string ModelName() const { return "styletransfer/animegan"; }
  39. /** \brief Predict the style transfer result for an input image
  40. *
  41. * \param[in] im The input image data, comes from cv::imread(), is a 3-D array
  42. * with layout HWC, BGR format \param[in] result The output style transfer
  43. * result will be written to this structure \return true if the prediction
  44. * succeeded, otherwise false
  45. */
  46. bool Predict(cv::Mat &img, cv::Mat *result);
  47. /** \brief Predict the style transfer result for a batch of input images
  48. *
  49. * \param[in] images The list of input images, each element comes from
  50. * cv::imread(), is a 3-D array with layout HWC, BGR format \param[in] results
  51. * The list of output style transfer results will be written to this structure
  52. * \return true if the batch prediction succeeded, otherwise false
  53. */
  54. bool BatchPredict(const std::vector<cv::Mat> &images,
  55. std::vector<cv::Mat> *results);
  56. // Get preprocessor reference of AnimeGAN
  57. AnimeGANPreprocessor &GetPreprocessor() { return preprocessor_; }
  58. // Get postprocessor reference of AnimeGAN
  59. AnimeGANPostprocessor &GetPostprocessor() { return postprocessor_; }
  60. private:
  61. bool Initialize();
  62. AnimeGANPreprocessor preprocessor_;
  63. AnimeGANPostprocessor postprocessor_;
  64. };
  65. } // namespace generation
  66. } // namespace vision
  67. } // namespace ultra_infer