paddlex.h

// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <functional>
#include <iostream>
#include <map>
#include <memory>
#include <numeric>
#include <string>
#include <vector>

#include "yaml-cpp/yaml.h"

#ifdef _WIN32
#define OS_PATH_SEP "\\"
#else
#define OS_PATH_SEP "/"
#endif

#include "paddle_inference_api.h"  // NOLINT

#include "config_parser.h"
#include "results.h"
#include "transforms.h"

#ifdef WITH_ENCRYPTION
#include "paddle_model_decrypt.h"
#include "model_code.h"
#endif

namespace PaddleX {

class Model {
 public:
  // Creates the underlying predictor; `key` is the decryption key for an
  // encrypted model (only used when built with WITH_ENCRYPTION).
  void Init(const std::string& model_dir,
            bool use_gpu = false,
            bool use_trt = false,
            int gpu_id = 0,
            std::string key = "",
            int batch_size = 1) {
    create_predictor(model_dir, use_gpu, use_trt, gpu_id, key, batch_size);
  }

  void create_predictor(const std::string& model_dir,
                        bool use_gpu = false,
                        bool use_trt = false,
                        int gpu_id = 0,
                        std::string key = "",
                        int batch_size = 1);

  // Loads the model configuration (model type, labels, transforms) exported
  // alongside the inference model.
  bool load_config(const std::string& model_dir);

  // Applies the configured transforms to a single image or a batch of images.
  bool preprocess(const cv::Mat& input_im, ImageBlob* blob);
  bool preprocess(const std::vector<cv::Mat>& input_im_batch,
                  std::vector<ImageBlob>& blob_batch);

  // Runs inference for classification, detection and segmentation models,
  // on a single image or a batch of images.
  bool predict(const cv::Mat& im, ClsResult* result);
  bool predict(const std::vector<cv::Mat>& im_batch,
               std::vector<ClsResult>& results);
  bool predict(const cv::Mat& im, DetResult* result);
  bool predict(const std::vector<cv::Mat>& im_batch,
               std::vector<DetResult>& result);
  bool predict(const cv::Mat& im, SegResult* result);
  bool predict(const std::vector<cv::Mat>& im_batch,
               std::vector<SegResult>& result);

  // Post-processes raw network output into the result structures.
  bool postprocess(SegResult* result);
  bool postprocess(DetResult* result);

  std::string type;                      // model type (classifier/detector/segmenter)
  std::string name;                      // model name
  std::map<int, std::string> labels;     // category id -> label name
  Transforms transforms_;                // preprocessing pipeline parsed from the config
  ImageBlob inputs_;                     // preprocessed input for a single image
  std::vector<ImageBlob> inputs_batch_;  // preprocessed inputs for a batch
  std::vector<float> outputs_;           // raw output tensor data
  std::unique_ptr<paddle::PaddlePredictor> predictor_;  // Paddle inference predictor
};

}  // namespace PaddleX
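
For reference, a minimal usage sketch of the API declared above, here for a classification model. The model directory and image path are placeholders, and it assumes ClsResult (declared in results.h, not shown here) exposes category and score fields; treat it as an illustration rather than a verbatim sample from the repository.

// Hypothetical usage sketch; paths and ClsResult fields are assumptions.
#include <iostream>
#include <opencv2/opencv.hpp>
#include "paddlex.h"

int main() {
  PaddleX::Model model;
  // "./inference_model" is a placeholder for an exported PaddleX model directory.
  model.Init("./inference_model", /*use_gpu=*/false);

  cv::Mat im = cv::imread("./test.jpg", 1);
  PaddleX::ClsResult result;
  if (model.predict(im, &result)) {
    // Assumes ClsResult carries the top-1 category name and its score.
    std::cout << result.category << " " << result.score << std::endl;
  }
  return 0;
}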