paddle_backend.h
  1. // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
#pragma once

#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

#include "ultra_infer/runtime/backends/backend.h"
#include "ultra_infer/runtime/backends/paddle/option.h"
#ifdef ENABLE_PADDLE2ONNX
#include "paddle2onnx/converter.h"
#endif
#include "paddle/include/paddle_inference_api.h" // NOLINT
#include "ultra_infer/utils/unique_ptr.h"
  26. namespace ultra_infer {
// Convert an ultra_infer Device enum value to the corresponding
// paddle_infer::PlaceType.
paddle_infer::PlaceType ConvertFDDeviceToPlace(Device device);

// Share the memory buffer of an ultra_infer::FDTensor with a
// paddle_infer::Tensor (input direction).
void ShareTensorFromFDTensor(paddle_infer::Tensor *tensor, FDTensor &fd_tensor);

// Share the memory buffer of an ultra_infer::FDTensor with a
// paddle_infer::Tensor (output direction).
void ShareOutTensorFromFDTensor(paddle_infer::Tensor *tensor,
                                FDTensor &fd_tensor);

// Convert a paddle_infer::Tensor to an ultra_infer::FDTensor.
// If copy_to_fd is true, the tensor data is copied into the FDTensor;
// otherwise the FDTensor shares the tensor's memory.
void PaddleTensorToFDTensor(std::unique_ptr<paddle_infer::Tensor> &tensor,
                            FDTensor *fd_tensor, bool copy_to_fd);

// Convert a paddle inference data type to the ultra_infer FDDataType.
FDDataType PaddleDataTypeToFD(const paddle_infer::DataType &dtype);

// Convert a paddle2onnx::PaddleReader data type code to the
// ultra_infer FDDataType.
FDDataType ReaderDataTypeToFD(int32_t dtype);
  42. class PaddleBackend : public BaseBackend {
  43. public:
  44. PaddleBackend() {}
  45. virtual ~PaddleBackend() = default;
  46. bool Init(const RuntimeOption &option);
  47. bool Infer(std::vector<FDTensor> &inputs, std::vector<FDTensor> *outputs,
  48. bool copy_to_fd = true) override;
  49. int NumInputs() const override { return inputs_desc_.size(); }
  50. int NumOutputs() const override { return outputs_desc_.size(); }
  51. std::unique_ptr<BaseBackend> Clone(RuntimeOption &runtime_option,
  52. void *stream = nullptr,
  53. int device_id = -1) override;
  54. TensorInfo GetInputInfo(int index) override;
  55. TensorInfo GetOutputInfo(int index) override;
  56. std::vector<TensorInfo> GetInputInfos() override;
  57. std::vector<TensorInfo> GetOutputInfos() override;
  58. private:
  59. void BuildOption(const PaddleBackendOption &option);
  60. bool
  61. InitFromPaddle(const std::string &model, const std::string &params,
  62. bool model_from_memory,
  63. const PaddleBackendOption &option = PaddleBackendOption());
  64. void
  65. CollectShapeRun(paddle_infer::Predictor *predictor,
  66. const std::map<std::string, std::vector<int>> &shape,
  67. const std::map<std::string, std::vector<float>> &data) const;
  68. void GetDynamicShapeFromOption(
  69. const PaddleBackendOption &option,
  70. std::map<std::string, std::vector<int>> *max_shape,
  71. std::map<std::string, std::vector<int>> *min_shape,
  72. std::map<std::string, std::vector<int>> *opt_shape) const;
  73. void GetInputDataFromOption(
  74. const PaddleBackendOption &option,
  75. std::map<std::string, std::vector<float>> *max_input_data,
  76. std::map<std::string, std::vector<float>> *min_input_data,
  77. std::map<std::string, std::vector<float>> *opt_input_data) const;
  78. void SetTRTDynamicShapeToConfig(const PaddleBackendOption &option);
  79. PaddleBackendOption option_;
  80. paddle_infer::Config config_;
  81. std::shared_ptr<paddle_infer::Predictor> predictor_;
  82. std::vector<TensorInfo> inputs_desc_;
  83. std::vector<TensorInfo> outputs_desc_;
  84. };
  85. } // namespace ultra_infer