backend.h

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include <iostream>
#include <memory>
#include <string>
#include <vector>

#include "ultra_infer/benchmark/benchmark.h"
#include "ultra_infer/core/fd_tensor.h"
#include "ultra_infer/core/fd_type.h"
#include "ultra_infer/runtime/runtime_option.h"

namespace ultra_infer {
/*! @brief Information of Tensor
 */
struct TensorInfo {
  std::string name;        ///< Name of tensor
  std::vector<int> shape;  ///< Shape of tensor
  FDDataType dtype;        ///< Data type of tensor

  friend std::ostream &operator<<(std::ostream &output,
                                  const TensorInfo &info) {
    output << "TensorInfo(name: " << info.name << ", shape: [";
    for (size_t i = 0; i < info.shape.size(); ++i) {
      if (i == info.shape.size() - 1) {
        output << info.shape[i];
      } else {
        output << info.shape[i] << ", ";
      }
    }
    output << "], dtype: " << Str(info.dtype) << ")";
    return output;
  }
};
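
/* Example usage (a minimal sketch: the tensor below is hypothetical, and
 * the exact dtype string depends on what Str(FDDataType) returns). @code
 * TensorInfo info;
 * info.name = "image";
 * info.shape = {1, 3, 224, 224};
 * info.dtype = FDDataType::FP32;
 * std::cout << info << std::endl;
 * // Prints something like:
 * // TensorInfo(name: image, shape: [1, 3, 224, 224], dtype: FP32)
 * @endcode
 */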
class BaseBackend {
 public:
  bool initialized_ = false;

  BaseBackend() {}
  virtual ~BaseBackend() = default;

  virtual bool Initialized() const { return initialized_; }

  virtual bool Init(const RuntimeOption &option) {
    FDERROR << "Not implemented for " << option.backend << " on "
            << option.device << "." << std::endl;
    return false;
  }
  // Get number of inputs of the model
  virtual int NumInputs() const = 0;
  // Get number of outputs of the model
  virtual int NumOutputs() const = 0;
  // Get information of input tensor
  virtual TensorInfo GetInputInfo(int index) = 0;
  // Get information of output tensor
  virtual TensorInfo GetOutputInfo(int index) = 0;
  // Get information of all the input tensors
  virtual std::vector<TensorInfo> GetInputInfos() = 0;
  // Get information of all the output tensors
  virtual std::vector<TensorInfo> GetOutputInfos() = 0;
  // If copy_to_fd is true, copy the memory data to the output FDTensors;
  // otherwise, share the memory with them (only the Paddle, ORT, TRT and
  // OpenVINO backends support sharing).
  virtual bool Infer(std::vector<FDTensor> &inputs,
                     std::vector<FDTensor> *outputs,
                     bool copy_to_fd = true) = 0;
  // Optional: for backends that can share memory
  // while creating multiple inference engines from the same model file
  virtual std::unique_ptr<BaseBackend> Clone(RuntimeOption &runtime_option,
                                             void *stream = nullptr,
                                             int device_id = -1) {
    FDERROR << "Clone is not supported for " << runtime_option.backend
            << " (stream: " << stream << ", device_id: " << device_id << ")."
            << std::endl;
    return nullptr;
  }
  benchmark::BenchmarkOption benchmark_option_;
  benchmark::BenchmarkResult benchmark_result_;
};
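
/* A minimal sketch of a custom backend (hypothetical: 'MyBackend' and its
 * members are illustrative only; a real backend would build its engine in
 * Init() and execute it in Infer()). @code
 * class MyBackend : public BaseBackend {
 *  public:
 *   bool Init(const RuntimeOption &option) override {
 *     // Load the model described by 'option', fill inputs_desc_ /
 *     // outputs_desc_, then mark the backend as ready.
 *     initialized_ = true;
 *     return true;
 *   }
 *   int NumInputs() const override {
 *     return static_cast<int>(inputs_desc_.size());
 *   }
 *   int NumOutputs() const override {
 *     return static_cast<int>(outputs_desc_.size());
 *   }
 *   TensorInfo GetInputInfo(int index) override { return inputs_desc_[index]; }
 *   TensorInfo GetOutputInfo(int index) override {
 *     return outputs_desc_[index];
 *   }
 *   std::vector<TensorInfo> GetInputInfos() override { return inputs_desc_; }
 *   std::vector<TensorInfo> GetOutputInfos() override { return outputs_desc_; }
 *   bool Infer(std::vector<FDTensor> &inputs, std::vector<FDTensor> *outputs,
 *              bool copy_to_fd = true) override {
 *     // Feed 'inputs' to the engine, run it, and fill 'outputs', copying
 *     // the data when copy_to_fd is true and sharing memory otherwise.
 *     return true;
 *   }
 *
 *  private:
 *   std::vector<TensorInfo> inputs_desc_;
 *   std::vector<TensorInfo> outputs_desc_;
 * };
 * @endcode
 */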
/** \brief Macros for Runtime benchmark profiling.
 * The parameter 'base_loop' of 'RUNTIME_PROFILE_LOOP_BEGIN'
 * indicates the minimum number of times the loop will repeat
 * when profiling mode is not enabled. In most cases the value
 * should be 1, i.e., when profiling is turned off, the results
 * are obtained by running the inference process once, as for
 * ONNX Runtime, OpenVINO, TensorRT, Paddle Inference,
 * Paddle Lite, RKNPU2, SOPHGO, etc.
 *
 * example code @code
 * // OpenVINOBackend::Infer
 * RUNTIME_PROFILE_LOOP_H2D_D2H_BEGIN
 * // do something ....
 * RUNTIME_PROFILE_LOOP_BEGIN(1)
 * // The code wrapped by the 'BEGIN(1) ~ END' scope
 * // will run only once when profiling mode is not enabled.
 * request_.infer();
 * RUNTIME_PROFILE_LOOP_END
 * // do something ....
 * RUNTIME_PROFILE_LOOP_H2D_D2H_END
 *
 * @endcode In this case, no variables that are required by
 * subsequent tasks are declared inside the 'BEGIN ~ END' scope.
 * Sometimes, however, we need to set 'base_loop' to 0, as for
 * POROS.
 *
 * example code @code
 * // PorosBackend::Infer
 * RUNTIME_PROFILE_LOOP_H2D_D2H_BEGIN
 * // do something ....
 * RUNTIME_PROFILE_LOOP_BEGIN(0) // set 'base_loop' to 0
 * // The code wrapped by the 'BEGIN(0) ~ END' scope
 * // will not run when profiling mode is not enabled.
 * auto poros_outputs = _poros_module->forward(poros_inputs);
 * RUNTIME_PROFILE_LOOP_END
 * // Run another inference beyond the 'BEGIN ~ END' scope
 * // to get valid outputs for subsequent tasks.
 * auto poros_outputs = _poros_module->forward(poros_inputs);
 * // do something .... will use 'poros_outputs' ...
 * if (poros_outputs.isTensor()) {
 *   // ...
 * }
 * RUNTIME_PROFILE_LOOP_H2D_D2H_END
 *
 * @endcode In this case, 'poros_outputs' is declared inside the
 * 'BEGIN ~ END' scope but is required by subsequent tasks, so we
 * set 'base_loop' to 0 and launch another inference beyond the
 * 'BEGIN ~ END' scope to obtain valid outputs for those tasks.
 */
#define RUNTIME_PROFILE_LOOP_BEGIN(base_loop)                                  \
  __RUNTIME_PROFILE_LOOP_BEGIN(benchmark_option_, (base_loop))
#define RUNTIME_PROFILE_LOOP_END __RUNTIME_PROFILE_LOOP_END(benchmark_result_)
#define RUNTIME_PROFILE_LOOP_H2D_D2H_BEGIN                                     \
  __RUNTIME_PROFILE_LOOP_H2D_D2H_BEGIN(benchmark_option_, 1)
#define RUNTIME_PROFILE_LOOP_H2D_D2H_END                                       \
  __RUNTIME_PROFILE_LOOP_H2D_D2H_END(benchmark_result_)

} // namespace ultra_infer
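
/* A caller-side sketch (hypothetical: in practice backends are normally
 * driven through the Runtime class rather than used directly; 'backend' is
 * assumed to be an already-initialized BaseBackend-derived instance and
 * 'inputs' to hold valid FDTensors matching the model). @code
 * for (int i = 0; i < backend->NumInputs(); ++i) {
 *   std::cout << backend->GetInputInfo(i) << std::endl;
 * }
 * std::vector<FDTensor> outputs;
 * if (!backend->Infer(inputs, &outputs)) {
 *   FDERROR << "Inference failed." << std::endl;
 * }
 * @endcode
 */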