  1. // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
  2. //
  3. // Licensed under the Apache License, Version 2.0 (the "License");
  4. // you may not use this file except in compliance with the License.
  5. // You may obtain a copy of the License at
  6. //
  7. // http://www.apache.org/licenses/LICENSE-2.0
  8. //
  9. // Unless required by applicable law or agreed to in writing, software
  10. // distributed under the License is distributed on an "AS IS" BASIS,
  11. // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. // See the License for the specific language governing permissions and
  13. // limitations under the License.
  14. #pragma once
  15. #include "rknn_api.h" // NOLINT
  16. #include "ultra_infer/core/fd_tensor.h"
  17. #include "ultra_infer/runtime/backends/backend.h"
  18. #include "ultra_infer/runtime/backends/rknpu2/option.h"
  19. #include <cstring>
  20. #include <iostream>
  21. #include <memory>
  22. #include <string>
  23. #include <vector>
  24. namespace ultra_infer {
  25. class RKNPU2Backend : public BaseBackend {
  26. public:
  27. /***************************** BaseBackend API *****************************/
  28. RKNPU2Backend() = default;
  29. virtual ~RKNPU2Backend();
  30. bool Init(const RuntimeOption &runtime_option);
  31. int NumInputs() const override {
  32. return static_cast<int>(inputs_desc_.size());
  33. }
  34. int NumOutputs() const override {
  35. return static_cast<int>(outputs_desc_.size());
  36. }
  37. TensorInfo GetInputInfo(int index) override;
  38. TensorInfo GetOutputInfo(int index) override;
  39. std::vector<TensorInfo> GetInputInfos() override;
  40. std::vector<TensorInfo> GetOutputInfos() override;
  41. bool Infer(std::vector<FDTensor> &inputs, std::vector<FDTensor> *outputs,
  42. bool copy_to_fd = true) override;
  43. /***************************** BaseBackend API *****************************/
  44. private:
  45. /*
  46. * @name RuntimeOptionIsApplicable
  47. * @brief This function is used to determine whether the RuntimeOption
  48. * meets the operating conditions of RKNPU2.
  49. * @param None
  50. * @return bool
  51. * @note None
  52. */
  53. bool RuntimeOptionIsApplicable(const RuntimeOption &runtime_option);
  54. /*
  55. * @name LoadModel
  56. * @brief Read the model and initialize rknn context.
  57. * @param model: Binary data for the RKNN model or the path of RKNN
  58. * model.
  59. * @return bool
  60. * @note None
  61. */
  62. bool LoadModel(void *model);
  63. /*
  64. * @name GetSDKAndDeviceVersion
  65. * @brief Get RKNPU2 sdk and device version.
  66. * @param None
  67. * @return bool
  68. * @note The private variable ctx must be initialized to use this
  69. * function.
  70. */
  71. bool GetSDKAndDeviceVersion();
  72. /*
  73. * @name BuildOption
  74. * @brief Save option and set core mask.
  75. * @param RKNPU2BackendOption
  76. * @note None
  77. */
  78. void BuildOption(const RKNPU2BackendOption &option);
  79. /*
  80. * @name SetCoreMask
  81. * @brief Set NPU core for model
  82. * @param core_mask: The specification of NPU core setting.
  83. * @return bool
  84. * @note Only support RK3588
  85. */
  86. bool SetCoreMask(const rknpu2::CoreMask &core_mask) const;
  87. /*
  88. * @name InitInputAndOutputNumber
  89. * @brief Initialize io_num_.
  90. * @param
  91. * @return bool
  92. * @note The private variable ctx must be initialized to use this
  93. * function.
  94. */
  95. bool InitInputAndOutputNumber();
  96. /*
  97. * @name InitRKNNTensorAddress
  98. * @brief Allocate memory for input_attrs_ and output_attrs_.
  99. * @param None
  100. * @return bool
  101. * @note None
  102. */
  103. bool InitRKNNTensorAddress();
  104. /*
  105. * @name InitInputAndOutputInformation
  106. * @brief Initialize inputs_desc_ and outputs_desc_.
  107. * @param None
  108. * @return bool
  109. * @note None
  110. */
  111. bool InitInputAndOutputInformation();
  112. /*
  113. * @name InitRKNNTensorMemory
  114. * @brief Allocate memory for input and output tensors.
  115. * @param std::vector<FDTensor>& inputs
  116. * @return None
  117. * @note None
  118. */
  119. bool InitRKNNTensorMemory(std::vector<FDTensor> &inputs);
  120. rknn_context ctx_{};
  121. rknn_sdk_version sdk_ver_{};
  122. rknn_input_output_num io_num_{0, 0};
  123. std::vector<TensorInfo> inputs_desc_;
  124. std::vector<TensorInfo> outputs_desc_;
  125. rknn_tensor_attr *input_attrs_ = nullptr;
  126. rknn_tensor_attr *output_attrs_ = nullptr;
  127. std::vector<rknn_tensor_mem *> input_mems_;
  128. std::vector<rknn_tensor_mem *> output_mems_;
  129. bool io_num_init_ = false;
  130. bool tensor_attrs_init_ = false;
  131. bool tensor_memory_init_ = false;
  132. RKNPU2BackendOption option_;
  133. /*
  134. * @name DumpTensorAttr
  135. * @brief Get the model's detailed inputs and outputs
  136. * @param rknn_tensor_attr
  137. * @return None
  138. * @note None
  139. */
  140. void DumpTensorAttr(rknn_tensor_attr &attr);
  141. /*
  142. * @name RknnTensorTypeToFDDataType
  143. * @brief Change RknnTensorType To FDDataType
  144. * @param rknn_tensor_type
  145. * @return None
  146. * @note Most post-processing does not support the fp16 format.
  147. * Therefore, if the input is FP16, the output will be FP32.
  148. */
  149. FDDataType RknnTensorTypeToFDDataType(rknn_tensor_type type);
  150. /*
  151. * @name FDDataTypeToRknnTensorType
  152. * @brief Change FDDataType To RknnTensorType
  153. * @param FDDataType
  154. * @return None
  155. * @note None
  156. */
  157. rknn_tensor_type FDDataTypeToRknnTensorType(FDDataType type);
  158. };
  159. } // namespace ultra_infer