// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ultra_infer/runtime/backends/lite/lite_backend.h"
// https://github.com/PaddlePaddle/Paddle-Lite/issues/8290
// When compiling the UltraInfer dynamic library (WITH_STATIC_LIB=OFF)
// against the Paddle Lite static library, you need to include the fake
// registration codes of Paddle Lite here. When compiling the UltraInfer
// static library (WITH_STATIC_LIB=ON) against the Paddle Lite static
// library, you do not need to include the fake registration codes here;
// they are only needed when the UltraInfer static library is used.
#if (defined(WITH_LITE_STATIC) && (!defined(WITH_STATIC_LIB)))
#warning You are compiling the UltraInfer dynamic library with the \
Paddle Lite static lib. We will automatically add some registration \
codes for ops, kernels and passes for Paddle Lite.
#include "paddle_use_kernels.h"  // NOLINT
#include "paddle_use_ops.h"      // NOLINT
#include "paddle_use_passes.h"   // NOLINT
#endif

#include <cstring>

namespace ultra_infer {

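// Translate the generic LiteBackendOption into Paddle Lite's CxxConfig:
// pick the device-specific configuration first, then apply the thread count
// and power mode when they are explicitly set.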
void LiteBackend::BuildOption(const LiteBackendOption &option) {
  option_ = option;

  if (option_.device == Device::CPU) {
    ConfigureCpu(option_);
  } else if (option_.device == Device::GPU) {
    ConfigureGpu(option_);
  } else if (option_.device == Device::TIMVX) {
    ConfigureTimvx(option_);
  } else if (option_.device == Device::KUNLUNXIN) {
    ConfigureKunlunXin(option_);
  } else if (option_.device == Device::ASCEND) {
    ConfigureAscend(option_);
  }

  if (option_.cpu_threads > 0) {
    config_.set_threads(option_.cpu_threads);
  }
  if (option_.power_mode > 0) {
    config_.set_power_mode(
        static_cast<paddle::lite_api::PowerMode>(option_.power_mode));
  }
}

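// Initialize the backend: validate the runtime options, create a Paddle Lite
// predictor (light API via MobileConfig when no params file is given, full
// API via CxxConfig otherwise), and cache the model's input/output metadata.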
bool LiteBackend::Init(const RuntimeOption &runtime_option) {
  if (initialized_) {
    FDERROR << "LiteBackend is already initialized, cannot initialize again."
            << std::endl;
    return false;
  }
  if (runtime_option.model_format != ModelFormat::PADDLE) {
    FDERROR
        << "PaddleLiteBackend only supports model format PADDLE, but now it's "
        << runtime_option.model_format << "." << std::endl;
    return false;
  }
  if (runtime_option.device != Device::CPU &&
      runtime_option.device != Device::GPU &&
      runtime_option.device != Device::KUNLUNXIN &&
      runtime_option.device != Device::ASCEND &&
      runtime_option.device != Device::TIMVX) {
    FDERROR << "PaddleLiteBackend only supports "
               "Device::CPU/Device::GPU/Device::TIMVX/Device::KUNLUNXIN/"
               "Device::ASCEND, "
               "but now it's "
            << runtime_option.device << "." << std::endl;
    return false;
  }
  if (runtime_option.device == Device::GPU &&
      !paddle::lite_api::IsOpenCLBackendValid()) {
    FDERROR << "PaddleLiteBackend GPU (OpenCL) is not supported by the "
               "current device."
            << std::endl;
  }
  if (runtime_option.model_from_memory_) {
    FDERROR << "PaddleLiteBackend doesn't support loading a model from "
               "memory, please load the model from disk."
            << std::endl;
    return false;
  }
  if (runtime_option.params_file == "") {
    // Use the light API for Arm CPU via MobileConfig.
    FDASSERT(runtime_option.device == Device::CPU,
             "In UltraInfer, the Paddle Lite light API is only supported for "
             "Arm CPU now!")
    mobile_config_.set_model_from_file(runtime_option.model_file);
    mobile_config_.set_threads(runtime_option.paddle_lite_option.cpu_threads);
    mobile_config_.set_power_mode(static_cast<paddle::lite_api::PowerMode>(
        runtime_option.paddle_lite_option.power_mode));
    // TODO(qiuyanjun): Add OpenCL support for mobile GPU.
    // Paddle-Lite/blob/develop/lite/api/tools/benchmark/benchmark.h#L265
    // mobile_config_.set_opencl_tune(
    //     tune_mode, opencl_cache_dir, opencl_tuned_file);
    // mobile_config_.set_opencl_precision(gpu_precision);
    predictor_ =
        paddle::lite_api::CreatePaddlePredictor<paddle::lite_api::MobileConfig>(
            mobile_config_);
  } else {
    // Use the full API for various hardware via CxxConfig.
    config_.set_model_file(runtime_option.model_file);
    config_.set_param_file(runtime_option.params_file);
    BuildOption(runtime_option.paddle_lite_option);
    predictor_ =
        paddle::lite_api::CreatePaddlePredictor<paddle::lite_api::CxxConfig>(
            config_);
    if (option_.optimized_model_dir != "") {
      FDINFO
          << "Optimized model dir is not empty, will save optimized model to: "
          << option_.optimized_model_dir << std::endl;
      predictor_->SaveOptimizedModel(
          option_.optimized_model_dir,
          paddle::lite_api::LiteModelType::kNaiveBuffer);
    }
  }

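  // Cache the model's input/output tensor metadata so that Infer() can
  // validate incoming tensors and map input names to predictor indices.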
  inputs_desc_.clear();
  outputs_desc_.clear();
  inputs_order_.clear();
  std::vector<std::string> input_names = predictor_->GetInputNames();
  std::vector<std::string> output_names = predictor_->GetOutputNames();
  for (size_t i = 0; i < input_names.size(); ++i) {
    inputs_order_[input_names[i]] = i;
    TensorInfo info;
    auto tensor = predictor_->GetInput(i);
    auto shape = tensor->shape();
    info.shape.assign(shape.begin(), shape.end());
    info.name = input_names[i];
    info.dtype = LiteDataTypeToFD(tensor->precision());
    inputs_desc_.emplace_back(info);
  }
  for (size_t i = 0; i < output_names.size(); ++i) {
    TensorInfo info;
    auto tensor = predictor_->GetOutput(i);
    auto shape = tensor->shape();
    info.shape.assign(shape.begin(), shape.end());
    info.name = output_names[i];
    if (option_.device != Device::KUNLUNXIN) {
      info.dtype = LiteDataTypeToFD(tensor->precision());
    }
    outputs_desc_.emplace_back(info);
  }

  initialized_ = true;
  return true;
}

TensorInfo LiteBackend::GetInputInfo(int index) {
  FDASSERT(index < NumInputs(),
           "The index: %d should be less than the number of inputs: %d.",
           index, NumInputs());
  return inputs_desc_[index];
}

std::vector<TensorInfo> LiteBackend::GetInputInfos() { return inputs_desc_; }

TensorInfo LiteBackend::GetOutputInfo(int index) {
  FDASSERT(index < NumOutputs(),
           "The index: %d should be less than the number of outputs: %d.",
           index, NumOutputs());
  return outputs_desc_[index];
}

std::vector<TensorInfo> LiteBackend::GetOutputInfos() { return outputs_desc_; }

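// Run one inference pass: copy the host-side input FDTensors into the
// predictor's input tensors, execute the predictor, then copy the results
// back into the output FDTensors.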
bool LiteBackend::Infer(std::vector<FDTensor> &inputs,
                        std::vector<FDTensor> *outputs, bool copy_to_fd) {
  if (inputs.size() != inputs_desc_.size()) {
    FDERROR << "[LiteBackend] Size of inputs(" << inputs.size()
            << ") should match the number of inputs of this model("
            << inputs_desc_.size() << ")." << std::endl;
    return false;
  }

  RUNTIME_PROFILE_LOOP_H2D_D2H_BEGIN
  for (size_t i = 0; i < inputs.size(); ++i) {
    auto iter = inputs_order_.find(inputs[i].name);
    if (iter == inputs_order_.end()) {
      FDERROR << "Cannot find input with name:" << inputs[i].name
              << " in loaded model." << std::endl;
      return false;
    }
    auto tensor = predictor_->GetInput(iter->second);
    // Adjust dims only; allocation is lazy.
    tensor->Resize(inputs[i].shape);
    if (inputs[i].dtype == FDDataType::FP32) {
      tensor->CopyFromCpu<float, paddle::lite_api::TargetType::kHost>(
          reinterpret_cast<const float *>(
              const_cast<void *>(inputs[i].CpuData())));
    } else if (inputs[i].dtype == FDDataType::INT32) {
      tensor->CopyFromCpu<int, paddle::lite_api::TargetType::kHost>(
          reinterpret_cast<const int *>(
              const_cast<void *>(inputs[i].CpuData())));
    } else if (inputs[i].dtype == FDDataType::INT8) {
      tensor->CopyFromCpu<int8_t, paddle::lite_api::TargetType::kHost>(
          reinterpret_cast<const int8_t *>(
              const_cast<void *>(inputs[i].CpuData())));
    } else if (inputs[i].dtype == FDDataType::UINT8) {
      tensor->CopyFromCpu<uint8_t, paddle::lite_api::TargetType::kHost>(
          reinterpret_cast<const uint8_t *>(
              const_cast<void *>(inputs[i].CpuData())));
    } else if (inputs[i].dtype == FDDataType::INT64) {
#if (defined(__aarch64__) || defined(__x86_64__) || defined(_M_X64) || \
     defined(_M_ARM64))
      tensor->CopyFromCpu<int64_t, paddle::lite_api::TargetType::kHost>(
          reinterpret_cast<const int64_t *>(
              const_cast<void *>(inputs[i].CpuData())));
#else
      FDASSERT(false, "FDDataType::INT64 is not supported on x86/armv7 now!");
#endif
    } else {
      FDASSERT(false, "Unexpected data type of %d.", inputs[i].dtype);
    }
  }

  RUNTIME_PROFILE_LOOP_BEGIN(1)
  predictor_->Run();
  RUNTIME_PROFILE_LOOP_END

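  // Refresh the cached output dtype from the actual tensor, since the
  // precision recorded in Init() may not match (e.g. it is left unset for
  // KunlunXin), then copy each output back to the host.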
  outputs->resize(outputs_desc_.size());
  for (size_t i = 0; i < outputs_desc_.size(); ++i) {
    auto tensor = predictor_->GetOutput(i);
    if (outputs_desc_[i].dtype != LiteDataTypeToFD(tensor->precision())) {
      outputs_desc_[i].dtype = LiteDataTypeToFD(tensor->precision());
    }
    (*outputs)[i].Resize(tensor->shape(), outputs_desc_[i].dtype,
                         outputs_desc_[i].name);
    memcpy((*outputs)[i].MutableData(), tensor->data<void>(),
           (*outputs)[i].Nbytes());
  }
  RUNTIME_PROFILE_LOOP_H2D_D2H_END
  return true;
}

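// Read the whole file at `filename` into `contents`; returns false if the
// file cannot be opened or a read error occurs.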
bool ReadFile(const std::string &filename, std::vector<char> *contents,
              bool binary) {
  FILE *fp = fopen(filename.c_str(), binary ? "rb" : "r");
  if (!fp) {
    FDERROR << "Cannot open file " << filename << "." << std::endl;
    return false;
  }
  fseek(fp, 0, SEEK_END);
  size_t size = ftell(fp);
  fseek(fp, 0, SEEK_SET);
  contents->clear();
  contents->resize(size);
  size_t offset = 0;
  char *ptr = contents->data();
  while (offset < size) {
    size_t already_read = fread(ptr, 1, size - offset, fp);
    if (already_read == 0) {
      // Stop on EOF or read error instead of looping forever.
      FDERROR << "Failed to read file " << filename << "." << std::endl;
      fclose(fp);
      return false;
    }
    offset += already_read;
    ptr += already_read;
  }
  fclose(fp);
  return true;
}

// Convert a data type from Paddle Lite to UltraInfer.
FDDataType LiteDataTypeToFD(const paddle::lite_api::PrecisionType &dtype) {
  if (dtype == paddle::lite_api::PrecisionType::kFloat) {
    return FDDataType::FP32;
  } else if (dtype == paddle::lite_api::PrecisionType::kInt8) {
    return FDDataType::INT8;
  } else if (dtype == paddle::lite_api::PrecisionType::kInt32) {
    return FDDataType::INT32;
  } else if (dtype == paddle::lite_api::PrecisionType::kInt64) {
    return FDDataType::INT64;
  } else if (dtype == paddle::lite_api::PrecisionType::kInt16) {
    return FDDataType::INT16;
  } else if (dtype == paddle::lite_api::PrecisionType::kUInt8) {
    return FDDataType::UINT8;
  } else if (dtype == paddle::lite_api::PrecisionType::kFP64) {
    return FDDataType::FP64;
  }
  FDASSERT(false, "Unexpected data type of %s.",
           paddle::lite_api::PrecisionToStr(dtype).c_str());
  return FDDataType::FP32;
}

}  // namespace ultra_infer