ov_backend.cc

// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ultra_infer/runtime/backends/openvino/ov_backend.h"

#ifdef ENABLE_PADDLE2ONNX
#include "paddle2onnx/converter.h"
#endif

namespace ultra_infer {
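
// Converts an ov::PartialShape into a plain vector, mapping every dynamic
// dimension to -1 so downstream code can recognize it.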
std::vector<int64_t> PartialShapeToVec(const ov::PartialShape &shape) {
  std::vector<int64_t> res;
  for (size_t i = 0; i < shape.size(); ++i) {
    auto dim = shape[i];
    if (dim.is_dynamic()) {
      res.push_back(-1);
    } else {
      res.push_back(dim.get_length());
    }
  }
  return res;
}
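
// Inverse of PartialShapeToVec: a -1 entry maps back to a dynamic
// ov::Dimension.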
ov::PartialShape VecToPartialShape(const std::vector<int64_t> &shape) {
  std::vector<ov::Dimension> dims;
  for (size_t i = 0; i < shape.size(); ++i) {
    dims.emplace_back(ov::Dimension(shape[i]));
  }
  return ov::PartialShape(dims);
}
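
// Maps an OpenVINO element type to the corresponding FDDataType; aborts on
// unsupported types.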
FDDataType OpenVINODataTypeToFD(const ov::element::Type &type) {
  if (type == ov::element::f32) {
    return FDDataType::FP32;
  } else if (type == ov::element::f16) {
    return FDDataType::FP16;
  } else if (type == ov::element::f64) {
    return FDDataType::FP64;
  } else if (type == ov::element::i8) {
    return FDDataType::INT8;
  } else if (type == ov::element::u8) {
    return FDDataType::UINT8;
  } else if (type == ov::element::i32) {
    return FDDataType::INT32;
  } else if (type == ov::element::i64) {
    return FDDataType::INT64;
  } else {
    FDASSERT(false,
             "Only support float/double/int8/uint8/int32/int64/float16 now.");
  }
  return FDDataType::FP32;
}
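
// Inverse mapping: FDDataType to ov::element::Type.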
ov::element::Type FDDataTypeToOV(const FDDataType &type) {
  if (type == FDDataType::FP32) {
    return ov::element::f32;
  } else if (type == FDDataType::FP64) {
    return ov::element::f64;
  } else if (type == FDDataType::INT8) {
    return ov::element::i8;
  } else if (type == FDDataType::UINT8) {
    return ov::element::u8;
  } else if (type == FDDataType::INT32) {
    return ov::element::i32;
  } else if (type == FDDataType::INT64) {
    return ov::element::i64;
  } else if (type == FDDataType::FP16) {
    return ov::element::f16;
  }
  FDASSERT(false,
           "Only support float/double/int8/uint8/int32/int64/float16 now.");
  return ov::element::f32;
}
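
// A single ov::Core instance is shared by every OpenVINOBackend in the
// process.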
ov::Core OpenVINOBackend::core_;
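
// Collects name/shape/dtype for each model port. A port can carry several
// tensor names; an entry is inserted for every name so lookups by any alias
// succeed.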
void OpenVINOBackend::InitTensorInfo(
    const std::vector<ov::Output<ov::Node>> &ov_outputs,
    std::map<std::string, TensorInfo> *tensor_infos) {
  for (size_t i = 0; i < ov_outputs.size(); ++i) {
    TensorInfo info;
    auto partial_shape = PartialShapeToVec(ov_outputs[i].get_partial_shape());
    info.shape.assign(partial_shape.begin(), partial_shape.end());
    info.dtype = OpenVINODataTypeToFD(ov_outputs[i].get_element_type());
    auto names = ov_outputs[i].get_names();
    for (const auto &name : names) {
      info.name = name;
      tensor_infos->insert(std::make_pair(info.name, info));
    }
  }
}
bool OpenVINOBackend::Init(const RuntimeOption &option) {
  if (option.model_from_memory_) {
    FDERROR << "OpenVINOBackend doesn't support loading a model from memory, "
               "please load the model from disk."
            << std::endl;
    return false;
  }
  if (option.device != Device::CPU) {
    FDERROR << "OpenVINOBackend only supports Device::CPU, but now it's "
            << option.device << "." << std::endl;
    return false;
  }
  if (option.model_format == ModelFormat::PADDLE) {
    return InitFromPaddle(option.model_file, option.params_file,
                          option.openvino_option);
  } else if (option.model_format == ModelFormat::ONNX) {
    return InitFromOnnx(option.model_file, option.openvino_option);
  }
  FDERROR << "OpenVINOBackend only supports model format Paddle/ONNX, but "
             "now it's "
          << option.model_format << std::endl;
  return false;
}
bool OpenVINOBackend::InitFromPaddle(const std::string &model_file,
                                     const std::string &params_file,
                                     const OpenVINOBackendOption &option) {
  if (initialized_) {
    FDERROR << "OpenVINOBackend is already initialized, cannot initialize "
               "again."
            << std::endl;
    return false;
  }
  option_ = option;
  std::shared_ptr<ov::Model> model = core_.read_model(model_file, params_file);
  if (option_.shape_infos.size() > 0) {
    std::map<std::string, ov::PartialShape> shape_infos;
    for (const auto &item : option_.shape_infos) {
      shape_infos[item.first] = VecToPartialShape(item.second);
    }
    model->reshape(shape_infos);
  }
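  // For HETERO devices (e.g. "HETERO:GPU,CPU"), assign a per-op affinity:
  // ops listed in cpu_operators are pinned to CPU, the rest keep the
  // affinity suggested by query_model.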
  if (option_.device.find("HETERO") != std::string::npos) {
    auto supported_ops = core_.query_model(model, option_.device);
    for (auto &&op : model->get_ops()) {
      auto &affinity = supported_ops[op->get_friendly_name()];
      if (option_.cpu_operators.find(op->description()) !=
          option_.cpu_operators.end()) {
        op->get_rt_info()["affinity"] = "CPU";
      } else {
        op->get_rt_info()["affinity"] = affinity;
      }
    }
  }
  // Get inputs/outputs information from the loaded model.
  const std::vector<ov::Output<ov::Node>> inputs = model->inputs();
  std::map<std::string, TensorInfo> input_infos;
  InitTensorInfo(inputs, &input_infos);
  const std::vector<ov::Output<ov::Node>> outputs = model->outputs();
  std::map<std::string, TensorInfo> output_infos;
  InitTensorInfo(outputs, &output_infos);

  // The OpenVINO model may not keep the same input/output order as the
  // original model, so reorder them here using paddle2onnx::PaddleReader.
  std::string model_content;
  ReadBinaryFromFile(model_file, &model_content);
  auto reader =
      paddle2onnx::PaddleReader(model_content.c_str(), model_content.size());
  if (reader.num_inputs != input_infos.size()) {
    FDERROR << "The number of inputs from PaddleReader:" << reader.num_inputs
            << " is not equal to the number of inputs from OpenVINO:"
            << input_infos.size() << "." << std::endl;
    return false;
  }
  if (reader.num_outputs != output_infos.size()) {
    FDERROR << "The number of outputs from PaddleReader:" << reader.num_outputs
            << " is not equal to the number of outputs from OpenVINO:"
            << output_infos.size() << "." << std::endl;
    return false;
  }
  for (int i = 0; i < reader.num_inputs; ++i) {
    auto iter = input_infos.find(std::string(reader.inputs[i].name));
    if (iter == input_infos.end()) {
      FDERROR << "Cannot find input name:" << reader.inputs[i].name
              << " from OpenVINO model." << std::endl;
      return false;
    }
    input_infos_.push_back(iter->second);
  }
  for (int i = 0; i < reader.num_outputs; ++i) {
    auto iter = output_infos.find(std::string(reader.outputs[i].name));
    if (iter == output_infos.end()) {
      FDERROR << "Cannot find output name:" << reader.outputs[i].name
              << " from OpenVINO model." << std::endl;
      return false;
    }
    output_infos_.push_back(iter->second);
  }
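
  // Build compile-time properties. With no explicit performance hint
  // ("UNDEFINED"), thread count, stream count (-1 = AUTO, -2 = NUMA) and
  // CPU pinning are set manually; otherwise the chosen
  // ov::hint::PerformanceMode is passed through.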
  ov::AnyMap properties;
  if (option_.hint == "UNDEFINED") {
    if (option_.device == "CPU" && option_.cpu_thread_num > 0) {
      properties["INFERENCE_NUM_THREADS"] = option_.cpu_thread_num;
    }
    if (option_.num_streams == -1) {
      properties["NUM_STREAMS"] = ov::streams::AUTO;
    } else if (option_.num_streams == -2) {
      properties["NUM_STREAMS"] = ov::streams::NUMA;
    } else if (option_.num_streams > 0) {
      properties["NUM_STREAMS"] = option_.num_streams;
    }
    FDINFO << "number of streams:" << option_.num_streams << "." << std::endl;
    if (option_.affinity == "NO") {
      properties.emplace(ov::hint::enable_cpu_pinning(false));
    }
    FDINFO << "affinity:" << option_.affinity << "." << std::endl;
  } else if (option_.hint == "LATENCY") {
    properties.emplace(
        ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
  } else if (option_.hint == "THROUGHPUT") {
    properties.emplace(
        ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
  } else if (option_.hint == "CUMULATIVE_THROUGHPUT") {
    properties.emplace(ov::hint::performance_mode(
        ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
  }

  FDINFO << "Compile OpenVINO model on device_name:" << option.device << "."
         << std::endl;
  compiled_model_ = core_.compile_model(model, option.device, properties);
  request_ = compiled_model_.create_infer_request();
  initialized_ = true;
  return true;
}
TensorInfo OpenVINOBackend::GetInputInfo(int index) {
  FDASSERT(index < NumInputs(),
           "The index: %d should be less than the number of inputs: %d.",
           index, NumInputs());
  return input_infos_[index];
}
std::vector<TensorInfo> OpenVINOBackend::GetInputInfos() {
  return input_infos_;
}

std::vector<TensorInfo> OpenVINOBackend::GetOutputInfos() {
  return output_infos_;
}
TensorInfo OpenVINOBackend::GetOutputInfo(int index) {
  FDASSERT(index < NumOutputs(),
           "The index: %d should be less than the number of outputs: %d.",
           index, NumOutputs());
  return output_infos_[index];
}
bool OpenVINOBackend::InitFromOnnx(const std::string &model_file,
                                   const OpenVINOBackendOption &option) {
  if (initialized_) {
    FDERROR << "OpenVINOBackend is already initialized, cannot initialize "
               "again."
            << std::endl;
    return false;
  }
  option_ = option;
  std::shared_ptr<ov::Model> model = core_.read_model(model_file);
  if (option_.shape_infos.size() > 0) {
    std::map<std::string, ov::PartialShape> shape_infos;
    for (const auto &item : option_.shape_infos) {
      shape_infos[item.first] = VecToPartialShape(item.second);
    }
    model->reshape(shape_infos);
  }
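  // Same per-op affinity assignment for HETERO devices as in InitFromPaddle.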
  if (option_.device.find("HETERO") != std::string::npos) {
    auto supported_ops = core_.query_model(model, option_.device);
    for (auto &&op : model->get_ops()) {
      auto &affinity = supported_ops[op->get_friendly_name()];
      if (option_.cpu_operators.find(op->description()) !=
          option_.cpu_operators.end()) {
        op->get_rt_info()["affinity"] = "CPU";
      } else {
        op->get_rt_info()["affinity"] = affinity;
      }
    }
  }
  // Get inputs/outputs information from the loaded model.
  const std::vector<ov::Output<ov::Node>> inputs = model->inputs();
  std::map<std::string, TensorInfo> input_infos;
  InitTensorInfo(inputs, &input_infos);
  const std::vector<ov::Output<ov::Node>> outputs = model->outputs();
  std::map<std::string, TensorInfo> output_infos;
  InitTensorInfo(outputs, &output_infos);

  // The OpenVINO model may not keep the same input/output order as the
  // original model, so reorder them here using paddle2onnx::OnnxReader.
  std::string model_content;
  ReadBinaryFromFile(model_file, &model_content);
  auto reader =
      paddle2onnx::OnnxReader(model_content.c_str(), model_content.size());
  if (reader.num_inputs != input_infos.size()) {
    FDWARNING << "The number of input names from OnnxReader:"
              << reader.num_inputs
              << " is not equal to the number of input names from OpenVINO:"
              << input_infos.size() << "." << std::endl;
  }
  if (reader.num_inputs != inputs.size()) {
    FDERROR << "The number of inputs from OnnxReader:" << reader.num_inputs
            << " is not equal to the number of inputs from OpenVINO:"
            << inputs.size() << "." << std::endl;
    return false;
  }
  if (reader.num_outputs != output_infos.size()) {
    FDWARNING << "The number of output names from OnnxReader:"
              << reader.num_outputs
              << " is not equal to the number of output names from OpenVINO:"
              << output_infos.size() << "." << std::endl;
  }
  if (reader.num_outputs != outputs.size()) {
    FDERROR << "The number of outputs from OnnxReader:" << reader.num_outputs
            << " is not equal to the number of outputs from OpenVINO:"
            << outputs.size() << "." << std::endl;
    return false;
  }
  for (int i = 0; i < reader.num_inputs; ++i) {
    auto iter = input_infos.find(std::string(reader.inputs[i].name));
    if (iter == input_infos.end()) {
      FDERROR << "Cannot find input name:" << reader.inputs[i].name
              << " from OpenVINO model." << std::endl;
      return false;
    }
    input_infos_.push_back(iter->second);
  }
  for (int i = 0; i < reader.num_outputs; ++i) {
    auto iter = output_infos.find(std::string(reader.outputs[i].name));
    if (iter == output_infos.end()) {
      FDERROR << "Cannot find output name:" << reader.outputs[i].name
              << " from OpenVINO model." << std::endl;
      return false;
    }
    output_infos_.push_back(iter->second);
  }
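
  // Compile-time properties: identical handling to InitFromPaddle above.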
  ov::AnyMap properties;
  if (option_.hint == "UNDEFINED") {
    if (option_.device == "CPU" && option_.cpu_thread_num > 0) {
      properties["INFERENCE_NUM_THREADS"] = option_.cpu_thread_num;
    }
    if (option_.num_streams == -1) {
      properties["NUM_STREAMS"] = ov::streams::AUTO;
    } else if (option_.num_streams == -2) {
      properties["NUM_STREAMS"] = ov::streams::NUMA;
    } else if (option_.num_streams > 0) {
      properties["NUM_STREAMS"] = option_.num_streams;
    }
    FDINFO << "number of streams:" << option_.num_streams << "." << std::endl;
    if (option_.affinity == "NO") {
      properties.emplace(ov::hint::enable_cpu_pinning(false));
    }
    FDINFO << "affinity:" << option_.affinity << "." << std::endl;
  } else if (option_.hint == "LATENCY") {
    properties.emplace(
        ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
  } else if (option_.hint == "THROUGHPUT") {
    properties.emplace(
        ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
  } else if (option_.hint == "CUMULATIVE_THROUGHPUT") {
    properties.emplace(ov::hint::performance_mode(
        ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
  }

  FDINFO << "Compile OpenVINO model on device_name:" << option.device << "."
         << std::endl;
  compiled_model_ = core_.compile_model(model, option.device, properties);
  request_ = compiled_model_.create_infer_request();
  initialized_ = true;
  return true;
}
int OpenVINOBackend::NumInputs() const { return input_infos_.size(); }

int OpenVINOBackend::NumOutputs() const { return output_infos_.size(); }
bool OpenVINOBackend::Infer(std::vector<FDTensor> &inputs,
                            std::vector<FDTensor> *outputs, bool copy_to_fd) {
  if (inputs.size() != input_infos_.size()) {
    FDERROR << "[OpenVINOBackend] Size of the inputs(" << inputs.size()
            << ") should match the number of inputs of this model("
            << input_infos_.size() << ")." << std::endl;
    return false;
  }
  RUNTIME_PROFILE_LOOP_H2D_D2H_BEGIN
  // Wrap each input's buffer in an ov::Tensor without copying.
  for (size_t i = 0; i < inputs.size(); ++i) {
    ov::Shape shape(inputs[i].shape.begin(), inputs[i].shape.end());
    ov::Tensor ov_tensor(FDDataTypeToOV(inputs[i].dtype), shape,
                         inputs[i].Data());
    request_.set_tensor(inputs[i].name, ov_tensor);
  }
  RUNTIME_PROFILE_LOOP_BEGIN(1)
  request_.start_async();
  request_.wait();
  RUNTIME_PROFILE_LOOP_END
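
  // Two ways to hand results back: with copy_to_fd the data is copied into
  // FDTensor-owned memory; otherwise the FDTensor references OpenVINO's
  // output buffer directly (zero copy).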
  outputs->resize(output_infos_.size());
  for (size_t i = 0; i < output_infos_.size(); ++i) {
    auto out_tensor = request_.get_output_tensor(i);
    auto out_tensor_shape = out_tensor.get_shape();
    std::vector<int64_t> shape(out_tensor_shape.begin(),
                               out_tensor_shape.end());
    if (copy_to_fd) {
      (*outputs)[i].Resize(shape,
                           OpenVINODataTypeToFD(out_tensor.get_element_type()),
                           output_infos_[i].name, Device::CPU);
      memcpy((*outputs)[i].MutableData(), out_tensor.data(),
             (*outputs)[i].Nbytes());
    } else {
      (*outputs)[i].name = output_infos_[i].name;
      (*outputs)[i].SetExternalData(
          shape, OpenVINODataTypeToFD(out_tensor.get_element_type()),
          out_tensor.data(), Device::CPU);
    }
  }
  RUNTIME_PROFILE_LOOP_H2D_D2H_END
  return true;
}
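
// Clone shares the already compiled model and only creates a fresh infer
// request, so a clone can serve requests without recompiling. Note that
// runtime_option, stream and device_id are unused here.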
std::unique_ptr<BaseBackend>
OpenVINOBackend::Clone(RuntimeOption &runtime_option, void *stream,
                       int device_id) {
  std::unique_ptr<BaseBackend> new_backend =
      utils::make_unique<OpenVINOBackend>();
  auto casted_backend = dynamic_cast<OpenVINOBackend *>(new_backend.get());
  casted_backend->option_ = option_;
  casted_backend->request_ = compiled_model_.create_infer_request();
  casted_backend->input_infos_.assign(input_infos_.begin(),
                                      input_infos_.end());
  casted_backend->output_infos_.assign(output_infos_.begin(),
                                       output_infos_.end());
  return new_backend;
}
} // namespace ultra_infer
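
// A minimal usage sketch (illustrative only; the model file names are
// hypothetical, the RuntimeOption fields are those referenced in Init above):
//
//   RuntimeOption option;
//   option.model_file = "model.pdmodel";
//   option.params_file = "model.pdiparams";
//   option.model_format = ModelFormat::PADDLE;
//   option.device = Device::CPU;
//   OpenVINOBackend backend;
//   if (backend.Init(option)) {
//     std::vector<FDTensor> inputs;   // filled and named by the caller,
//                                     // matching backend.GetInputInfos()
//     std::vector<FDTensor> outputs;
//     backend.Infer(inputs, &outputs);
//   }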