ov_backend.cc
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "ultra_infer/runtime/backends/openvino/ov_backend.h"

#ifdef ENABLE_PADDLE2ONNX
#include "paddle2onnx/converter.h"
#endif

namespace ultra_infer {
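
// Convert an OpenVINO PartialShape into a plain shape vector, mapping every
// dynamic dimension to -1 so callers can recognize it.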
std::vector<int64_t> PartialShapeToVec(const ov::PartialShape &shape) {
  std::vector<int64_t> res;
  for (size_t i = 0; i < shape.size(); ++i) {
    auto dim = shape[i];
    if (dim.is_dynamic()) {
      res.push_back(-1);
    } else {
      res.push_back(dim.get_length());
    }
  }
  return res;
}
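
// Inverse of PartialShapeToVec: build an ov::PartialShape from a shape
// vector, where a -1 entry marks a dynamic dimension.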
ov::PartialShape VecToPartialShape(const std::vector<int64_t> &shape) {
  std::vector<ov::Dimension> dims;
  for (size_t i = 0; i < shape.size(); ++i) {
    dims.emplace_back(ov::Dimension(shape[i]));
  }
  return ov::PartialShape(dims);
}
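
// Map an OpenVINO element type to the corresponding FDDataType.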
FDDataType OpenVINODataTypeToFD(const ov::element::Type &type) {
  if (type == ov::element::f32) {
    return FDDataType::FP32;
  } else if (type == ov::element::f16) {
    return FDDataType::FP16;
  } else if (type == ov::element::f64) {
    return FDDataType::FP64;
  } else if (type == ov::element::i8) {
    return FDDataType::INT8;
  } else if (type == ov::element::u8) {
    return FDDataType::UINT8;
  } else if (type == ov::element::i32) {
    return FDDataType::INT32;
  } else if (type == ov::element::i64) {
    return FDDataType::INT64;
  } else {
    FDASSERT(false,
             "Only support float/double/int8/uint8/int32/int64/float16 now.");
  }
  return FDDataType::FP32;
}
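
// Map an FDDataType back to the corresponding OpenVINO element type.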
ov::element::Type FDDataTypeToOV(const FDDataType &type) {
  if (type == FDDataType::FP32) {
    return ov::element::f32;
  } else if (type == FDDataType::FP64) {
    return ov::element::f64;
  } else if (type == FDDataType::INT8) {
    return ov::element::i8;
  } else if (type == FDDataType::UINT8) {
    return ov::element::u8;
  } else if (type == FDDataType::INT32) {
    return ov::element::i32;
  } else if (type == FDDataType::INT64) {
    return ov::element::i64;
  } else if (type == FDDataType::FP16) {
    return ov::element::f16;
  }
  FDASSERT(false,
           "Only support float/double/int8/uint8/int32/int64/float16 now.");
  return ov::element::f32;
}

ov::Core OpenVINOBackend::core_;
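
// Collect name/shape/dtype information for a set of model ports into a map
// keyed by tensor name.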
void OpenVINOBackend::InitTensorInfo(
    const std::vector<ov::Output<ov::Node>> &ov_outputs,
    std::map<std::string, TensorInfo> *tensor_infos) {
  for (size_t i = 0; i < ov_outputs.size(); ++i) {
    TensorInfo info;
    auto partial_shape = PartialShapeToVec(ov_outputs[i].get_partial_shape());
    info.shape.assign(partial_shape.begin(), partial_shape.end());
    info.name = ov_outputs[i].get_any_name();
    info.dtype = OpenVINODataTypeToFD(ov_outputs[i].get_element_type());
    tensor_infos->insert(std::make_pair(info.name, info));
  }
}
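
// Entry point: validate the runtime option and dispatch to the loader that
// matches the model format.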
bool OpenVINOBackend::Init(const RuntimeOption &option) {
  if (option.model_from_memory_) {
    FDERROR << "OpenVINOBackend doesn't support loading a model from memory, "
               "please load the model from disk."
            << std::endl;
    return false;
  }
  if (option.device != Device::CPU) {
    FDERROR << "OpenVINOBackend only supports Device::CPU, but now it's "
            << option.device << "." << std::endl;
    return false;
  }
  if (option.model_format == ModelFormat::PADDLE) {
    return InitFromPaddle(option.model_file, option.params_file,
                          option.openvino_option);
  } else if (option.model_format == ModelFormat::ONNX) {
    return InitFromOnnx(option.model_file, option.openvino_option);
  } else {
    FDERROR << "OpenVINOBackend only supports model format Paddle/ONNX, but "
               "now it's "
            << option.model_format << std::endl;
    return false;
  }
  return false;
}
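
// Load a Paddle model through OpenVINO's Paddle frontend, then align the
// input/output order with the original model via paddle2onnx's PaddleReader.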
bool OpenVINOBackend::InitFromPaddle(const std::string &model_file,
                                     const std::string &params_file,
                                     const OpenVINOBackendOption &option) {
  if (initialized_) {
    FDERROR << "OpenVINOBackend is already initialized, cannot initialize "
               "again."
            << std::endl;
    return false;
  }
  option_ = option;
  std::shared_ptr<ov::Model> model = core_.read_model(model_file, params_file);
  if (option_.shape_infos.size() > 0) {
    std::map<std::string, ov::PartialShape> shape_infos;
    for (const auto &item : option_.shape_infos) {
      shape_infos[item.first] = VecToPartialShape(item.second);
    }
    model->reshape(shape_infos);
  }
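  // For HETERO devices, query which device each op can run on, then pin the
  // operators listed in cpu_operators to CPU and keep the queried affinity
  // for everything else.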
  if (option_.device.find("HETERO") != std::string::npos) {
    auto supported_ops = core_.query_model(model, option_.device);
    for (auto &&op : model->get_ops()) {
      auto &affinity = supported_ops[op->get_friendly_name()];
      if (option_.cpu_operators.find(op->description()) !=
          option_.cpu_operators.end()) {
        op->get_rt_info()["affinity"] = "CPU";
      } else {
        op->get_rt_info()["affinity"] = affinity;
      }
    }
  }
  // Get inputs/outputs information from the loaded model
  const std::vector<ov::Output<ov::Node>> inputs = model->inputs();
  std::map<std::string, TensorInfo> input_infos;
  InitTensorInfo(inputs, &input_infos);
  const std::vector<ov::Output<ov::Node>> outputs = model->outputs();
  std::map<std::string, TensorInfo> output_infos;
  InitTensorInfo(outputs, &output_infos);

  // OpenVINO may not keep the same input/output order as the original model,
  // so reorder them here to match the order reported by PaddleReader.
  std::string model_content;
  ReadBinaryFromFile(model_file, &model_content);
  auto reader =
      paddle2onnx::PaddleReader(model_content.c_str(), model_content.size());
  if (reader.num_inputs != input_infos.size()) {
    FDERROR << "The number of inputs from PaddleReader:" << reader.num_inputs
            << " is not equal to the number of inputs from OpenVINO:"
            << input_infos.size() << "." << std::endl;
    return false;
  }
  if (reader.num_outputs != output_infos.size()) {
    FDERROR << "The number of outputs from PaddleReader:" << reader.num_outputs
            << " is not equal to the number of outputs from OpenVINO:"
            << output_infos.size() << "." << std::endl;
    return false;
  }
  for (int i = 0; i < reader.num_inputs; ++i) {
    auto iter = input_infos.find(std::string(reader.inputs[i].name));
    if (iter == input_infos.end()) {
      FDERROR << "Cannot find input name:" << reader.inputs[i].name
              << " from OpenVINO model." << std::endl;
      return false;
    }
    input_infos_.push_back(iter->second);
  }
  for (int i = 0; i < reader.num_outputs; ++i) {
    auto iter = output_infos.find(std::string(reader.outputs[i].name));
    if (iter == output_infos.end()) {
      FDERROR << "Cannot find output name:" << reader.outputs[i].name
              << " from OpenVINO model." << std::endl;
      return false;
    }
    output_infos_.push_back(iter->second);
  }
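  // Translate the backend option into OpenVINO compile properties. When no
  // performance hint is set, thread count, stream count, and core affinity
  // are configured explicitly; otherwise the chosen performance mode is used.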
  ov::AnyMap properties;
  if (option_.hint == "UNDEFINED") {
    if (option_.device == "CPU" && option_.cpu_thread_num > 0) {
      properties["INFERENCE_NUM_THREADS"] = option_.cpu_thread_num;
    }
    if (option_.num_streams == -1) {
      properties["NUM_STREAMS"] = ov::streams::AUTO;
    } else if (option_.num_streams == -2) {
      properties["NUM_STREAMS"] = ov::streams::NUMA;
    } else if (option_.num_streams > 0) {
      properties["NUM_STREAMS"] = option_.num_streams;
    }
    FDINFO << "number of streams:" << option_.num_streams << "." << std::endl;
    if (option_.affinity == "YES") {
      properties["AFFINITY"] = "CORE";
    } else if (option_.affinity == "NO") {
      properties["AFFINITY"] = "NONE";
    } else if (option_.affinity == "NUMA") {
      properties["AFFINITY"] = "NUMA";
    } else if (option_.affinity == "HYBRID_AWARE") {
      properties["AFFINITY"] = "HYBRID_AWARE";
    }
    FDINFO << "affinity:" << option_.affinity << "." << std::endl;
  } else if (option_.hint == "LATENCY") {
    properties.emplace(
        ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
  } else if (option_.hint == "THROUGHPUT") {
    properties.emplace(
        ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
  } else if (option_.hint == "CUMULATIVE_THROUGHPUT") {
    properties.emplace(ov::hint::performance_mode(
        ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
  }
  FDINFO << "Compile OpenVINO model on device_name:" << option.device << "."
         << std::endl;
  compiled_model_ = core_.compile_model(model, option.device, properties);
  request_ = compiled_model_.create_infer_request();
  initialized_ = true;
  return true;
}
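
// Bounds-checked accessors for the cached input/output tensor metadata.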
TensorInfo OpenVINOBackend::GetInputInfo(int index) {
  FDASSERT(index < NumInputs(),
           "The index: %d should be less than the number of inputs: %d.",
           index, NumInputs());
  return input_infos_[index];
}

std::vector<TensorInfo> OpenVINOBackend::GetInputInfos() {
  return input_infos_;
}

std::vector<TensorInfo> OpenVINOBackend::GetOutputInfos() {
  return output_infos_;
}

TensorInfo OpenVINOBackend::GetOutputInfo(int index) {
  FDASSERT(index < NumOutputs(),
           "The index: %d should be less than the number of outputs: %d.",
           index, NumOutputs());
  return output_infos_[index];
}
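
// Load an ONNX model; this mirrors InitFromPaddle except that the model is
// read by OpenVINO's ONNX frontend and reordered via paddle2onnx's OnnxReader.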
bool OpenVINOBackend::InitFromOnnx(const std::string &model_file,
                                   const OpenVINOBackendOption &option) {
  if (initialized_) {
    FDERROR << "OpenVINOBackend is already initialized, cannot initialize "
               "again."
            << std::endl;
    return false;
  }
  option_ = option;
  std::shared_ptr<ov::Model> model = core_.read_model(model_file);
  if (option_.shape_infos.size() > 0) {
    std::map<std::string, ov::PartialShape> shape_infos;
    for (const auto &item : option_.shape_infos) {
      shape_infos[item.first] = VecToPartialShape(item.second);
    }
    model->reshape(shape_infos);
  }
  if (option_.device.find("HETERO") != std::string::npos) {
    auto supported_ops = core_.query_model(model, option_.device);
    for (auto &&op : model->get_ops()) {
      auto &affinity = supported_ops[op->get_friendly_name()];
      if (option_.cpu_operators.find(op->description()) !=
          option_.cpu_operators.end()) {
        op->get_rt_info()["affinity"] = "CPU";
      } else {
        op->get_rt_info()["affinity"] = affinity;
      }
    }
  }
  // Get inputs/outputs information from the loaded model
  const std::vector<ov::Output<ov::Node>> inputs = model->inputs();
  std::map<std::string, TensorInfo> input_infos;
  InitTensorInfo(inputs, &input_infos);
  const std::vector<ov::Output<ov::Node>> outputs = model->outputs();
  std::map<std::string, TensorInfo> output_infos;
  InitTensorInfo(outputs, &output_infos);

  // OpenVINO may not keep the same input/output order as the original model,
  // so reorder them here to match the order reported by OnnxReader.
  std::string model_content;
  ReadBinaryFromFile(model_file, &model_content);
  auto reader =
      paddle2onnx::OnnxReader(model_content.c_str(), model_content.size());
  if (reader.num_inputs != input_infos.size()) {
    FDERROR << "The number of inputs from OnnxReader:" << reader.num_inputs
            << " is not equal to the number of inputs from OpenVINO:"
            << input_infos.size() << "." << std::endl;
    return false;
  }
  if (reader.num_outputs != output_infos.size()) {
    FDERROR << "The number of outputs from OnnxReader:" << reader.num_outputs
            << " is not equal to the number of outputs from OpenVINO:"
            << output_infos.size() << "." << std::endl;
    return false;
  }
  for (int i = 0; i < reader.num_inputs; ++i) {
    auto iter = input_infos.find(std::string(reader.inputs[i].name));
    if (iter == input_infos.end()) {
      FDERROR << "Cannot find input name:" << reader.inputs[i].name
              << " from OpenVINO model." << std::endl;
      return false;
    }
    input_infos_.push_back(iter->second);
  }
  for (int i = 0; i < reader.num_outputs; ++i) {
    auto iter = output_infos.find(std::string(reader.outputs[i].name));
    if (iter == output_infos.end()) {
      FDERROR << "Cannot find output name:" << reader.outputs[i].name
              << " from OpenVINO model." << std::endl;
      return false;
    }
    output_infos_.push_back(iter->second);
  }
  // Translate the backend option into OpenVINO compile properties, exactly as
  // in InitFromPaddle.
  ov::AnyMap properties;
  if (option_.hint == "UNDEFINED") {
    if (option_.device == "CPU" && option_.cpu_thread_num > 0) {
      properties["INFERENCE_NUM_THREADS"] = option_.cpu_thread_num;
    }
    if (option_.num_streams == -1) {
      properties["NUM_STREAMS"] = ov::streams::AUTO;
    } else if (option_.num_streams == -2) {
      properties["NUM_STREAMS"] = ov::streams::NUMA;
    } else if (option_.num_streams > 0) {
      properties["NUM_STREAMS"] = option_.num_streams;
    }
    FDINFO << "number of streams:" << option_.num_streams << "." << std::endl;
    if (option_.affinity == "YES") {
      properties["AFFINITY"] = "CORE";
    } else if (option_.affinity == "NO") {
      properties["AFFINITY"] = "NONE";
    } else if (option_.affinity == "NUMA") {
      properties["AFFINITY"] = "NUMA";
    } else if (option_.affinity == "HYBRID_AWARE") {
      properties["AFFINITY"] = "HYBRID_AWARE";
    }
    FDINFO << "affinity:" << option_.affinity << "." << std::endl;
  } else if (option_.hint == "LATENCY") {
    properties.emplace(
        ov::hint::performance_mode(ov::hint::PerformanceMode::LATENCY));
  } else if (option_.hint == "THROUGHPUT") {
    properties.emplace(
        ov::hint::performance_mode(ov::hint::PerformanceMode::THROUGHPUT));
  } else if (option_.hint == "CUMULATIVE_THROUGHPUT") {
    properties.emplace(ov::hint::performance_mode(
        ov::hint::PerformanceMode::CUMULATIVE_THROUGHPUT));
  }
  FDINFO << "Compile OpenVINO model on device_name:" << option.device << "."
         << std::endl;
  compiled_model_ = core_.compile_model(model, option.device, properties);
  request_ = compiled_model_.create_infer_request();
  initialized_ = true;
  return true;
}
int OpenVINOBackend::NumInputs() const { return input_infos_.size(); }

int OpenVINOBackend::NumOutputs() const { return output_infos_.size(); }
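
// Run one inference: wrap the FDTensor buffers as OpenVINO tensors without
// copying, run the request, then either copy each output into an owned
// FDTensor (copy_to_fd == true) or expose the backend's buffer as external
// data (copy_to_fd == false).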
bool OpenVINOBackend::Infer(std::vector<FDTensor> &inputs,
                            std::vector<FDTensor> *outputs, bool copy_to_fd) {
  if (inputs.size() != input_infos_.size()) {
    FDERROR << "[OpenVINOBackend] Size of the inputs(" << inputs.size()
            << ") should match the number of inputs of this model("
            << input_infos_.size() << ")." << std::endl;
    return false;
  }
  RUNTIME_PROFILE_LOOP_H2D_D2H_BEGIN
  for (size_t i = 0; i < inputs.size(); ++i) {
    ov::Shape shape(inputs[i].shape.begin(), inputs[i].shape.end());
    ov::Tensor ov_tensor(FDDataTypeToOV(inputs[i].dtype), shape,
                         inputs[i].Data());
    request_.set_tensor(inputs[i].name, ov_tensor);
  }
  RUNTIME_PROFILE_LOOP_BEGIN(1)
  request_.start_async();
  request_.wait();
  RUNTIME_PROFILE_LOOP_END
  outputs->resize(output_infos_.size());
  for (size_t i = 0; i < output_infos_.size(); ++i) {
    auto out_tensor = request_.get_output_tensor(i);
    auto out_tensor_shape = out_tensor.get_shape();
    std::vector<int64_t> shape(out_tensor_shape.begin(),
                               out_tensor_shape.end());
    if (copy_to_fd) {
      (*outputs)[i].Resize(shape,
                           OpenVINODataTypeToFD(out_tensor.get_element_type()),
                           output_infos_[i].name, Device::CPU);
      memcpy((*outputs)[i].MutableData(), out_tensor.data(),
             (*outputs)[i].Nbytes());
    } else {
      (*outputs)[i].name = output_infos_[i].name;
      (*outputs)[i].SetExternalData(
          shape, OpenVINODataTypeToFD(out_tensor.get_element_type()),
          out_tensor.data(), Device::CPU);
    }
  }
  RUNTIME_PROFILE_LOOP_H2D_D2H_END
  return true;
}
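
// Create a lightweight copy of the backend that shares the compiled model
// but owns its own infer request, so clones can serve separate threads.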
std::unique_ptr<BaseBackend>
OpenVINOBackend::Clone(RuntimeOption &runtime_option, void *stream,
                       int device_id) {
  std::unique_ptr<BaseBackend> new_backend =
      utils::make_unique<OpenVINOBackend>();
  auto casted_backend = dynamic_cast<OpenVINOBackend *>(new_backend.get());
  casted_backend->option_ = option_;
  casted_backend->request_ = compiled_model_.create_infer_request();
  casted_backend->input_infos_.assign(input_infos_.begin(),
                                      input_infos_.end());
  casted_backend->output_infos_.assign(output_infos_.begin(),
                                       output_infos_.end());
  return new_backend;
}

}  // namespace ultra_infer