// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "ultra_infer/runtime/backends/rknpu2/rknpu2_backend.h"

namespace ultra_infer {
RKNPU2Backend::~RKNPU2Backend() {
  if (tensor_attrs_init_) {
    if (input_attrs_ != nullptr) {
      free(input_attrs_);
    }
    if (output_attrs_ != nullptr) {
      free(output_attrs_);
    }
  }
  if (tensor_memory_init_) {
    for (uint32_t i = 0; i < io_num_.n_input; i++) {
      rknn_destroy_mem(ctx_, input_mems_[i]);
    }
    for (uint32_t i = 0; i < io_num_.n_output; i++) {
      rknn_destroy_mem(ctx_, output_mems_[i]);
    }
  }
}

/*
 * @name RuntimeOptionIsApplicable
 * @brief Determine whether the RuntimeOption meets the operating
 *        conditions of RKNPU2.
 * @param runtime_option: The RuntimeOption to validate.
 * @return bool
 * @note None
 */
bool RKNPU2Backend::RuntimeOptionIsApplicable(
    const RuntimeOption &runtime_option) {
  if (!Supported(runtime_option.model_format, Backend::RKNPU2)) {
    FDERROR << "The model format is not supported for RKNPU2." << std::endl;
    return false;
  }
  if (!Supported(runtime_option.device, Backend::RKNPU2)) {
    FDERROR << "The device is not supported for RKNPU2." << std::endl;
    return false;
  }
  if (runtime_option.model_from_memory_) {
    FDERROR << "The RKNPU2 backend doesn't support loading a model from "
               "memory; please load the model from disk."
            << std::endl;
    return false;
  }
  return true;
}

/*
 * @name GetSDKAndDeviceVersion
 * @brief Get the RKNPU2 SDK and driver versions.
 * @param None
 * @return bool
 * @note The private variable ctx_ must be initialized.
 */
bool RKNPU2Backend::GetSDKAndDeviceVersion() {
  int ret;
  ret = rknn_query(ctx_, RKNN_QUERY_SDK_VERSION, &sdk_ver_, sizeof(sdk_ver_));
  if (ret != RKNN_SUCC) {
    FDERROR << "The function(rknn_query) failed! ret=" << ret << std::endl;
    return false;
  }
  FDINFO << "rknpu2 runtime version: " << sdk_ver_.api_version << std::endl;
  FDINFO << "rknpu2 driver version: " << sdk_ver_.drv_version << std::endl;
  return true;
}

/*
 * @name BuildOption
 * @brief Save the backend option and set the NPU core mask.
 * @param option: The RKNPU2BackendOption to save.
 * @note None
 */
void RKNPU2Backend::BuildOption(const RKNPU2BackendOption &option) {
  // Save the whole option; cpu_name and core_mask are copied with it.
  option_ = option;
  // Set the core mask. This only takes effect on RK3588.
  if (option_.cpu_name == rknpu2::CpuName::RK3588) {
    if (!SetCoreMask(option_.core_mask)) {
      FDERROR << "Set core mask failed." << std::endl;
    }
  }
}
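
// Note: core-mask selection is only meaningful on RK3588, whose NPU has three
// cores that can be used individually or fused. Other supported SoCs expose a
// single NPU core, so the mask is left at the runtime default there.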

/***************************************************************
 * @name Init
 * @brief Initialize the RKNN model.
 * @param runtime_option: The RuntimeOption that carries the path of the
 *        RKNN model file and the RKNPU2 backend options.
 * @return bool
 * @note None
 ***************************************************************/
bool RKNPU2Backend::Init(const RuntimeOption &runtime_option) {
  if (!RuntimeOptionIsApplicable(runtime_option)) {
    FDERROR << "Runtime option is not applicable." << std::endl;
    return false;
  }
  if (!LoadModel((char *)runtime_option.model_file.data())) {
    FDERROR << "Load model failed" << std::endl;
    return false;
  }
  if (!InitInputAndOutputNumber()) {
    FDERROR << "Init input and output number failed" << std::endl;
    return false;
  }
  if (!GetSDKAndDeviceVersion()) {
    FDERROR << "Get SDK and device version failed" << std::endl;
    return false;
  }
  BuildOption(runtime_option.rknpu2_option);
  if (!InitInputAndOutputInformation()) {
    FDERROR << "Get model input output information failed" << std::endl;
    return false;
  }
  return true;
}
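
// Usage sketch (illustrative only; assumes a RuntimeOption populated by the
// caller, with model_file and rknpu2_option set directly for brevity):
//
//   RuntimeOption opt;
//   opt.model_file = "./model.rknn";  // path to the RKNN model on disk
//   opt.rknpu2_option.cpu_name = rknpu2::CpuName::RK3588;  // target SoC
//   RKNPU2Backend backend;
//   if (!backend.Init(opt)) { /* handle failure */ }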

/*
 * @name SetCoreMask
 * @brief Set the NPU cores the model runs on.
 * @param core_mask: The specification of the NPU core setting.
 * @return bool
 * @note Only supported on RK3588.
 */
bool RKNPU2Backend::SetCoreMask(const rknpu2::CoreMask &core_mask) const {
  if (option_.cpu_name != rknpu2::CpuName::RK3588) {
    FDINFO << "SetCoreMask is only supported when the SoC is RK3588."
           << std::endl;
    return false;
  }
  int ret = rknn_set_core_mask(ctx_, static_cast<rknn_core_mask>(core_mask));
  if (ret != RKNN_SUCC) {
    FDERROR << "The function(rknn_set_core_mask) failed! ret=" << ret
            << std::endl;
    return false;
  }
  return true;
}
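
// For reference, rknn_api.h defines the rknn_core_mask values this call maps
// to, including RKNN_NPU_CORE_AUTO, RKNN_NPU_CORE_0, RKNN_NPU_CORE_1,
// RKNN_NPU_CORE_2, RKNN_NPU_CORE_0_1, and RKNN_NPU_CORE_0_1_2.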

/*
 * @name LoadModel
 * @brief Read the model and initialize the rknn context.
 * @param model: Binary data of the RKNN model, or the path of the RKNN model.
 * @return bool
 * @note None
 */
bool RKNPU2Backend::LoadModel(void *model) {
  int ret = RKNN_SUCC;
  ret = rknn_init(&ctx_, model, 0, 0, nullptr);
  if (ret != RKNN_SUCC) {
    FDERROR << "The function(rknn_init) failed! ret=" << ret << std::endl;
    return false;
  }
  return true;
}
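
// Note: as called from Init() above, `model` is the path of the .rknn file;
// the RKNPU2 runtime accepts a file path in place of an in-memory model blob
// when the size argument is 0. Loading from memory is rejected earlier, in
// RuntimeOptionIsApplicable().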

/*
 * @name InitInputAndOutputNumber
 * @brief Initialize io_num_.
 * @param None
 * @return bool
 * @note The private variable ctx_ must be initialized before calling this
 *       function.
 */
bool RKNPU2Backend::InitInputAndOutputNumber() {
  if (io_num_init_) {
    FDERROR << "The private variable io_num_ has already been initialized."
            << std::endl;
    return false;
  }
  int ret = RKNN_SUCC;
  ret = rknn_query(ctx_, RKNN_QUERY_IN_OUT_NUM, &io_num_, sizeof(io_num_));
  if (ret != RKNN_SUCC) {
    FDERROR << "The function(rknn_query) failed! ret=" << ret << std::endl;
    return false;
  }
  io_num_init_ = true;
  return true;
}

/*
 * @name InitRKNNTensorAddress
 * @brief Allocate memory for input_attrs_ and output_attrs_ and query the
 *        tensor attributes from the runtime.
 * @param None
 * @return bool
 * @note None
 */
bool RKNPU2Backend::InitRKNNTensorAddress() {
  if (tensor_attrs_init_) {
    FDERROR << "The private variables input_attrs_ and output_attrs_ have "
               "already been allocated. Do not allocate them repeatedly, or "
               "a memory leak may occur."
            << std::endl;
    return false;
  }
  if (!io_num_init_ && !InitInputAndOutputNumber()) {
    FDERROR << "Init input and output number failed." << std::endl;
    return false;
  }
  if (io_num_.n_input == 0) {
    FDERROR << "The number of input tensors is 0." << std::endl;
    return false;
  }
  if (io_num_.n_output == 0) {
    FDERROR << "The number of output tensors is 0." << std::endl;
    return false;
  }
  // Allocate memory for the private variable input_attrs_.
  input_attrs_ =
      (rknn_tensor_attr *)malloc(sizeof(rknn_tensor_attr) * io_num_.n_input);
  memset(input_attrs_, 0, io_num_.n_input * sizeof(rknn_tensor_attr));
  for (uint32_t i = 0; i < io_num_.n_input; i++) {
    int ret = RKNN_SUCC;
    input_attrs_[i].index = i;
    ret = rknn_query(ctx_, RKNN_QUERY_INPUT_ATTR, &(input_attrs_[i]),
                     sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC) {
      FDERROR << "The function(rknn_query) failed! ret=" << ret << std::endl;
      return false;
    }
    if ((input_attrs_[i].fmt != RKNN_TENSOR_NHWC) &&
        (input_attrs_[i].fmt != RKNN_TENSOR_UNDEFINED)) {
      FDERROR << "The RKNPU2 backend only supports inputs whose format is "
                 "NHWC or UNDEFINED."
              << std::endl;
      return false;
    }
    DumpTensorAttr(input_attrs_[i]);
  }
  // Allocate memory for the private variable output_attrs_.
  output_attrs_ =
      (rknn_tensor_attr *)malloc(sizeof(rknn_tensor_attr) * io_num_.n_output);
  memset(output_attrs_, 0, io_num_.n_output * sizeof(rknn_tensor_attr));
  for (uint32_t i = 0; i < io_num_.n_output; i++) {
    int ret = RKNN_SUCC;
    output_attrs_[i].index = i;
    ret = rknn_query(ctx_, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs_[i]),
                     sizeof(rknn_tensor_attr));
    if (ret != RKNN_SUCC) {
      FDERROR << "The function(rknn_query) failed! ret=" << ret << std::endl;
      return false;
    }
    // UltraInfer only supports postprocessing when the output type is fp32,
    // so output_attrs_[i].type is fixed to RKNN_TENSOR_FLOAT32.
    output_attrs_[i].type = RKNN_TENSOR_FLOAT32;
    DumpTensorAttr(output_attrs_[i]);
  }
  tensor_attrs_init_ = true;
  return true;
}

/*
 * @name InitInputAndOutputInformation
 * @brief Fill inputs_desc_ and outputs_desc_ with the model's detailed
 *        input and output information.
 * @param None
 * @return bool
 * @note None
 */
bool RKNPU2Backend::InitInputAndOutputInformation() {
  if (!io_num_init_ && !InitInputAndOutputNumber()) {
    FDERROR << "Init input and output number failed." << std::endl;
    return false;
  }
  if (!tensor_attrs_init_ && !InitRKNNTensorAddress()) {
    FDERROR << "Init RKNN tensor address failed." << std::endl;
    return false;
  }
  if (io_num_.n_input == 0) {
    FDERROR << "The number of input tensors is 0." << std::endl;
    return false;
  }
  if (io_num_.n_output == 0) {
    FDERROR << "The number of output tensors is 0." << std::endl;
    return false;
  }
  inputs_desc_.resize(io_num_.n_input);
  outputs_desc_.resize(io_num_.n_output);
  // Copy the queried input attributes into the input tensor info.
  for (uint32_t i = 0; i < io_num_.n_input; i++) {
    std::string temp_name = input_attrs_[i].name;
    std::vector<int> temp_shape{};
    temp_shape.resize(input_attrs_[i].n_dims);
    for (uint32_t j = 0; j < input_attrs_[i].n_dims; j++) {
      temp_shape[j] = (int)input_attrs_[i].dims[j];
    }
    FDDataType temp_dtype =
        ultra_infer::RKNPU2Backend::RknnTensorTypeToFDDataType(
            input_attrs_[i].type);
    TensorInfo temp_input_info = {temp_name, temp_shape, temp_dtype};
    inputs_desc_[i] = temp_input_info;
  }
  for (uint32_t i = 0; i < io_num_.n_output; i++) {
    // The runtime pads a 3-D output to 4-D by appending a trailing dimension
    // of 1. That padded shape is wrong for postprocessing, so drop the
    // trailing 1 here to restore the original rank.
    int n_dims = static_cast<int>(output_attrs_[i].n_dims);
    if ((n_dims == 4) && (output_attrs_[i].dims[3] == 1)) {
      n_dims--;
    }
    // Copy the queried output attributes into the output tensor info.
    std::string temp_name = output_attrs_[i].name;
    std::vector<int> temp_shape{};
    temp_shape.resize(n_dims);
    for (int j = 0; j < n_dims; j++) {
      temp_shape[j] = (int)output_attrs_[i].dims[j];
    }
    // The output data type is changed to FP32 (see InitRKNNTensorAddress).
    FDDataType temp_dtype = FDDataType::FP32;
    TensorInfo temp_output_info = {temp_name, temp_shape, temp_dtype};
    outputs_desc_[i] = temp_output_info;
  }
  return true;
}
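
// Example of the trailing-dimension fix above: a model whose true output
// shape is [1, 1000, 2] may be reported by the runtime as [1, 1000, 2, 1];
// the (n_dims == 4 && dims[3] == 1) check restores the 3-D shape.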

/*
 * @name DumpTensorAttr
 * @brief Print the detailed attributes of a tensor.
 * @param attr: The rknn_tensor_attr to print.
 * @return None
 * @note None
 */
void RKNPU2Backend::DumpTensorAttr(rknn_tensor_attr &attr) {
  printf("index=%u, name=%s, n_dims=%u, dims=[%u, %u, %u, %u], "
         "n_elems=%u, size=%u, fmt=%s, type=%s, "
         "qnt_type=%s, zp=%d, scale=%f, pass_through=%d\n",
         attr.index, attr.name, attr.n_dims, attr.dims[0], attr.dims[1],
         attr.dims[2], attr.dims[3], attr.n_elems, attr.size,
         get_format_string(attr.fmt), get_type_string(attr.type),
         get_qnt_type_string(attr.qnt_type), attr.zp, attr.scale,
         attr.pass_through);
}

TensorInfo RKNPU2Backend::GetInputInfo(int index) {
  FDASSERT(index < NumInputs(),
           "The index: %d should be less than the number of inputs: %d.",
           index, NumInputs())
  return inputs_desc_[index];
}

std::vector<TensorInfo> RKNPU2Backend::GetInputInfos() { return inputs_desc_; }

TensorInfo RKNPU2Backend::GetOutputInfo(int index) {
  FDASSERT(index < NumOutputs(),
           "The index: %d should be less than the number of outputs: %d.",
           index, NumOutputs())
  return outputs_desc_[index];
}

std::vector<TensorInfo> RKNPU2Backend::GetOutputInfos() {
  return outputs_desc_;
}

/*
 * @name InitRKNNTensorMemory
 * @brief Allocate runtime memory for the input and output tensors and bind
 *        it to the rknn context.
 * @param inputs: The input tensors, used to size the input buffers.
 * @return bool
 * @note None
 */
bool RKNPU2Backend::InitRKNNTensorMemory(std::vector<FDTensor> &inputs) {
  if (tensor_memory_init_) {
    FDERROR << "The private variables input_mems_ and output_mems_ have "
               "already been allocated. Do not allocate them repeatedly, or "
               "a memory leak may occur."
            << std::endl;
    return false;
  }
  int ret = RKNN_SUCC;
  input_mems_.resize(io_num_.n_input);
  output_mems_.resize(io_num_.n_output);
  for (uint32_t i = 0; i < io_num_.n_input; i++) {
    // Check whether the given input type matches the model's input type.
    rknn_tensor_type input_type =
        ultra_infer::RKNPU2Backend::FDDataTypeToRknnTensorType(inputs[i].dtype);
    if (input_type != input_attrs_[i].type) {
      FDWARNING << "The input tensor type does not match the model's input "
                << "type. The model expects "
                << get_type_string(input_attrs_[i].type) << ", but inputs["
                << i << "].type is " << get_type_string(input_type)
                << std::endl;
    }
    // Create the input tensor memory.
    input_attrs_[i].type = input_type;
    input_attrs_[i].size = inputs[i].Nbytes();
    input_attrs_[i].size_with_stride = inputs[i].Nbytes();
    input_mems_[i] = rknn_create_mem(ctx_, inputs[i].Nbytes());
    if (input_mems_[i] == nullptr) {
      FDERROR << "The function(rknn_create_mem) failed!" << std::endl;
      return false;
    }
    // Bind the input tensor memory to the rknn context.
    ret = rknn_set_io_mem(ctx_, input_mems_[i], &input_attrs_[i]);
    if (ret != RKNN_SUCC) {
      FDERROR << "The function(rknn_set_io_mem) failed! ret=" << ret
              << std::endl;
      return false;
    }
  }
  for (uint32_t i = 0; i < io_num_.n_output; ++i) {
    // Most postprocessing does not support the fp16 format, so the output
    // buffers are sized for fp32 elements.
    uint32_t output_size = output_attrs_[i].n_elems * sizeof(float);
    output_mems_[i] = rknn_create_mem(ctx_, output_size);
    if (output_mems_[i] == nullptr) {
      FDERROR << "The function(rknn_create_mem) failed!" << std::endl;
      return false;
    }
    // Bind the output tensor memory to the rknn context.
    ret = rknn_set_io_mem(ctx_, output_mems_[i], &output_attrs_[i]);
    if (ret != RKNN_SUCC) {
      FDERROR << "The function(rknn_set_io_mem) failed! ret=" << ret
              << std::endl;
      return false;
    }
  }
  tensor_memory_init_ = true;
  return true;
}
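
// Note: rknn_create_mem/rknn_set_io_mem is RKNN's zero-copy I/O path. Input
// data is written directly into NPU-visible buffers and results are read back
// from the bound output buffers, avoiding the extra copies of the
// rknn_inputs_set/rknn_outputs_get API.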

bool RKNPU2Backend::Infer(std::vector<FDTensor> &inputs,
                          std::vector<FDTensor> *outputs, bool copy_to_fd) {
  if (!tensor_memory_init_) {
    if (!InitRKNNTensorMemory(inputs)) {
      FDERROR << "Init tensor memory failed." << std::endl;
      return false;
    }
  }
  int ret = RKNN_SUCC;
  // Check that the number of inputs matches the model.
  if (inputs.size() != inputs_desc_.size()) {
    FDERROR << "[RKNPU2Backend] Size of the inputs(" << inputs.size()
            << ") should keep same with the inputs of this model("
            << inputs_desc_.size() << ")." << std::endl;
    return false;
  }
  // Copy the input data into the input tensor memory.
  for (uint32_t i = 0; i < io_num_.n_input; i++) {
    uint32_t width = input_attrs_[i].dims[2];
    uint32_t stride = input_attrs_[i].w_stride;
    if (width == stride) {
      if (inputs[i].Data() == nullptr) {
        FDERROR << "inputs[" << i << "].Data() is NULL." << std::endl;
        return false;
      }
      memcpy(input_mems_[i]->virt_addr, inputs[i].Data(), inputs[i].Nbytes());
    } else {
      FDERROR << "[RKNPU2Backend] Only width == stride is supported."
              << std::endl;
      return false;
    }
  }
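  // The memcpy above assumes densely packed rows (width == w_stride). Inputs
  // with row padding would need a per-row copy into the strided buffer, which
  // this backend does not implement.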
  // Run the rknn model.
  ret = rknn_run(ctx_, nullptr);
  if (ret != RKNN_SUCC) {
    FDERROR << "rknn run error! ret=" << ret << std::endl;
    return false;
  }
  // Copy the results out of the output tensor memory.
  outputs->resize(outputs_desc_.size());
  std::vector<int64_t> temp_shape(4);
  for (size_t i = 0; i < outputs_desc_.size(); ++i) {
    temp_shape.resize(outputs_desc_[i].shape.size());
    for (size_t j = 0; j < outputs_desc_[i].shape.size(); ++j) {
      temp_shape[j] = outputs_desc_[i].shape[j];
    }
    (*outputs)[i].Resize(temp_shape, outputs_desc_[i].dtype,
                         outputs_desc_[i].name);
    memcpy((*outputs)[i].MutableData(), (float *)output_mems_[i]->virt_addr,
           (*outputs)[i].Nbytes());
  }
  return true;
}

/*
 * @name RknnTensorTypeToFDDataType
 * @brief Convert rknn_tensor_type to FDDataType.
 * @param type: The rknn_tensor_type to convert.
 * @return FDDataType
 * @note Most postprocessing does not support the fp16 format.
 *       Therefore, if the input is FP16, the result is FP32.
 */
FDDataType RKNPU2Backend::RknnTensorTypeToFDDataType(rknn_tensor_type type) {
  if (type == rknn_tensor_type::RKNN_TENSOR_FLOAT16) {
    return FDDataType::FP32;
  }
  if (type == rknn_tensor_type::RKNN_TENSOR_FLOAT32) {
    return FDDataType::FP32;
  }
  if (type == rknn_tensor_type::RKNN_TENSOR_INT8) {
    return FDDataType::INT8;
  }
  if (type == rknn_tensor_type::RKNN_TENSOR_INT16) {
    return FDDataType::INT16;
  }
  if (type == rknn_tensor_type::RKNN_TENSOR_INT32) {
    return FDDataType::INT32;
  }
  if (type == rknn_tensor_type::RKNN_TENSOR_UINT8) {
    return FDDataType::UINT8;
  }
  if (type == rknn_tensor_type::RKNN_TENSOR_BOOL) {
    return FDDataType::BOOL;
  }
  FDERROR << "FDDataType doesn't support this type." << std::endl;
  return FDDataType::UNKNOWN1;
}

/*
 * @name FDDataTypeToRknnTensorType
 * @brief Convert FDDataType to rknn_tensor_type.
 * @param type: The FDDataType to convert.
 * @return rknn_tensor_type
 * @note None
 */
rknn_tensor_type
RKNPU2Backend::FDDataTypeToRknnTensorType(ultra_infer::FDDataType type) {
  if (type == FDDataType::FP16) {
    return rknn_tensor_type::RKNN_TENSOR_FLOAT16;
  }
  if (type == FDDataType::FP32) {
    return rknn_tensor_type::RKNN_TENSOR_FLOAT32;
  }
  if (type == FDDataType::INT8) {
    return rknn_tensor_type::RKNN_TENSOR_INT8;
  }
  if (type == FDDataType::INT16) {
    return rknn_tensor_type::RKNN_TENSOR_INT16;
  }
  if (type == FDDataType::INT32) {
    return rknn_tensor_type::RKNN_TENSOR_INT32;
  }
  if (type == FDDataType::UINT8) {
    return rknn_tensor_type::RKNN_TENSOR_UINT8;
  }
  if (type == FDDataType::BOOL) {
    return rknn_tensor_type::RKNN_TENSOR_BOOL;
  }
  FDERROR << "rknn_tensor_type doesn't support this type." << std::endl;
  return RKNN_TENSOR_TYPE_MAX;
}
} // namespace ultra_infer