# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import subprocess
from pathlib import Path
from typing import List, Sequence

import numpy as np

from ....utils import logging
from ....utils.deps import class_requires_deps
from ....utils.device import constr_device
from ....utils.flags import DEBUG, INFER_BENCHMARK_USE_NEW_INFER_API, USE_PIR_TRT
from ...utils.benchmark import benchmark, set_inference_operations
from ...utils.hpi import (
    HPIConfig,
    OMConfig,
    ONNXRuntimeConfig,
    OpenVINOConfig,
    TensorRTConfig,
    get_model_paths,
    suggest_inference_backend_and_config,
)
from ...utils.pp_option import PaddlePredictorOption
from ...utils.trt_config import DISABLE_TRT_HALF_OPS_CONFIG

CACHE_DIR = ".cache"

INFERENCE_OPERATIONS = [
    "PaddleCopyToDevice",
    "PaddleCopyToHost",
    "PaddleModelInfer",
    "PaddleInferChainLegacy",
    "MultiBackendInfer",
]
set_inference_operations(INFERENCE_OPERATIONS)


# XXX: Better to use the Paddle Inference API to do this
def _pd_dtype_to_np_dtype(pd_dtype):
    import paddle

    if pd_dtype == paddle.inference.DataType.FLOAT64:
        return np.float64
    elif pd_dtype == paddle.inference.DataType.FLOAT32:
        return np.float32
    elif pd_dtype == paddle.inference.DataType.INT64:
        return np.int64
    elif pd_dtype == paddle.inference.DataType.INT32:
        return np.int32
    elif pd_dtype == paddle.inference.DataType.UINT8:
        return np.uint8
    elif pd_dtype == paddle.inference.DataType.INT8:
        return np.int8
    else:
        raise TypeError(f"Unsupported data type: {pd_dtype}")


# Legacy (non-PIR) TRT path
def _collect_trt_shape_range_info(
    model_file,
    model_params,
    gpu_id,
    shape_range_info_path,
    dynamic_shapes,
    dynamic_shape_input_data,
):
    import paddle.inference

    dynamic_shape_input_data = dynamic_shape_input_data or {}

    config = paddle.inference.Config(model_file, model_params)
    config.enable_use_gpu(100, gpu_id)
    config.collect_shape_range_info(shape_range_info_path)
    # TODO: Add other needed options
    config.disable_glog_info()
    predictor = paddle.inference.create_predictor(config)

    input_names = predictor.get_input_names()
    for name in dynamic_shapes:
        if name not in input_names:
            raise ValueError(
                f"Invalid input name {repr(name)} found in `dynamic_shapes`"
            )
    for name in input_names:
        if name not in dynamic_shapes:
            raise ValueError(f"Input name {repr(name)} not found in `dynamic_shapes`")
    for name in dynamic_shape_input_data:
        if name not in input_names:
            raise ValueError(
                f"Invalid input name {repr(name)} found in `dynamic_shape_input_data`"
            )
    # It would be better to check if the shapes are valid.

    min_arrs, opt_arrs, max_arrs = {}, {}, {}
    for name, candidate_shapes in dynamic_shapes.items():
        # XXX: Currently we have no way to get the data type of the tensor
        # without creating an input handle.
        handle = predictor.get_input_handle(name)
        dtype = _pd_dtype_to_np_dtype(handle.type())
        min_shape, opt_shape, max_shape = candidate_shapes
        if name in dynamic_shape_input_data:
            min_arrs[name] = np.array(
                dynamic_shape_input_data[name][0], dtype=dtype
            ).reshape(min_shape)
            opt_arrs[name] = np.array(
                dynamic_shape_input_data[name][1], dtype=dtype
            ).reshape(opt_shape)
            max_arrs[name] = np.array(
                dynamic_shape_input_data[name][2], dtype=dtype
            ).reshape(max_shape)
        else:
            min_arrs[name] = np.ones(min_shape, dtype=dtype)
            opt_arrs[name] = np.ones(opt_shape, dtype=dtype)
            max_arrs[name] = np.ones(max_shape, dtype=dtype)

    # `opt_arrs` is used twice to ensure it is the most frequently used.
    for arrs in [min_arrs, opt_arrs, opt_arrs, max_arrs]:
        for name, arr in arrs.items():
            handle = predictor.get_input_handle(name)
            handle.reshape(arr.shape)
            handle.copy_from_cpu(arr)
        predictor.run()

    # HACK: The shape range info is written to the file only when `predictor`
    # is destroyed, so manually delete it to trigger its destructor and ensure
    # the file gets saved. This works in CPython, but it is definitely a bad
    # idea to count on the implementation-dependent behavior of a garbage
    # collector. Is there a more explicit and deterministic way to handle this?
    del predictor
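
# Expected layout of the dynamic shape arguments (a sketch; the input name
# "x" and the shapes are hypothetical):
#   dynamic_shapes = {"x": ([1, 3, 224, 224], [1, 3, 224, 224], [8, 3, 224, 224])}
#       # per input: (min_shape, opt_shape, max_shape)
#   dynamic_shape_input_data = {"x": (min_data, opt_data, max_data)}
#       # optional flat values, reshaped to the corresponding shapes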


# PIR TRT path
def _convert_trt(
    trt_cfg_setting,
    pp_model_file,
    pp_params_file,
    trt_save_path,
    device_id,
    dynamic_shapes,
    dynamic_shape_input_data,
):
    import paddle.inference
    from paddle.tensorrt.export import Input, TensorRTConfig, convert

    def _set_trt_config():
        for attr_name in trt_cfg_setting:
            assert hasattr(
                trt_config, attr_name
            ), f"`{type(trt_config)}` doesn't have the attribute `{attr_name}`!"
            setattr(trt_config, attr_name, trt_cfg_setting[attr_name])

    def _get_predictor(model_file, params_file):
        # HACK
        config = paddle.inference.Config(str(model_file), str(params_file))
        config.enable_use_gpu(100, device_id)
        # NOTE: Disable oneDNN to circumvent a bug in Paddle Inference
        config.disable_mkldnn()
        config.disable_glog_info()
        return paddle.inference.create_predictor(config)

    dynamic_shape_input_data = dynamic_shape_input_data or {}

    predictor = _get_predictor(pp_model_file, pp_params_file)
    input_names = predictor.get_input_names()
    for name in dynamic_shapes:
        if name not in input_names:
            raise ValueError(
                f"Invalid input name {repr(name)} found in `dynamic_shapes`"
            )
    for name in input_names:
        if name not in dynamic_shapes:
            raise ValueError(f"Input name {repr(name)} not found in `dynamic_shapes`")
    for name in dynamic_shape_input_data:
        if name not in input_names:
            raise ValueError(
                f"Invalid input name {repr(name)} found in `dynamic_shape_input_data`"
            )

    trt_inputs = []
    for name, candidate_shapes in dynamic_shapes.items():
        # XXX: Currently we have no way to get the data type of the tensor
        # without creating an input handle.
        handle = predictor.get_input_handle(name)
        dtype = _pd_dtype_to_np_dtype(handle.type())
        min_shape, opt_shape, max_shape = candidate_shapes
        if name in dynamic_shape_input_data:
            min_arr = np.array(dynamic_shape_input_data[name][0], dtype=dtype).reshape(
                min_shape
            )
            opt_arr = np.array(dynamic_shape_input_data[name][1], dtype=dtype).reshape(
                opt_shape
            )
            max_arr = np.array(dynamic_shape_input_data[name][2], dtype=dtype).reshape(
                max_shape
            )
        else:
            min_arr = np.ones(min_shape, dtype=dtype)
            opt_arr = np.ones(opt_shape, dtype=dtype)
            max_arr = np.ones(max_shape, dtype=dtype)

        # refer to: https://github.com/PolaKuma/Paddle/blob/3347f225bc09f2ec09802a2090432dd5cb5b6739/test/tensorrt/test_converter_model_resnet50.py
        trt_input = Input((min_arr, opt_arr, max_arr))
        trt_inputs.append(trt_input)

    # Create TensorRTConfig
    trt_config = TensorRTConfig(inputs=trt_inputs)
    _set_trt_config()
    trt_config.save_model_dir = str(trt_save_path)
    pp_model_path = str(pp_model_file.with_suffix(""))
    convert(pp_model_path, trt_config)


def _sort_inputs(inputs, names):
    # NOTE: The inputs are expected to be sorted by input name; reorder them
    # to match the model's declared input order.
    indices = sorted(range(len(names)), key=names.__getitem__)
    inputs = [inputs[indices.index(i)] for i in range(len(inputs))]
    return inputs
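
# E.g., with names == ["y", "x"] and inputs passed name-sorted as
# [x_data, y_data], the result is [y_data, x_data], matching `names`.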


def _concatenate(*callables):
    def _chain(x):
        for c in callables:
            x = c(x)
        return x

    return _chain
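
# E.g., `_concatenate(f, g, h)(x)` computes `h(g(f(x)))` (left-to-right).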


@benchmark.timeit
class PaddleCopyToDevice:
    def __init__(self, device_type, device_id):
        self.device_type = device_type
        self.device_id = device_id

    def __call__(self, arrs):
        import paddle

        device_id = [self.device_id] if self.device_id is not None else self.device_id
        device = constr_device(self.device_type, device_id)
        paddle_tensors = [paddle.to_tensor(i, place=device) for i in arrs]
        return paddle_tensors


@benchmark.timeit
class PaddleCopyToHost:
    def __call__(self, paddle_tensors):
        arrs = [i.numpy() for i in paddle_tensors]
        return arrs


@benchmark.timeit
class PaddleModelInfer:
    def __init__(self, predictor):
        super().__init__()
        self.predictor = predictor

    def __call__(self, x):
        return self.predictor.run(x)


# FIXME: Name might be misleading
@benchmark.timeit
class PaddleInferChainLegacy:
    def __init__(self, predictor):
        self.predictor = predictor
        input_names = self.predictor.get_input_names()
        self.input_handles = []
        self.output_handles = []
        for input_name in input_names:
            input_handle = self.predictor.get_input_handle(input_name)
            self.input_handles.append(input_handle)
        output_names = self.predictor.get_output_names()
        for output_name in output_names:
            output_handle = self.predictor.get_output_handle(output_name)
            self.output_handles.append(output_handle)

    def __call__(self, x):
        for input_, input_handle in zip(x, self.input_handles):
            input_handle.reshape(input_.shape)
            input_handle.copy_from_cpu(input_)
        self.predictor.run()
        outputs = [o.copy_to_cpu() for o in self.output_handles]
        return outputs
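
# NOTE: `PaddleInferChainLegacy` drives the predictor through explicit
# input/output handles, while the `PaddleCopyToDevice` -> `PaddleModelInfer`
# -> `PaddleCopyToHost` chain uses the newer `predictor.run(tensors)` API so
# that each stage can be timed separately by the benchmark utilities.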


class StaticInfer(metaclass=abc.ABCMeta):
    @abc.abstractmethod
    def __call__(self, x: Sequence[np.ndarray]) -> List[np.ndarray]:
        raise NotImplementedError
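
# Concrete implementations below: `PaddleInfer` (Paddle Inference) and
# `HPInfer` (multi-backend, high-performance inference via `ultra-infer`).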


class PaddleInfer(StaticInfer):
    def __init__(
        self,
        model_dir: str,
        model_file_prefix: str,
        option: PaddlePredictorOption,
    ) -> None:
        super().__init__()
        self.model_dir = model_dir
        self.model_file_prefix = model_file_prefix
        self._option = option
        self.predictor = self._create()
        if INFER_BENCHMARK_USE_NEW_INFER_API:
            device_type = self._option.device_type
            device_type = "gpu" if device_type == "dcu" else device_type
            copy_to_device = PaddleCopyToDevice(device_type, self._option.device_id)
            copy_to_host = PaddleCopyToHost()
            model_infer = PaddleModelInfer(self.predictor)
            self.infer = _concatenate(copy_to_device, model_infer, copy_to_host)
        else:
            self.infer = PaddleInferChainLegacy(self.predictor)

    def __call__(self, x: Sequence[np.ndarray]) -> List[np.ndarray]:
        names = self.predictor.get_input_names()
        if len(names) != len(x):
            raise ValueError(
                f"The number of inputs does not match the model: {len(names)} vs {len(x)}"
            )
        # TODO: Ensure that input tensors follow the model's input sequence without sorting.
        x = _sort_inputs(x, names)
        x = list(map(np.ascontiguousarray, x))
        pred = self.infer(x)
        return pred
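
    # A minimal usage sketch (the model name, path, and input shape are
    # hypothetical):
    #   option = PaddlePredictorOption("PP-LCNet_x1_0", device_type="cpu")
    #   infer = PaddleInfer(Path("path/to/model_dir"), "inference", option)
    #   (out,) = infer([np.zeros((1, 3, 224, 224), dtype=np.float32)])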

    def _create(self):
        """Create the Paddle Inference predictor from the current option."""
        import paddle
        import paddle.inference

        model_paths = get_model_paths(self.model_dir, self.model_file_prefix)
        if "paddle" not in model_paths:
            raise RuntimeError("No valid PaddlePaddle model found")
        model_file, params_file = model_paths["paddle"]

        if (
            self._option.model_name == "LaTeX_OCR_rec"
            and self._option.device_type == "cpu"
        ):
            import cpuinfo

            if (
                "GenuineIntel" in cpuinfo.get_cpu_info().get("vendor_id_raw", "")
                and self._option.run_mode != "mkldnn"
            ):
                logging.warning(
                    "Currently, the `LaTeX_OCR_rec` model only supports the `mkldnn` mode when running on Intel CPUs, so `mkldnn` will be used instead."
                )
                self._option.run_mode = "mkldnn"
                logging.debug("`run_mode` updated to 'mkldnn'")

        if self._option.device_type == "cpu" and self._option.device_id is not None:
            self._option.device_id = None
            logging.debug("`device_id` has been set to None")
        if (
            self._option.device_type in ("gpu", "dcu")
            and self._option.device_id is None
        ):
            self._option.device_id = 0
            logging.debug("`device_id` has been set to 0")

        # for TRT
        if self._option.run_mode.startswith("trt"):
            assert self._option.device_type == "gpu"
            cache_dir = self.model_dir / CACHE_DIR / "paddle"
            config = self._configure_trt(
                model_file,
                params_file,
                cache_dir,
            )
            config.exp_disable_mixed_precision_ops({"feed", "fetch"})
            config.enable_use_gpu(100, self._option.device_id)
        # for native Paddle and MKL-DNN
        else:
            config = paddle.inference.Config(str(model_file), str(params_file))
            if self._option.device_type == "gpu":
                config.exp_disable_mixed_precision_ops({"feed", "fetch"})
                from paddle.inference import PrecisionType

                precision = (
                    PrecisionType.Half
                    if self._option.run_mode == "paddle_fp16"
                    else PrecisionType.Float32
                )
                config.enable_use_gpu(100, self._option.device_id, precision)
                if hasattr(config, "enable_new_ir"):
                    config.enable_new_ir(self._option.enable_new_ir)
                if hasattr(config, "enable_new_executor"):
                    config.enable_new_executor()
                config.set_optimization_level(3)
            elif self._option.device_type == "npu":
                config.enable_custom_device("npu")
                if hasattr(config, "enable_new_executor"):
                    config.enable_new_executor()
            elif self._option.device_type == "xpu":
                if hasattr(config, "enable_new_executor"):
                    config.enable_new_executor()
            elif self._option.device_type == "mlu":
                config.enable_custom_device("mlu")
                if hasattr(config, "enable_new_executor"):
                    config.enable_new_executor()
            elif self._option.device_type == "gcu":
                from paddle_custom_device.gcu import passes as gcu_passes

                gcu_passes.setUp()
                config.enable_custom_device("gcu")
                if hasattr(config, "enable_new_executor"):
                    config.enable_new_ir()
                    config.enable_new_executor()
                else:
                    pass_builder = config.pass_builder()
                    name = "PaddleX_" + self._option.model_name
                    gcu_passes.append_passes_for_legacy_ir(pass_builder, name)
            elif self._option.device_type == "dcu":
                config.enable_use_gpu(100, self._option.device_id)
                if hasattr(config, "enable_new_executor"):
                    config.enable_new_executor()
                # XXX: Must `is_compiled_with_rocm()` be True on the DCU platform?
                if paddle.is_compiled_with_rocm():
                    # Delete passes unsupported on DCU
                    config.delete_pass("conv2d_add_act_fuse_pass")
                    config.delete_pass("conv2d_add_fuse_pass")
            else:
                assert self._option.device_type == "cpu"
                config.disable_gpu()
                if "mkldnn" in self._option.run_mode:
                    try:
                        config.enable_mkldnn()
                        if "bf16" in self._option.run_mode:
                            config.enable_mkldnn_bfloat16()
                    except Exception:
                        logging.warning(
                            "MKL-DNN is not available. We will disable MKL-DNN."
                        )
                    config.set_mkldnn_cache_capacity(-1)
                else:
                    if hasattr(config, "disable_mkldnn"):
                        config.disable_mkldnn()
                config.set_cpu_math_library_num_threads(self._option.cpu_threads)
                if hasattr(config, "enable_new_ir"):
                    config.enable_new_ir(self._option.enable_new_ir)
                if hasattr(config, "enable_new_executor"):
                    config.enable_new_executor()
                config.set_optimization_level(3)

        config.enable_memory_optim()
        for del_p in self._option.delete_pass:
            config.delete_pass(del_p)

        # Disable Paddle Inference logging
        if not DEBUG:
            config.disable_glog_info()

        predictor = paddle.inference.create_predictor(config)
        return predictor
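
    # Two TensorRT integration paths are used below: with USE_PIR_TRT, the
    # model is pre-converted via `paddle.tensorrt.export.convert` (see
    # `_convert_trt`); otherwise, the legacy flow tunes dynamic shapes from a
    # collected shape range info file (see `_collect_trt_shape_range_info`).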

    def _configure_trt(self, model_file, params_file, cache_dir):
        # TODO: Support calibration
        import paddle.inference
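
        # `trt_cfg_setting` layout (a sketch; the example names and values
        # are hypothetical): with USE_PIR_TRT, keys are `TensorRTConfig`
        # attribute names, e.g. {"precision_mode": ...}; otherwise, keys are
        # `Config` method names mapped to positional (list) or keyword (dict)
        # arguments, e.g. {"enable_tensorrt_engine": {"workspace_size": 1 << 30}}.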
        if USE_PIR_TRT:
            trt_save_path = cache_dir / "trt" / self.model_file_prefix
            _convert_trt(
                self._option.trt_cfg_setting,
                model_file,
                params_file,
                trt_save_path,
                self._option.device_id,
                self._option.trt_dynamic_shapes,
                self._option.trt_dynamic_shape_input_data,
            )
            model_file = trt_save_path.with_suffix(".json")
            params_file = trt_save_path.with_suffix(".pdiparams")
            config = paddle.inference.Config(str(model_file), str(params_file))
        else:
            config = paddle.inference.Config(str(model_file), str(params_file))
            config.set_optim_cache_dir(str(cache_dir / "optim_cache"))
            # `enable_use_gpu()` must be called before the TensorRT engine is enabled.
            config.enable_use_gpu(100, self._option.device_id)
            for func_name in self._option.trt_cfg_setting:
                assert hasattr(
                    config, func_name
                ), f"`{type(config)}` doesn't have the function `{func_name}`!"
                args = self._option.trt_cfg_setting[func_name]
                if isinstance(args, list):
                    getattr(config, func_name)(*args)
                else:
                    getattr(config, func_name)(**args)
            if self._option.trt_use_dynamic_shapes:
                if self._option.trt_collect_shape_range_info:
                    # NOTE: We always use a shape range info file.
                    if self._option.trt_shape_range_info_path is not None:
                        trt_shape_range_info_path = Path(
                            self._option.trt_shape_range_info_path
                        )
                    else:
                        trt_shape_range_info_path = cache_dir / "shape_range_info.pbtxt"
                    should_collect_shape_range_info = True
                    if not trt_shape_range_info_path.exists():
                        trt_shape_range_info_path.parent.mkdir(
                            parents=True, exist_ok=True
                        )
                        logging.info(
                            f"Shape range info will be collected into {trt_shape_range_info_path}"
                        )
                    elif self._option.trt_discard_cached_shape_range_info:
                        trt_shape_range_info_path.unlink()
                        logging.info(
                            f"The shape range info file ({trt_shape_range_info_path}) has been removed, and the shape range info will be re-collected."
                        )
                    else:
                        logging.info(
                            f"A shape range info file ({trt_shape_range_info_path}) already exists. There is no need to collect the info again."
                        )
                        should_collect_shape_range_info = False
                    if should_collect_shape_range_info:
                        _collect_trt_shape_range_info(
                            str(model_file),
                            str(params_file),
                            self._option.device_id,
                            str(trt_shape_range_info_path),
                            self._option.trt_dynamic_shapes,
                            self._option.trt_dynamic_shape_input_data,
                        )
                    if (
                        self._option.model_name in DISABLE_TRT_HALF_OPS_CONFIG
                        and self._option.run_mode == "trt_fp16"
                    ):
                        paddle.inference.InternalUtils.disable_tensorrt_half_ops(
                            config, DISABLE_TRT_HALF_OPS_CONFIG[self._option.model_name]
                        )
                    config.enable_tuned_tensorrt_dynamic_shape(
                        str(trt_shape_range_info_path),
                        self._option.trt_allow_rebuild_at_runtime,
                    )
                else:
                    if self._option.trt_dynamic_shapes is not None:
                        min_shapes, opt_shapes, max_shapes = {}, {}, {}
                        for (
                            key,
                            shapes,
                        ) in self._option.trt_dynamic_shapes.items():
                            min_shapes[key] = shapes[0]
                            opt_shapes[key] = shapes[1]
                            max_shapes[key] = shapes[2]
                        config.set_trt_dynamic_shape_info(
                            min_shapes, max_shapes, opt_shapes
                        )
                    else:
                        raise RuntimeError("No dynamic shape information provided")
        return config


# FIXME: Name might be misleading
@benchmark.timeit
@class_requires_deps("ultra-infer")
class MultiBackendInfer(object):
    def __init__(self, ui_runtime):
        super().__init__()
        self.ui_runtime = ui_runtime

    # The time consumed by the wrapper code will also be taken into account.
    def __call__(self, x):
        outputs = self.ui_runtime.infer(x)
        return outputs


# TODO: It would be better to refactor the code to make `HPInfer` a
# higher-level class that uses `PaddleInfer`.
@class_requires_deps("ultra-infer")
class HPInfer(StaticInfer):
    def __init__(
        self,
        model_dir: str,
        model_file_prefix: str,
        config: HPIConfig,
    ) -> None:
        super().__init__()
        self._model_dir = model_dir
        self._model_file_prefix = model_file_prefix
        self._config = config
        backend, backend_config = self._determine_backend_and_config()
        if backend == "paddle":
            self._use_paddle = True
            self._paddle_infer = self._build_paddle_infer(backend_config)
        else:
            self._use_paddle = False
            ui_runtime = self._build_ui_runtime(backend, backend_config)
            self._multi_backend_infer = MultiBackendInfer(ui_runtime)
            num_inputs = ui_runtime.num_inputs()
            self._input_names = [
                ui_runtime.get_input_info(i).name for i in range(num_inputs)
            ]

    @property
    def model_dir(self) -> str:
        return self._model_dir

    @property
    def model_file_prefix(self) -> str:
        return self._model_file_prefix

    @property
    def config(self) -> HPIConfig:
        return self._config

    def __call__(self, x: Sequence[np.ndarray]) -> List[np.ndarray]:
        if self._use_paddle:
            return self._call_paddle_infer(x)
        else:
            return self._call_multi_backend_infer(x)

    def _call_paddle_infer(self, x):
        return self._paddle_infer(x)

    def _call_multi_backend_infer(self, x):
        num_inputs = len(self._input_names)
        if len(x) != num_inputs:
            raise ValueError(f"Expected {num_inputs} inputs but got {len(x)} instead")
        x = _sort_inputs(x, self._input_names)
        inputs = {}
        for name, input_ in zip(self._input_names, x):
            inputs[name] = np.ascontiguousarray(input_)
        return self._multi_backend_infer(inputs)

    def _determine_backend_and_config(self):
        if self._config.auto_config:
            # Should we use the strategy pattern here to allow extensible
            # strategies?
            model_paths = get_model_paths(self._model_dir, self._model_file_prefix)
            ret = suggest_inference_backend_and_config(
                self._config,
                model_paths,
            )
            if ret[0] is None:
                # Should I use a custom exception?
                raise RuntimeError(
                    f"No inference backend and configuration could be suggested. Reason: {ret[1]}"
                )
            backend, backend_config = ret
        else:
            backend = self._config.backend
            if backend is None:
                raise RuntimeError(
                    "When automatic configuration is not used, the inference backend must be specified manually."
                )
            backend_config = self._config.backend_config or {}
        if backend == "paddle" and not backend_config:
            logging.warning(
                "The Paddle Inference backend is selected with the default configuration. This may not provide optimal performance."
            )
        return backend, backend_config

    def _build_paddle_infer(self, backend_config):
        kwargs = {
            "device_type": self._config.device_type,
            "device_id": self._config.device_id,
            **backend_config,
        }
        # TODO: This is probably redundant. Can we reuse the code in the
        # predictor class?
        paddle_info = self._config.hpi_info.backend_configs.paddle_infer
        if paddle_info is not None:
            if (
                kwargs.get("trt_dynamic_shapes") is None
                and paddle_info.trt_dynamic_shapes is not None
            ):
                trt_dynamic_shapes = paddle_info.trt_dynamic_shapes
                logging.debug("TensorRT dynamic shapes set to %s", trt_dynamic_shapes)
                kwargs["trt_dynamic_shapes"] = trt_dynamic_shapes
            if (
                kwargs.get("trt_dynamic_shape_input_data") is None
                and paddle_info.trt_dynamic_shape_input_data is not None
            ):
                trt_dynamic_shape_input_data = paddle_info.trt_dynamic_shape_input_data
                logging.debug(
                    "TensorRT dynamic shape input data set to %s",
                    trt_dynamic_shape_input_data,
                )
                kwargs["trt_dynamic_shape_input_data"] = trt_dynamic_shape_input_data
        pp_option = PaddlePredictorOption(self._config.pdx_model_name, **kwargs)
        logging.info("Using Paddle Inference backend")
        logging.info("Paddle predictor option: %s", pp_option)
        return PaddleInfer(self._model_dir, self._model_file_prefix, option=pp_option)

    def _build_ui_runtime(self, backend, backend_config, ui_option=None):
        from ultra_infer import ModelFormat, Runtime, RuntimeOption

        if ui_option is None:
            ui_option = RuntimeOption()

        if self._config.device_type == "cpu":
            pass
        elif self._config.device_type == "gpu":
            ui_option.use_gpu(self._config.device_id or 0)
        elif self._config.device_type == "npu":
            ui_option.use_ascend(self._config.device_id or 0)
        else:
            raise RuntimeError(
                f"Unsupported device type {repr(self._config.device_type)}"
            )

        model_paths = get_model_paths(self.model_dir, self.model_file_prefix)

        if backend in ("openvino", "onnxruntime", "tensorrt"):
            # XXX: This introduces side effects.
            if "onnx" not in model_paths:
                if self._config.auto_paddle2onnx:
                    if "paddle" not in model_paths:
                        raise RuntimeError("PaddlePaddle model required")
                    # The CLI is used here since there is currently no API.
                    logging.info(
                        "Automatically converting PaddlePaddle model to ONNX format"
                    )
                    try:
                        subprocess.run(
                            [
                                "paddlex",
                                "--paddle2onnx",
                                "--paddle_model_dir",
                                self._model_dir,
                                "--onnx_model_dir",
                                self._model_dir,
                            ],
                            capture_output=True,
                            check=True,
                            text=True,
                        )
                    except subprocess.CalledProcessError as e:
                        raise RuntimeError(
                            f"PaddlePaddle-to-ONNX conversion failed:\n{e.stderr}"
                        ) from e
                    model_paths = get_model_paths(
                        self.model_dir, self.model_file_prefix
                    )
                    assert "onnx" in model_paths
                else:
                    raise RuntimeError("ONNX model required")
            ui_option.set_model_path(str(model_paths["onnx"]), "", ModelFormat.ONNX)
        elif backend == "om":
            if "om" not in model_paths:
                raise RuntimeError("OM model required")
            ui_option.set_model_path(str(model_paths["om"]), "", ModelFormat.OM)
        else:
            raise ValueError(f"Unsupported inference backend {repr(backend)}")

        if backend == "openvino":
            backend_config = OpenVINOConfig.model_validate(backend_config)
            ui_option.use_openvino_backend()
            ui_option.set_cpu_thread_num(backend_config.cpu_num_threads)
        elif backend == "onnxruntime":
            backend_config = ONNXRuntimeConfig.model_validate(backend_config)
            ui_option.use_ort_backend()
            ui_option.set_cpu_thread_num(backend_config.cpu_num_threads)
        elif backend == "tensorrt":
            if (
                backend_config.get("use_dynamic_shapes", True)
                and backend_config.get("dynamic_shapes") is None
            ):
                trt_info = self._config.hpi_info.backend_configs.tensorrt
                if trt_info is not None and trt_info.dynamic_shapes is not None:
                    trt_dynamic_shapes = trt_info.dynamic_shapes
                    logging.debug(
                        "TensorRT dynamic shapes set to %s", trt_dynamic_shapes
                    )
                    backend_config = {
                        **backend_config,
                        "dynamic_shapes": trt_dynamic_shapes,
                    }
            backend_config = TensorRTConfig.model_validate(backend_config)
            ui_option.use_trt_backend()
            cache_dir = self.model_dir / CACHE_DIR / "tensorrt"
            cache_dir.mkdir(parents=True, exist_ok=True)
            ui_option.trt_option.serialize_file = str(cache_dir / "trt_serialized.trt")
            if backend_config.precision == "FP16":
                ui_option.trt_option.enable_fp16 = True
            if not backend_config.use_dynamic_shapes:
                raise RuntimeError(
                    "TensorRT static shape inference is currently not supported"
                )
            if backend_config.dynamic_shapes is not None:
                if not Path(ui_option.trt_option.serialize_file).exists():
                    for name, shapes in backend_config.dynamic_shapes.items():
                        ui_option.trt_option.set_shape(name, *shapes)
                else:
                    logging.warning(
                        "TensorRT dynamic shapes will be loaded from the file."
                    )
        elif backend == "om":
            backend_config = OMConfig.model_validate(backend_config)
            ui_option.use_om_backend()
        else:
            raise ValueError(f"Unsupported inference backend {repr(backend)}")

        logging.info("Inference backend: %s", backend)
        logging.info("Inference backend config: %s", backend_config)
        ui_runtime = Runtime(ui_option)
        return ui_runtime
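

# A minimal usage sketch for `HPInfer` (the values are hypothetical; only the
# `HPIConfig` fields referenced in this module are shown, and the config may
# require more):
#   config = HPIConfig(
#       pdx_model_name="PP-LCNet_x1_0", device_type="cpu", auto_config=True
#   )
#   infer = HPInfer(Path("path/to/model_dir"), "inference", config)
#   outputs = infer([np.zeros((1, 3, 224, 224), dtype=np.float32)])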