hpi.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ctypes.util
import importlib.resources
import importlib.util
import json
import platform
from collections import defaultdict
from functools import lru_cache
from typing import Any, Dict, List, Literal, Optional, Tuple, Union

from pydantic import BaseModel, Field
from typing_extensions import Annotated, TypeAlias

from ...utils.deps import function_requires_deps, is_paddle2onnx_plugin_available
from ...utils.env import get_paddle_cuda_version, get_paddle_version
from ...utils.flags import USE_PIR_TRT
from .misc import is_mkldnn_available
from .model_paths import ModelPaths


class PaddleInferenceInfo(BaseModel):
    trt_dynamic_shapes: Optional[Dict[str, List[List[int]]]] = None
    trt_dynamic_shape_input_data: Optional[Dict[str, List[List[float]]]] = None


class TensorRTInfo(BaseModel):
    dynamic_shapes: Optional[Dict[str, List[List[int]]]] = None


class InferenceBackendInfoCollection(BaseModel):
    paddle_infer: Optional[PaddleInferenceInfo] = None
    tensorrt: Optional[TensorRTInfo] = None


# Does using `TypedDict` make things more convenient?
class HPIInfo(BaseModel):
    backend_configs: Optional[InferenceBackendInfoCollection] = None


# For multi-backend inference only
InferenceBackend: TypeAlias = Literal[
    "paddle", "openvino", "onnxruntime", "tensorrt", "om"
]


class OpenVINOConfig(BaseModel):
    cpu_num_threads: int = 10


class ONNXRuntimeConfig(BaseModel):
    cpu_num_threads: int = 10


class TensorRTConfig(BaseModel):
    precision: Literal["fp32", "fp16"] = "fp32"
    use_dynamic_shapes: bool = True
    dynamic_shapes: Optional[Dict[str, List[List[int]]]] = None
    # TODO: Control caching behavior


class OMConfig(BaseModel):
    pass


class HPIConfig(BaseModel):
    pdx_model_name: Annotated[str, Field(alias="model_name")]
    device_type: str
    device_id: Optional[int] = None
    auto_config: bool = True
    backend: Optional[InferenceBackend] = None
    backend_config: Optional[Dict[str, Any]] = None
    hpi_info: Optional[HPIInfo] = None
    auto_paddle2onnx: bool = True
    # TODO: Add more validation logic here


class ModelInfo(BaseModel):
    name: str
    hpi_info: Optional[HPIInfo] = None


ModelFormat: TypeAlias = Literal["paddle", "onnx", "om"]
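

# The bundled JSON file holds the prior knowledge used below: for each
# environment key and Paddle version string it maps PaddleX model names to an
# ordered list of "pseudo-backends" (backend plus run mode), preferred first.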
@lru_cache(1)
def _get_hpi_model_info_collection():
    with importlib.resources.open_text(
        __package__, "hpi_model_info_collection.json", encoding="utf-8"
    ) as f:
        hpi_model_info_collection = json.load(f)
    return hpi_model_info_collection


@function_requires_deps("ultra-infer")
def suggest_inference_backend_and_config(
    hpi_config: HPIConfig,
    model_paths: ModelPaths,
) -> Union[Tuple[InferenceBackend, Dict[str, Any]], Tuple[None, str]]:
    # TODO: The current strategy is naive. It would be better to consider
    # additional important factors, such as NVIDIA GPU compute capability and
    # device manufacturers. We should also allow users to provide hints.
    from ultra_infer import (
        is_built_with_om,
        is_built_with_openvino,
        is_built_with_ort,
        is_built_with_trt,
    )

    is_onnx_model_available = "onnx" in model_paths
    # TODO: Give a warning if the Paddle2ONNX plugin is not available but
    # can be used to select a better backend.
    if hpi_config.auto_paddle2onnx and is_paddle2onnx_plugin_available():
        is_onnx_model_available = is_onnx_model_available or "paddle" in model_paths
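
    # Probe which backends can be used: each non-Paddle backend requires an
    # ultra-infer build with that backend, a model in a format it accepts, and
    # a matching device type.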
    available_backends = []
    if "paddle" in model_paths:
        available_backends.append("paddle")
    if (
        is_built_with_openvino()
        and is_onnx_model_available
        and hpi_config.device_type == "cpu"
    ):
        available_backends.append("openvino")
    if (
        is_built_with_ort()
        and is_onnx_model_available
        and hpi_config.device_type in ("cpu", "gpu")
    ):
        available_backends.append("onnxruntime")
    if (
        is_built_with_trt()
        and is_onnx_model_available
        and hpi_config.device_type == "gpu"
    ):
        available_backends.append("tensorrt")
    if is_built_with_om() and "om" in model_paths and hpi_config.device_type == "npu":
        available_backends.append("om")

    if not available_backends:
        return None, "No inference backends are available."

    if hpi_config.backend is not None and hpi_config.backend not in available_backends:
        return None, f"Inference backend {repr(hpi_config.backend)} is unavailable."
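
    # The prior-knowledge table is indexed first by an environment key (CPU
    # architecture or CUDA version) and then by a Paddle version string, so
    # both keys are derived before the lookup.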
    paddle_version = get_paddle_version()
    if (3, 0) <= paddle_version[:2] <= (3, 1) and paddle_version[3] is None:
        if paddle_version[2] == 0:
            paddle_version = f"paddle{paddle_version[0]}{paddle_version[1]}"
        else:
            paddle_version = (
                f"paddle{paddle_version[0]}{paddle_version[1]}{paddle_version[2]}"
            )
    else:
        return (
            None,
            f"{paddle_version} is not a supported Paddle version.",
        )

    if hpi_config.device_type == "cpu":
        uname = platform.uname()
        arch = uname.machine.lower()
        if arch == "x86_64":
            key = "cpu_x64"
        else:
            return None, f"{repr(arch)} is not a supported architecture."
    elif hpi_config.device_type == "gpu":
        # TODO: Is it better to also check the runtime versions of CUDA and
        # cuDNN, and the versions of CUDA and cuDNN used to build `ultra-infer`?
        cuda_version = get_paddle_cuda_version()
        if not cuda_version:
            return None, "No CUDA version was found."
        cuda_version = cuda_version[0]
        key = f"gpu_cuda{cuda_version}"
    else:
        return None, f"{repr(hpi_config.device_type)} is not a supported device type."

    hpi_model_info_collection = _get_hpi_model_info_collection()
    if key not in hpi_model_info_collection:
        return None, "No prior knowledge can be utilized."
    hpi_model_info_collection_for_env = hpi_model_info_collection[key][paddle_version]

    if hpi_config.pdx_model_name not in hpi_model_info_collection_for_env:
        return None, f"{repr(hpi_config.pdx_model_name)} is not a known model."
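
    # "Pseudo-backends" name a backend together with a run mode (e.g.
    # "paddle_mkldnn", "tensorrt_fp16"). Entries that cannot run in the
    # current environment are dropped before mapping them onto real backends.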
    supported_pseudo_backends = hpi_model_info_collection_for_env[
        hpi_config.pdx_model_name
    ].copy()

    if not (is_mkldnn_available() and hpi_config.device_type == "cpu"):
        for pb in supported_pseudo_backends[:]:
            if pb.startswith("paddle_mkldnn"):
                supported_pseudo_backends.remove(pb)

    # XXX
    if not (
        USE_PIR_TRT
        and importlib.util.find_spec("tensorrt")
        and ctypes.util.find_library("nvinfer")
        and hpi_config.device_type == "gpu"
    ):
        for pb in supported_pseudo_backends[:]:
            if pb.startswith("paddle_tensorrt"):
                supported_pseudo_backends.remove(pb)

    supported_backends = []
    backend_to_pseudo_backends = defaultdict(list)
    for pb in supported_pseudo_backends:
        if pb.startswith("paddle"):
            backend = "paddle"
        elif pb.startswith("tensorrt"):
            backend = "tensorrt"
        else:
            backend = pb
        if available_backends is not None and backend not in available_backends:
            continue
        supported_backends.append(backend)
        backend_to_pseudo_backends[backend].append(pb)

    if not supported_backends:
        return None, "No inference backend can be selected."

    if hpi_config.backend is not None:
        if hpi_config.backend not in supported_backends:
            return (
                None,
                f"{repr(hpi_config.backend)} is not a supported inference backend.",
            )
        suggested_backend = hpi_config.backend
    else:
        # Prefer the first one.
        suggested_backend = supported_backends[0]

    pseudo_backends = backend_to_pseudo_backends[suggested_backend]

    if hpi_config.backend_config is not None:
        requested_base_pseudo_backend = None
        if suggested_backend == "paddle":
            if "run_mode" in hpi_config.backend_config:
                if hpi_config.backend_config["run_mode"].startswith("mkldnn"):
                    requested_base_pseudo_backend = "paddle_mkldnn"
                elif hpi_config.backend_config["run_mode"].startswith("trt"):
                    requested_base_pseudo_backend = "paddle_tensorrt"
        if requested_base_pseudo_backend:
            for pb in pseudo_backends:
                if pb.startswith(requested_base_pseudo_backend):
                    break
            else:
                return None, "Unsupported backend configuration."

    pseudo_backend = pseudo_backends[0]
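
    # Translate the selected pseudo-backend into a concrete suggested
    # configuration (a Paddle Inference run mode or a TensorRT precision);
    # user-provided backend_config entries override the suggestion at the end.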
    suggested_backend_config = {}
    if suggested_backend == "paddle":
        assert pseudo_backend in (
            "paddle",
            "paddle_fp16",
            "paddle_mkldnn",
            "paddle_tensorrt",
            "paddle_tensorrt_fp16",
        ), pseudo_backend
        if pseudo_backend == "paddle":
            suggested_backend_config.update({"run_mode": "paddle"})
        elif pseudo_backend == "paddle_fp16":
            suggested_backend_config.update({"run_mode": "paddle_fp16"})
        elif pseudo_backend == "paddle_mkldnn":
            suggested_backend_config.update({"run_mode": "mkldnn"})
        elif pseudo_backend == "paddle_tensorrt":
            suggested_backend_config.update({"run_mode": "trt_fp32"})
        elif pseudo_backend == "paddle_tensorrt_fp16":
            # TODO: Check if the target device supports FP16.
            suggested_backend_config.update({"run_mode": "trt_fp16"})
    elif suggested_backend == "tensorrt":
        assert pseudo_backend in ("tensorrt", "tensorrt_fp16"), pseudo_backend
        if pseudo_backend == "tensorrt_fp16":
            suggested_backend_config.update({"precision": "fp16"})

    if hpi_config.backend_config is not None:
        suggested_backend_config.update(hpi_config.backend_config)

    return suggested_backend, suggested_backend_config
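

# Illustrative usage (a sketch, not part of the module): the model name is a
# placeholder for a PaddleX model name, and `model_paths` is assumed to be a
# `ModelPaths` mapping from model format ("paddle", "onnx", "om") to file
# paths, which is how it is used above. `HPIConfig` is constructed via the
# `model_name` alias of `pdx_model_name`.
#
#     hpi_config = HPIConfig(model_name="<a PaddleX model name>", device_type="cpu")
#     result = suggest_inference_backend_and_config(hpi_config, model_paths)
#     if result[0] is None:
#         print("No suggestion:", result[1])
#     else:
#         backend, backend_config = result
#         print("Suggested:", backend, backend_config)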