c_lib_wrap.py.in 7.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194
  1. # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. from __future__ import absolute_import
  15. import logging
  16. import os
  17. import sys
  18. user_specified_dirs = ['@OPENCV_DIRECTORY@', '@ORT_DIRECTORY@', ]
  19. def is_built_with_gpu() -> bool:
  20. return True if "@WITH_GPU@" == "ON" else False
  21. def is_built_with_ort() -> bool:
  22. return True if "@ENABLE_ORT_BACKEND@" == "ON" else False
  23. def is_built_with_trt() -> bool:
  24. return True if "@ENABLE_TRT_BACKEND@" == "ON" else False
  25. def is_built_with_paddle() -> bool:
  26. return True if "@ENABLE_PADDLE_BACKEND@" == "ON" else False
  27. def is_built_with_poros() ->bool:
  28. return True if "@ENABLE_POROS_BACKEND@" == "ON" else False
  29. def is_built_with_openvino() ->bool:
  30. return True if "@ENABLE_OPENVINO_BACKEND@" == "ON" else False
  31. def is_built_with_om() ->bool:
  32. return True if "@ENABLE_OM_BACKEND@" == "ON" else False
  33. def get_default_cuda_directory() -> str:
  34. if not is_built_with_gpu():
  35. return ""
  36. return r"@CUDA_DIRECTORY@".strip()
  37. def get_default_cuda_major_version() -> str:
  38. if not is_built_with_gpu():
  39. return ""
  40. # TODO(qiuyanjun): get cuda version from cmake.
  41. return "11"
  42. def find_cudart(search_dir: str) -> bool:
  43. if search_dir is None:
  44. logging.info("[UltraInfer][ERROR]: search_dir can not be NoneTpye.")
  45. return False
  46. # TODO(qiuyanjun): add Linux cudart *.so check
  47. cudart_lib_name = f"cudart64_{get_default_cuda_major_version()}0.dll"
  48. cudart_lib_path = os.path.join(search_dir, cudart_lib_name)
  49. return os.path.exists(cudart_lib_path)
  50. def find_cudart_from_sys() -> bool:
  51. # TODO(qiuyanjun): add Linux system paths
  52. sys_paths = os.environ["path"].strip().split(";")
  53. for sys_path in sys_paths:
  54. if find_cudart(sys_path):
  55. logging.info(f"[UltraInfer][INFO]: Successfully found CUDA ToolKit from system PATH env -> {sys_path}")
  56. return True
  57. return False
  58. def add_system_search_paths():
  59. # TODO(qiuyanjun): add Linux system paths
  60. sys_paths = os.environ["path"].strip().split(";")
  61. for sys_path in sys_paths:
  62. if os.path.exists(sys_path) and sys.version_info[:2] >= (3, 8):
  63. try:
  64. os.add_dll_directory(sys_path)
  65. except:
  66. continue
  67. def add_dll_search_dir(dir_path):
  68. os.environ["path"] = dir_path + ";" + os.environ["path"]
  69. sys.path.insert(0, dir_path)
  70. if sys.version_info[:2] >= (3, 8):
  71. os.add_dll_directory(dir_path)
  72. def add_custom_cuda_path():
  73. if is_built_with_gpu():
  74. # if UltraInfer built with gpu and want to run
  75. # in windows, we need to add CUDA_DIRECTORY into
  76. # dll search paths to make sure UltraInfer.dll
  77. # can link cudart correctly. we search the
  78. # default path firstly and try to add into
  79. # paths. User should set it manually if the
  80. # cuda toolkit is not locate in the default
  81. # path we assume.
  82. base_url = "https://github.com/PaddlePaddle/FastDeploy/blob/"
  83. default_cuda_dir = get_default_cuda_directory()
  84. default_cuda_version = get_default_cuda_major_version() # 11
  85. cuda_shared_lib_dir = os.path.join(default_cuda_dir, "bin")
  86. custom_cuda_envs = ["CUDA_DIRECTORY", "CUDA_HOME", "CUDA_ROOT", "CUDA_PATH"]
  87. custom_cuda_dir = "NOTFOUNDED"
  88. if not os.path.exists(cuda_shared_lib_dir):
  89. # try to get cuda directory from user's local env
  90. for custom_env in custom_cuda_envs:
  91. custom_cuda_dir = os.getenv(custom_env, "NOTFOUNDED")
  92. custom_cuda_dir = custom_cuda_dir.strip().split(";")[0]
  93. if os.path.exists(custom_cuda_dir) and custom_cuda_dir != "NOTFOUNDED":
  94. break
  95. if not os.path.exists(custom_cuda_dir) or custom_cuda_dir == "NOTFOUNDED":
  96. logging.warnings.warn(f"\n--- UltraInfer was built with gpu, \
  97. \n--- but the default cuda directory does not exists. \
  98. \n--- Please setup one of {custom_cuda_envs} manually, \
  99. \n--- this path should look like: {default_cuda_dir}. \
  100. \n--- Check FAQ: {base_url + 'develop/docs/FAQ.md'}")
  101. return
  102. # path to cuda dlls
  103. cuda_shared_lib_dir = os.path.join(custom_cuda_dir, "bin")
  104. add_dll_search_dir(cuda_shared_lib_dir)
  105. # try pre find cudart with major version, e.g 11.x/10.x
  106. if not find_cudart(cuda_shared_lib_dir):
  107. custom_cuda_version = os.path.basename(custom_cuda_dir)
  108. logging.warnings.warn(
  109. f"\n--- UltraInfer was built with CUDA major version {default_cuda_version}, \
  110. \n--- but found custom CUDA version {custom_cuda_version} at {custom_cuda_dir} \
  111. \n--- Please setup one of {custom_cuda_envs} manually, \
  112. \n--- this path should look like: {default_cuda_dir}. \
  113. \n--- Check FAQ: {base_url + 'develop/docs/FAQ.md'}")
  114. return
  115. logging.info(f"[UltraInfer][INFO]: Successfully found CUDA ToolKit from -> {cuda_shared_lib_dir}")
  116. if os.name == "nt":
  117. # cuda/cudnn libs
  118. if is_built_with_gpu():
  119. add_system_search_paths()
  120. if not find_cudart_from_sys():
  121. add_custom_cuda_path()
  122. current_path = os.path.abspath(__file__)
  123. dirname = os.path.dirname(current_path)
  124. third_libs_dir = os.path.join(dirname, "libs")
  125. all_dirs = user_specified_dirs + [third_libs_dir]
  126. for dir in all_dirs:
  127. if os.path.exists(dir):
  128. add_dll_search_dir(dir)
  129. for root, dirs, filenames in os.walk(dir):
  130. for d in dirs:
  131. if d == "lib" or d == "bin":
  132. add_dll_search_dir(os.path.join(dirname, root, d))
  133. try:
  134. from .libs.@PY_LIBRARY_NAME@ import *
  135. except Exception as e:
  136. raise RuntimeError(f"UltraInfer initialized failed! Error: {e}")
  137. def TensorInfoStr(tensor_info):
  138. message = "TensorInfo(name : '{}', dtype : '{}', shape : '{}')".format(
  139. tensor_info.name, tensor_info.dtype, tensor_info.shape)
  140. return message
  141. def RuntimeOptionStr(runtime_option):
  142. attrs = dir(runtime_option)
  143. message = "RuntimeOption(\n"
  144. for attr in attrs:
  145. if attr.startswith("__"):
  146. continue
  147. if hasattr(getattr(runtime_option, attr), "__call__"):
  148. continue
  149. message += " {} : {}\t\n".format(attr, getattr(runtime_option, attr))
  150. message.strip("\n")
  151. message += ")"
  152. return message
# Install the pretty-printers defined above as __repr__ on the pybind classes
# brought in by the star-import of the native @PY_LIBRARY_NAME@ module.
TensorInfo.__repr__ = TensorInfoStr
RuntimeOption.__repr__ = RuntimeOptionStr