c_lib_wrap.py.in 6.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190
  1. # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
from __future__ import absolute_import
import logging
import os
import sys
# Extra DLL search directories baked in at build time. The "@...@" tokens are
# placeholders substituted by CMake configure_file(); entries may be empty
# strings when the corresponding dependency was not enabled.
user_specified_dirs = ['@OPENCV_DIRECTORY@', '@ORT_DIRECTORY@', ]
  19. def is_built_with_gpu() -> bool:
  20. return True if "@WITH_GPU@" == "ON" else False
  21. def is_built_with_ort() -> bool:
  22. return True if "@ENABLE_ORT_BACKEND@" == "ON" else False
  23. def is_built_with_trt() -> bool:
  24. return True if "@ENABLE_TRT_BACKEND@" == "ON" else False
  25. def is_built_with_paddle() -> bool:
  26. return True if "@ENABLE_PADDLE_BACKEND@" == "ON" else False
  27. def is_built_with_poros() ->bool:
  28. return True if "@ENABLE_POROS_BACKEND@" == "ON" else False
  29. def is_built_with_openvino() ->bool:
  30. return True if "@ENABLE_OPENVINO_BACKEND@" == "ON" else False
  31. def get_default_cuda_directory() -> str:
  32. if not is_built_with_gpu():
  33. return ""
  34. return r"@CUDA_DIRECTORY@".strip()
  35. def get_default_cuda_major_version() -> str:
  36. if not is_built_with_gpu():
  37. return ""
  38. # TODO(qiuyanjun): get cuda version from cmake.
  39. return "11"
  40. def find_cudart(search_dir: str) -> bool:
  41. if search_dir is None:
  42. logging.info("[UltraInfer][ERROR]: search_dir can not be NoneTpye.")
  43. return False
  44. # TODO(qiuyanjun): add Linux cudart *.so check
  45. cudart_lib_name = f"cudart64_{get_default_cuda_major_version()}0.dll"
  46. cudart_lib_path = os.path.join(search_dir, cudart_lib_name)
  47. return os.path.exists(cudart_lib_path)
  48. def find_cudart_from_sys() -> bool:
  49. # TODO(qiuyanjun): add Linux system paths
  50. sys_paths = os.environ["path"].strip().split(";")
  51. for sys_path in sys_paths:
  52. if find_cudart(sys_path):
  53. logging.info(f"[UltraInfer][INFO]: Successfully found CUDA ToolKit from system PATH env -> {sys_path}")
  54. return True
  55. return False
  56. def add_system_search_paths():
  57. # TODO(qiuyanjun): add Linux system paths
  58. sys_paths = os.environ["path"].strip().split(";")
  59. for sys_path in sys_paths:
  60. if os.path.exists(sys_path) and sys.version_info[:2] >= (3, 8):
  61. try:
  62. os.add_dll_directory(sys_path)
  63. except:
  64. continue
  65. def add_dll_search_dir(dir_path):
  66. os.environ["path"] = dir_path + ";" + os.environ["path"]
  67. sys.path.insert(0, dir_path)
  68. if sys.version_info[:2] >= (3, 8):
  69. os.add_dll_directory(dir_path)
  70. def add_custom_cuda_path():
  71. if is_built_with_gpu():
  72. # if UltraInfer built with gpu and want to run
  73. # in windows, we need to add CUDA_DIRECTORY into
  74. # dll search paths to make sure UltraInfer.dll
  75. # can link cudart correctly. we search the
  76. # default path firstly and try to add into
  77. # paths. User should set it manually if the
  78. # cuda toolkit is not locate in the default
  79. # path we assume.
  80. base_url = "https://github.com/PaddlePaddle/FastDeploy/blob/"
  81. default_cuda_dir = get_default_cuda_directory()
  82. default_cuda_version = get_default_cuda_major_version() # 11
  83. cuda_shared_lib_dir = os.path.join(default_cuda_dir, "bin")
  84. custom_cuda_envs = ["CUDA_DIRECTORY", "CUDA_HOME", "CUDA_ROOT", "CUDA_PATH"]
  85. custom_cuda_dir = "NOTFOUNDED"
  86. if not os.path.exists(cuda_shared_lib_dir):
  87. # try to get cuda directory from user's local env
  88. for custom_env in custom_cuda_envs:
  89. custom_cuda_dir = os.getenv(custom_env, "NOTFOUNDED")
  90. custom_cuda_dir = custom_cuda_dir.strip().split(";")[0]
  91. if os.path.exists(custom_cuda_dir) and custom_cuda_dir != "NOTFOUNDED":
  92. break
  93. if not os.path.exists(custom_cuda_dir) or custom_cuda_dir == "NOTFOUNDED":
  94. logging.warnings.warn(f"\n--- UltraInfer was built with gpu, \
  95. \n--- but the default cuda directory does not exists. \
  96. \n--- Please setup one of {custom_cuda_envs} manually, \
  97. \n--- this path should look like: {default_cuda_dir}. \
  98. \n--- Check FAQ: {base_url + 'develop/docs/FAQ.md'}")
  99. return
  100. # path to cuda dlls
  101. cuda_shared_lib_dir = os.path.join(custom_cuda_dir, "bin")
  102. add_dll_search_dir(cuda_shared_lib_dir)
  103. # try pre find cudart with major version, e.g 11.x/10.x
  104. if not find_cudart(cuda_shared_lib_dir):
  105. custom_cuda_version = os.path.basename(custom_cuda_dir)
  106. logging.warnings.warn(
  107. f"\n--- UltraInfer was built with CUDA major version {default_cuda_version}, \
  108. \n--- but found custom CUDA version {custom_cuda_version} at {custom_cuda_dir} \
  109. \n--- Please setup one of {custom_cuda_envs} manually, \
  110. \n--- this path should look like: {default_cuda_dir}. \
  111. \n--- Check FAQ: {base_url + 'develop/docs/FAQ.md'}")
  112. return
  113. logging.info(f"[UltraInfer][INFO]: Successfully found CUDA ToolKit from -> {cuda_shared_lib_dir}")
  114. if os.name == "nt":
  115. # cuda/cudnn libs
  116. if is_built_with_gpu():
  117. add_system_search_paths()
  118. if not find_cudart_from_sys():
  119. add_custom_cuda_path()
  120. current_path = os.path.abspath(__file__)
  121. dirname = os.path.dirname(current_path)
  122. third_libs_dir = os.path.join(dirname, "libs")
  123. all_dirs = user_specified_dirs + [third_libs_dir]
  124. for dir in all_dirs:
  125. if os.path.exists(dir):
  126. add_dll_search_dir(dir)
  127. for root, dirs, filenames in os.walk(dir):
  128. for d in dirs:
  129. if d == "lib" or d == "bin":
  130. add_dll_search_dir(os.path.join(dirname, root, d))
  131. try:
  132. from .libs.@PY_LIBRARY_NAME@ import *
  133. except Exception as e:
  134. raise RuntimeError(f"UltraInfer initalized failed! Error: {e}")
  135. def TensorInfoStr(tensor_info):
  136. message = "TensorInfo(name : '{}', dtype : '{}', shape : '{}')".format(
  137. tensor_info.name, tensor_info.dtype, tensor_info.shape)
  138. return message
  139. def RuntimeOptionStr(runtime_option):
  140. attrs = dir(runtime_option)
  141. message = "RuntimeOption(\n"
  142. for attr in attrs:
  143. if attr.startswith("__"):
  144. continue
  145. if hasattr(getattr(runtime_option, attr), "__call__"):
  146. continue
  147. message += " {} : {}\t\n".format(attr, getattr(runtime_option, attr))
  148. message.strip("\n")
  149. message += ")"
  150. return message
# Install the pretty-printers as __repr__ so TensorInfo / RuntimeOption objects
# (exposed by the star-import of the compiled extension above) print readably
# in interactive sessions and logs.
TensorInfo.__repr__ = TensorInfoStr
RuntimeOption.__repr__ = RuntimeOptionStr