server.py

import os
import sys

from mineru.backend.vlm.custom_logits_processors import enable_custom_logits_processors
from mineru.backend.vlm.utils import set_defult_gpu_memory_utilization
from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
from vllm.entrypoints.cli.main import main as vllm_main


def main():
    args = sys.argv[1:]
    has_port_arg = False
    has_gpu_memory_utilization_arg = False
    has_logits_processors_arg = False
    model_path = None
    model_arg_indices = []

    # Check which arguments were already supplied on the command line
    for i, arg in enumerate(args):
        if arg == "--port" or arg.startswith("--port="):
            has_port_arg = True
        if arg == "--gpu-memory-utilization" or arg.startswith("--gpu-memory-utilization="):
            has_gpu_memory_utilization_arg = True
        if arg == "--logits-processors" or arg.startswith("--logits-processors="):
            has_logits_processors_arg = True
        if arg == "--model":
            if i + 1 < len(args):
                model_path = args[i + 1]
                model_arg_indices.extend([i, i + 1])
        elif arg.startswith("--model="):
            model_path = arg.split("=", 1)[1]
            model_arg_indices.append(i)

    # Remove the --model argument(s) from the argument list
    if model_arg_indices:
        for index in sorted(model_arg_indices, reverse=True):
            args.pop(index)

    custom_logits_processors = enable_custom_logits_processors()

    # Fill in defaults for anything the caller did not pass
    if not has_port_arg:
        args.extend(["--port", "30000"])

    if not has_gpu_memory_utilization_arg:
        gpu_memory_utilization = str(set_defult_gpu_memory_utilization())
        args.extend(["--gpu-memory-utilization", gpu_memory_utilization])

    if not model_path:
        model_path = auto_download_and_get_model_root_path("/", "vlm")

    if (not has_logits_processors_arg) and custom_logits_processors:
        args.extend(["--logits-processors", "mineru_vl_utils:MinerULogitsProcessor"])

    # Rebuild argv: pass the model path as the positional argument to "vllm serve"
    sys.argv = [sys.argv[0]] + ["serve", model_path] + args

    if os.getenv("OMP_NUM_THREADS") is None:
        os.environ["OMP_NUM_THREADS"] = "1"

    # Launch the vllm server
    print(f"start vllm server: {sys.argv}")
    vllm_main()


if __name__ == "__main__":
    main()
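

# For illustration only: a minimal, self-contained sketch of the argument
# rewriting that main() performs above. It mirrors the --model handling and
# the default --port injection without importing mineru or vllm; the function
# name, default model path, and example values below are hypothetical.
def _rewrite_args_sketch(argv, default_model="/path/to/vlm-model"):
    args = list(argv)
    model_path = None
    keep = []
    skip_next = False
    for i, arg in enumerate(args):
        if skip_next:
            skip_next = False
            continue
        if arg == "--model" and i + 1 < len(args):
            # --model VALUE: capture the value and drop both tokens
            model_path = args[i + 1]
            skip_next = True
        elif arg.startswith("--model="):
            # --model=VALUE: capture the value and drop the token
            model_path = arg.split("=", 1)[1]
        else:
            keep.append(arg)
    if not any(a == "--port" or a.startswith("--port=") for a in keep):
        keep.extend(["--port", "30000"])
    # The model path becomes the positional argument after "serve"
    return ["serve", model_path or default_model] + keep


# Example: `--model` is converted into the positional argument and a default
# port is appended when none was given:
# _rewrite_args_sketch(["--model", "/models/my-vlm", "--tensor-parallel-size", "2"])
# -> ['serve', '/models/my-vlm', '--tensor-parallel-size', '2', '--port', '30000']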