server.py

import sys

from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
from vllm.entrypoints.cli.main import main as vllm_main


def main():
    # Check whether the command-line arguments already contain --model
    args = sys.argv[1:]
    has_model_path_arg = False
    for arg in args:
        if arg == "--model" or arg.startswith("--model="):
            has_model_path_arg = True
            break

    # If --model was not provided, append the default model path
    if not has_model_path_arg:
        default_path = auto_download_and_get_model_root_path("/", "vlm")
        args.extend(["--model", default_path])

    # Rebuild sys.argv so all arguments are passed through to vllm
    sys.argv = [sys.argv[0]] + args

    # Start the vllm server
    print(f"start vllm server: {sys.argv}")
    vllm_main()


if __name__ == "__main__":
    main()
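A minimal, self-contained sketch of the --model detection logic used above. The argument lists are hypothetical examples for illustration only; in the real script the default path comes from auto_download_and_get_model_root_path and all arguments are forwarded to vllm unchanged.

# Sketch: return True when no --model argument is present and the
# default model path would therefore be appended.
def needs_default_model(args):
    return not any(a == "--model" or a.startswith("--model=") for a in args)

print(needs_default_model(["--port", "30000"]))       # True  -> default model path appended
print(needs_default_model(["--model=/models/vlm"]))   # False -> user-supplied model kept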