demo_vllm.py

import argparse
from openai import OpenAI
from transformers.utils.versions import require_version
from PIL import Image

from dots_ocr.utils import dict_promptmode_to_prompt
from dots_ocr.model.inference import inference_with_vllm

# CLI options: address of the running vLLM server, served model name, and prompt mode.
parser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="localhost")
parser.add_argument("--port", type=str, default="8000")
parser.add_argument("--model_name", type=str, default="rednote-hilab/dots.ocr")
parser.add_argument("--prompt_mode", type=str, default="prompt_layout_all_en")
args = parser.parse_args()

require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0")


def main():
    addr = f"http://{args.ip}:{args.port}/v1"
    image_path = "demo/demo_image1.jpg"
    prompt = dict_promptmode_to_prompt[args.prompt_mode]
    image = Image.open(image_path)
    # Send the image and the selected prompt to the vLLM server and print the raw response.
    response = inference_with_vllm(
        image,
        prompt,
        ip=args.ip,
        port=args.port,
        temperature=0.1,
        top_p=0.9,
        model_name=args.model_name,
    )
    print(f"response: {response}")


if __name__ == "__main__":
    main()
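
The helper inference_with_vllm is imported from dots_ocr.model.inference and its implementation is not shown here. As a rough, hypothetical sketch, assuming the vLLM server exposes the standard OpenAI-compatible chat completions endpoint (which the otherwise unused OpenAI import and addr variable above suggest), a call like the following would send the image and prompt; the real helper may differ in request formatting and parameters, and the function name below is illustrative, not part of dots.ocr.

# Hypothetical sketch only; not the actual dots.ocr implementation.
import base64
import io

from openai import OpenAI
from PIL import Image


def call_vllm_openai_compatible(image: Image.Image, prompt: str,
                                ip: str = "localhost", port: str = "8000",
                                model_name: str = "rednote-hilab/dots.ocr",
                                temperature: float = 0.1, top_p: float = 0.9) -> str:
    # Encode the PIL image as a base64 data URL so it can be embedded in a chat message.
    buf = io.BytesIO()
    image.save(buf, format="PNG")
    image_b64 = base64.b64encode(buf.getvalue()).decode("utf-8")

    # vLLM's OpenAI-compatible server typically accepts any placeholder API key.
    client = OpenAI(base_url=f"http://{ip}:{port}/v1", api_key="EMPTY")

    completion = client.chat.completions.create(
        model=model_name,
        messages=[{
            "role": "user",
            "content": [
                {"type": "image_url",
                 "image_url": {"url": f"data:image/png;base64,{image_b64}"}},
                {"type": "text", "text": prompt},
            ],
        }],
        temperature=temperature,
        top_p=top_p,
    )
    return completion.choices[0].message.content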