# demo_vllm.py
  1. import argparse
  2. import os
  3. from openai import OpenAI
  4. from transformers.utils.versions import require_version
  5. from PIL import Image
  6. import io
  7. import base64
  8. from dots_ocr.utils import dict_promptmode_to_prompt
  9. from dots_ocr.model.inference import inference_with_vllm
  10. parser = argparse.ArgumentParser()
  11. parser.add_argument("--ip", type=str, default="localhost")
  12. parser.add_argument("--port", type=str, default="8000")
  13. parser.add_argument("--model_name", type=str, default="model")
  14. parser.add_argument("--prompt_mode", type=str, default="prompt_layout_all_en")
  15. args = parser.parse_args()
  16. require_version("openai>=1.5.0", "To fix: pip install openai>=1.5.0")
  17. def main():
  18. addr = f"http://{args.ip}:{args.port}/v1"
  19. image_path = "demo/demo_image1.jpg"
  20. prompt = dict_promptmode_to_prompt[args.prompt_mode]
  21. image = Image.open(image_path)
  22. response = inference_with_vllm(
  23. image,
  24. prompt,
  25. ip="localhost",
  26. port=8000,
  27. temperature=0.1,
  28. top_p=0.9,
  29. )
  30. print(f"response: {response}")
  31. if __name__ == "__main__":
  32. main()