vlm_analyze.py

# Copyright (c) Opendatalab. All rights reserved.
import time

from loguru import logger

from .model_output_to_middle_json import result_to_middle_json
from ...data.data_reader_writer import DataWriter
from mineru.utils.pdf_image_tools import load_images_from_pdf
from ...utils.enum_class import ImageType
from ...utils.models_download_utils import auto_download_and_get_model_root_path
from mineru_vl_utils import MinerUClient


class ModelSingleton:
    """Process-wide cache of MinerUClient instances, keyed by (backend, model_path, server_url)."""

    _instance = None
    _models = {}

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def get_model(
        self,
        backend: str,
        model_path: str | None,
        server_url: str | None,
        **kwargs,
    ) -> MinerUClient:
        key = (backend, model_path, server_url)
        if key not in self._models:
            start_time = time.time()
            model = None
            processor = None
            vllm_llm = None
            vllm_async_llm = None
            if backend in ["transformers", "vllm-engine", "vllm-async-engine"] and not model_path:
                model_path = auto_download_and_get_model_root_path("/", "vlm")
            if backend == "transformers":
                try:
                    from transformers import (
                        AutoProcessor,
                        Qwen2VLForConditionalGeneration,
                    )
                    from transformers import __version__ as transformers_version
                except ImportError:
                    raise ImportError("Please install transformers to use the transformers backend.")
                from packaging import version

                # transformers >= 4.56.0 accepts `dtype`; older versions expect `torch_dtype`.
                if version.parse(transformers_version) >= version.parse("4.56.0"):
                    dtype_key = "dtype"
                else:
                    dtype_key = "torch_dtype"
                model = Qwen2VLForConditionalGeneration.from_pretrained(
                    model_path,
                    device_map="auto",
                    **{dtype_key: "auto"},  # type: ignore
                )
                processor = AutoProcessor.from_pretrained(
                    model_path,
                    use_fast=True,
                )
            elif backend == "vllm-engine":
                try:
                    import vllm
                except ImportError:
                    raise ImportError("Please install vllm to use the vllm-engine backend.")
                if "gpu_memory_utilization" not in kwargs:
                    kwargs["gpu_memory_utilization"] = 0.5
                if "model" not in kwargs:
                    kwargs["model"] = model_path
                # Initialize the vllm engine with the collected kwargs.
                vllm_llm = vllm.LLM(**kwargs)
            elif backend == "vllm-async-engine":
                try:
                    from vllm.engine.arg_utils import AsyncEngineArgs
                    from vllm.v1.engine.async_llm import AsyncLLM
                except ImportError:
                    raise ImportError("Please install vllm to use the vllm-async-engine backend.")
                if "gpu_memory_utilization" not in kwargs:
                    kwargs["gpu_memory_utilization"] = 0.5
                if "model" not in kwargs:
                    kwargs["model"] = model_path
                # Initialize the async vllm engine with the collected kwargs.
                vllm_async_llm = AsyncLLM.from_engine_args(AsyncEngineArgs(**kwargs))
            self._models[key] = MinerUClient(
                backend=backend,
                model=model,
                processor=processor,
                vllm_llm=vllm_llm,
                vllm_async_llm=vllm_async_llm,
                server_url=server_url,
            )
            elapsed = round(time.time() - start_time, 2)
            logger.info(f"get {backend} predictor cost: {elapsed}s")
        return self._models[key]
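

# A minimal usage sketch for the cached client (not part of the original
# module); weights are auto-downloaded when model_path is None:
#
#     client = ModelSingleton().get_model(
#         backend="vllm-engine",
#         model_path=None,
#         server_url=None,
#         gpu_memory_utilization=0.5,  # forwarded to vllm.LLM
#     )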


def doc_analyze(
    pdf_bytes,
    image_writer: DataWriter | None,
    predictor: MinerUClient | None = None,
    backend="transformers",
    model_path: str | None = None,
    server_url: str | None = None,
    **kwargs,
):
    if predictor is None:
        predictor = ModelSingleton().get_model(backend, model_path, server_url, **kwargs)

    # Render each PDF page to a PIL image for the VLM.
    # load_images_start = time.time()
    images_list, pdf_doc = load_images_from_pdf(pdf_bytes, image_type=ImageType.PIL)
    images_pil_list = [image_dict["img_pil"] for image_dict in images_list]
    # load_images_time = round(time.time() - load_images_start, 2)
    # logger.info(f"load images cost: {load_images_time}, speed: {round(len(images_pil_list)/load_images_time, 3)} images/s")

    # infer_start = time.time()
    results = predictor.batch_two_step_extract(images=images_pil_list)
    # infer_time = round(time.time() - infer_start, 2)
    # logger.info(f"infer finished, cost: {infer_time}, speed: {round(len(results)/infer_time, 3)} page/s")

    middle_json = result_to_middle_json(results, images_list, pdf_doc, image_writer)
    return middle_json, results
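

# A minimal synchronous usage sketch (pdf_bytes is the raw content of a PDF
# file; image_writer=None skips persisting extracted images):
#
#     middle_json, results = doc_analyze(pdf_bytes, image_writer=None)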


async def aio_doc_analyze(
    pdf_bytes,
    image_writer: DataWriter | None,
    predictor: MinerUClient | None = None,
    backend="transformers",
    model_path: str | None = None,
    server_url: str | None = None,
    **kwargs,
):
    if predictor is None:
        predictor = ModelSingleton().get_model(backend, model_path, server_url, **kwargs)

    # Render each PDF page to a PIL image for the VLM.
    # load_images_start = time.time()
    images_list, pdf_doc = load_images_from_pdf(pdf_bytes, image_type=ImageType.PIL)
    images_pil_list = [image_dict["img_pil"] for image_dict in images_list]
    # load_images_time = round(time.time() - load_images_start, 2)
    # logger.info(f"load images cost: {load_images_time}, speed: {round(len(images_pil_list)/load_images_time, 3)} images/s")

    # infer_start = time.time()
    results = await predictor.aio_batch_two_step_extract(images=images_pil_list)
    # infer_time = round(time.time() - infer_start, 2)
    # logger.info(f"infer finished, cost: {infer_time}, speed: {round(len(results)/infer_time, 3)} page/s")

    middle_json = result_to_middle_json(results, images_list, pdf_doc, image_writer)
    return middle_json, results
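

# Illustrative entry point, not part of the original module: a minimal sketch
# that assumes a file named "demo.pdf" (hypothetical). Because this module uses
# relative imports, it must be run via its package, not as a standalone script.
if __name__ == "__main__":
    import asyncio
    from pathlib import Path

    async def _demo():
        pdf_bytes = Path("demo.pdf").read_bytes()
        # image_writer=None skips image export; pass a DataWriter to persist
        # extracted images instead.
        middle_json, results = await aio_doc_analyze(pdf_bytes, image_writer=None)
        logger.info(f"parsed {len(results)} pages")

    asyncio.run(_demo())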