# parser.py

import os
import json
from tqdm import tqdm
from multiprocessing.pool import ThreadPool, Pool
import argparse
from dots_ocr.model.inference import inference_with_vllm
from dots_ocr.utils.consts import image_extensions, MIN_PIXELS, MAX_PIXELS
from dots_ocr.utils.image_utils import get_image_by_fitz_doc, fetch_image, smart_resize
from dots_ocr.utils.doc_utils import fitz_doc_to_image, load_images_from_pdf
from dots_ocr.utils.prompts import dict_promptmode_to_prompt
from dots_ocr.utils.layout_utils import post_process_output, draw_layout_on_image, pre_process_bboxes
from dots_ocr.utils.format_transformer import layoutjson2md


class DotsOCRParser:
    """
    Parse an image or PDF file.
    """

    def __init__(self,
                 ip='localhost',
                 port=8000,
                 model_name='model',
                 temperature=0.1,
                 top_p=1.0,
                 max_completion_tokens=16384,
                 num_thread=64,
                 dpi=200,
                 output_dir="./output",
                 min_pixels=None,
                 max_pixels=None,
                 use_hf=False,
                 ):
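        """
        Args:
            ip/port/model_name: connection settings for the vLLM server.
            temperature/top_p/max_completion_tokens: sampling parameters for inference.
            num_thread: thread-pool size for parallel PDF page parsing (forced to 1 when use_hf is set).
            dpi: rendering resolution for PDF pages.
            output_dir: root directory for parsing results.
            min_pixels/max_pixels: optional bounds for image resizing.
            use_hf: run a local HuggingFace model instead of querying a vLLM server.
        """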
        self.dpi = dpi

        # default args for vllm server
        self.ip = ip
        self.port = port
        self.model_name = model_name

        # default args for inference
        self.temperature = temperature
        self.top_p = top_p
        self.max_completion_tokens = max_completion_tokens
        self.num_thread = num_thread
        self.output_dir = output_dir
        self.min_pixels = min_pixels
        self.max_pixels = max_pixels
        self.use_hf = use_hf
        if self.use_hf:
            self._load_hf_model()
            print("using HF model; num_thread will be set to 1")
        else:
            print(f"using vLLM server; num_thread will be set to {self.num_thread}")

        assert self.min_pixels is None or self.min_pixels >= MIN_PIXELS
        assert self.max_pixels is None or self.max_pixels <= MAX_PIXELS

    def _load_hf_model(self):
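        """Load the local DotsOCR weights and processor via transformers (used when use_hf=True)."""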
        import torch
        from transformers import AutoModelForCausalLM, AutoProcessor, AutoTokenizer
        from qwen_vl_utils import process_vision_info

        model_path = "./weights/DotsOCR"
        self.model = AutoModelForCausalLM.from_pretrained(
            model_path,
            attn_implementation="flash_attention_2",
            torch_dtype=torch.bfloat16,
            device_map="auto",
            trust_remote_code=True
        )
        self.processor = AutoProcessor.from_pretrained(model_path, trust_remote_code=True, use_fast=True)
        self.process_vision_info = process_vision_info

    def _inference_with_hf(self, image, prompt):
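        """Run one image+prompt query through the locally loaded HuggingFace model."""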
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "image",
                        "image": image
                    },
                    {"type": "text", "text": prompt}
                ]
            }
        ]

        # Preparation for inference
        text = self.processor.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        image_inputs, video_inputs = self.process_vision_info(messages)
        inputs = self.processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
        )
        inputs = inputs.to("cuda")

        # Inference: generation of the output
        generated_ids = self.model.generate(**inputs, max_new_tokens=24000)
        generated_ids_trimmed = [
            out_ids[len(in_ids):] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
        ]
        response = self.processor.batch_decode(
            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )[0]
        return response

    def _inference_with_vllm(self, image, prompt):
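        """Send one image+prompt query to the vLLM server using the configured sampling parameters."""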
        response = inference_with_vllm(
            image,
            prompt,
            model_name=self.model_name,
            ip=self.ip,
            port=self.port,
            temperature=self.temperature,
            top_p=self.top_p,
            max_completion_tokens=self.max_completion_tokens,
        )
        return response

    def get_prompt(self, prompt_mode, bbox=None, origin_image=None, image=None, min_pixels=None, max_pixels=None):
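        """Look up the prompt for prompt_mode; for grounding OCR, append the preprocessed bbox to the prompt."""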
        prompt = dict_promptmode_to_prompt[prompt_mode]
        if prompt_mode == 'prompt_grounding_ocr':
            assert bbox is not None
            bboxes = [bbox]
            bbox = pre_process_bboxes(origin_image, bboxes, input_width=image.width, input_height=image.height, min_pixels=min_pixels, max_pixels=max_pixels)[0]
            prompt = prompt + str(bbox)
        return prompt

    # def post_process_results(self, response, prompt_mode, save_dir, save_name, origin_image, image, min_pixels, max_pixels)

    def _parse_single_image(
        self,
        origin_image,
        prompt_mode,
        save_dir,
        save_name,
        source="image",
        page_idx=0,
        bbox=None,
        fitz_preprocess=False,
    ):
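        """
        Parse one page/image: resize, build the prompt, run inference, then save
        the layout JSON, annotated image, and Markdown (depending on prompt_mode)
        into save_dir. Returns a dict of page metadata and output file paths.
        """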
        min_pixels, max_pixels = self.min_pixels, self.max_pixels
        if prompt_mode == "prompt_grounding_ocr":
            min_pixels = min_pixels or MIN_PIXELS  # preprocess image to the final input
            max_pixels = max_pixels or MAX_PIXELS
        if min_pixels is not None: assert min_pixels >= MIN_PIXELS, f"min_pixels should be >= {MIN_PIXELS}"
        if max_pixels is not None: assert max_pixels <= MAX_PIXELS, f"max_pixels should be <= {MAX_PIXELS}"

        if source == 'image' and fitz_preprocess:
            image = get_image_by_fitz_doc(origin_image, target_dpi=self.dpi)
            image = fetch_image(image, min_pixels=min_pixels, max_pixels=max_pixels)
        else:
            image = fetch_image(origin_image, min_pixels=min_pixels, max_pixels=max_pixels)
        input_height, input_width = smart_resize(image.height, image.width)
        prompt = self.get_prompt(prompt_mode, bbox, origin_image, image, min_pixels=min_pixels, max_pixels=max_pixels)
        if self.use_hf:
            response = self._inference_with_hf(image, prompt)
        else:
            response = self._inference_with_vllm(image, prompt)
        result = {
            'page_no': page_idx,
            'input_height': input_height,
            'input_width': input_width,
        }
        if source == 'pdf':
            save_name = f"{save_name}_page_{page_idx}"

        if prompt_mode in ['prompt_layout_all_en', 'prompt_layout_only_en', 'prompt_grounding_ocr']:
            cells, filtered = post_process_output(
                response,
                prompt_mode,
                origin_image,
                image,
                min_pixels=min_pixels,
                max_pixels=max_pixels,
            )
            if filtered and prompt_mode != 'prompt_layout_only_en':  # JSON parsing of the model output failed; fall back to saving the raw response
                json_file_path = os.path.join(save_dir, f"{save_name}.json")
                with open(json_file_path, 'w', encoding="utf-8") as w:
                    json.dump(response, w, ensure_ascii=False)
                image_layout_path = os.path.join(save_dir, f"{save_name}.jpg")
                origin_image.save(image_layout_path)
                result.update({
                    'layout_info_path': json_file_path,
                    'layout_image_path': image_layout_path,
                })

                md_file_path = os.path.join(save_dir, f"{save_name}.md")
                with open(md_file_path, "w", encoding="utf-8") as md_file:
                    md_file.write(cells)
                result.update({
                    'md_content_path': md_file_path
                })
                result.update({
                    'filtered': True
                })
            else:
                try:
                    image_with_layout = draw_layout_on_image(origin_image, cells)
                except Exception as e:
                    print(f"Error drawing layout on image: {e}")
                    image_with_layout = origin_image

                json_file_path = os.path.join(save_dir, f"{save_name}.json")
                with open(json_file_path, 'w', encoding="utf-8") as w:
                    json.dump(cells, w, ensure_ascii=False)
                image_layout_path = os.path.join(save_dir, f"{save_name}.jpg")
                image_with_layout.save(image_layout_path)
                result.update({
                    'layout_info_path': json_file_path,
                    'layout_image_path': image_layout_path,
                })

                if prompt_mode != "prompt_layout_only_en":  # no text md when detection only
                    md_content = layoutjson2md(origin_image, cells, text_key='text')
                    md_content_no_hf = layoutjson2md(origin_image, cells, text_key='text', no_page_hf=True)  # clean output without page headers/footers, used for metrics such as omnidocbench, olmbench
                    md_file_path = os.path.join(save_dir, f"{save_name}.md")
                    with open(md_file_path, "w", encoding="utf-8") as md_file:
                        md_file.write(md_content)
                    md_nohf_file_path = os.path.join(save_dir, f"{save_name}_nohf.md")
                    with open(md_nohf_file_path, "w", encoding="utf-8") as md_file:
                        md_file.write(md_content_no_hf)
                    result.update({
                        'md_content_path': md_file_path,
                        'md_content_nohf_path': md_nohf_file_path,
                    })
        else:
            image_layout_path = os.path.join(save_dir, f"{save_name}.jpg")
            origin_image.save(image_layout_path)
            result.update({
                'layout_image_path': image_layout_path,
            })

            md_content = response
            md_file_path = os.path.join(save_dir, f"{save_name}.md")
            with open(md_file_path, "w", encoding="utf-8") as md_file:
                md_file.write(md_content)
            result.update({
                'md_content_path': md_file_path,
            })
        return result

    def parse_image(self, input_path, filename, prompt_mode, save_dir, bbox=None, fitz_preprocess=False):
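        """Parse a single image file and return a one-element result list."""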
        origin_image = fetch_image(input_path)
        result = self._parse_single_image(origin_image, prompt_mode, save_dir, filename, source="image", bbox=bbox, fitz_preprocess=fitz_preprocess)
        result['file_path'] = input_path
        return [result]

    def parse_pdf(self, input_path, filename, prompt_mode, save_dir):
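        """Render each PDF page to an image, then parse all pages in parallel with a thread pool."""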
        print(f"loading pdf: {input_path}")
        images_origin = load_images_from_pdf(input_path, dpi=self.dpi)
        total_pages = len(images_origin)
        tasks = [
            {
                "origin_image": image,
                "prompt_mode": prompt_mode,
                "save_dir": save_dir,
                "save_name": filename,
                "source": "pdf",
                "page_idx": i,
            } for i, image in enumerate(images_origin)
        ]

        def _execute_task(task_args):
            return self._parse_single_image(**task_args)

        if self.use_hf:
            num_thread = 1
        else:
            num_thread = min(total_pages, self.num_thread)
        print(f"Parsing PDF with {total_pages} pages using {num_thread} threads...")

        results = []
        with ThreadPool(num_thread) as pool:
            with tqdm(total=total_pages, desc="Processing PDF pages") as pbar:
                for result in pool.imap_unordered(_execute_task, tasks):
                    results.append(result)
                    pbar.update(1)

        results.sort(key=lambda x: x["page_no"])
        for i in range(len(results)):
            results[i]['file_path'] = input_path
        return results

    def parse_file(self,
                   input_path,
                   output_dir="",
                   prompt_mode="prompt_layout_all_en",
                   bbox=None,
                   fitz_preprocess=False
                   ):
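        """
        Entry point for a single file: dispatch to parse_pdf or parse_image by
        extension, then write all page results to <output_dir>/<filename>.jsonl.
        """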
        output_dir = output_dir or self.output_dir
        output_dir = os.path.abspath(output_dir)
        filename, file_ext = os.path.splitext(os.path.basename(input_path))
        save_dir = os.path.join(output_dir, filename)
        os.makedirs(save_dir, exist_ok=True)

        if file_ext == '.pdf':
            results = self.parse_pdf(input_path, filename, prompt_mode, save_dir)
        elif file_ext in image_extensions:
            results = self.parse_image(input_path, filename, prompt_mode, save_dir, bbox=bbox, fitz_preprocess=fitz_preprocess)
        else:
            raise ValueError(f"file extension {file_ext} not supported; supported extensions are {image_extensions} and .pdf")
        print(f"Parsing finished; results saved to {save_dir}")

        with open(os.path.join(output_dir, os.path.basename(filename) + '.jsonl'), 'w', encoding="utf-8") as w:
            for result in results:
                w.write(json.dumps(result, ensure_ascii=False) + '\n')
        return results


def main():
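    """Command-line interface around DotsOCRParser."""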
    prompts = list(dict_promptmode_to_prompt.keys())
    parser = argparse.ArgumentParser(
        description="dots.ocr Multilingual Document Layout Parser",
    )

    parser.add_argument(
        "input_path", type=str,
        help="Input PDF/image file path"
    )
    parser.add_argument(
        "--output", type=str, default="./output",
        help="Output directory (default: ./output)"
    )
    parser.add_argument(
        "--prompt", choices=prompts, type=str, default="prompt_layout_all_en",
        help="prompt used to query the model; different prompts select different tasks"
    )
    parser.add_argument(
        '--bbox',
        type=int,
        nargs=4,
        metavar=('x1', 'y1', 'x2', 'y2'),
        help='bounding box to OCR; required when using --prompt prompt_grounding_ocr'
    )
    parser.add_argument(
        "--ip", type=str, default="localhost",
        help="vLLM server IP (default: localhost)"
    )
    parser.add_argument(
        "--port", type=int, default=8000,
        help="vLLM server port (default: 8000)"
    )
    parser.add_argument(
        "--model_name", type=str, default="model",
        help="name of the model served by the vLLM server (default: model)"
    )
    parser.add_argument(
        "--temperature", type=float, default=0.1,
        help="sampling temperature (default: 0.1)"
    )
    parser.add_argument(
        "--top_p", type=float, default=1.0,
        help="top-p nucleus sampling parameter (default: 1.0)"
    )
    parser.add_argument(
        "--dpi", type=int, default=200,
        help="DPI used when rendering PDF pages to images (default: 200)"
    )
    parser.add_argument(
        "--max_completion_tokens", type=int, default=16384,
        help="maximum number of completion tokens (default: 16384)"
    )
    parser.add_argument(
        "--num_thread", type=int, default=16,
        help="number of threads for parallel page parsing (default: 16)"
    )
    parser.add_argument(
        "--no_fitz_preprocess", action='store_true',
        help="disable the fitz DPI upsample pipeline for image input; the pipeline helps images rendered at low DPI but may incur higher computational cost"
    )
    parser.add_argument(
        "--min_pixels", type=int, default=None,
        help="minimum pixel count for image resizing (default: None)"
    )
    parser.add_argument(
        "--max_pixels", type=int, default=None,
        help="maximum pixel count for image resizing (default: None)"
    )
    parser.add_argument(
        "--use_hf", action='store_true',
        help="run a local HuggingFace model instead of querying a vLLM server"
    )
    args = parser.parse_args()

    dots_ocr_parser = DotsOCRParser(
        ip=args.ip,
        port=args.port,
        model_name=args.model_name,
        temperature=args.temperature,
        top_p=args.top_p,
        max_completion_tokens=args.max_completion_tokens,
        num_thread=args.num_thread,
        dpi=args.dpi,
        output_dir=args.output,
        min_pixels=args.min_pixels,
        max_pixels=args.max_pixels,
        use_hf=args.use_hf,
    )

    fitz_preprocess = not args.no_fitz_preprocess
    if fitz_preprocess:
        print("Using fitz preprocess for image input; note that the image pixel dimensions may change")
    result = dots_ocr_parser.parse_file(
        args.input_path,
        prompt_mode=args.prompt,
        bbox=args.bbox,
        fitz_preprocess=fitz_preprocess,
    )


if __name__ == "__main__":
    main()
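
# Example CLI usage (assumes a vLLM server is already serving the model on localhost:8000;
# file names are illustrative placeholders):
#   python parser.py demo.pdf --output ./output --prompt prompt_layout_all_en
#   python parser.py page.png --prompt prompt_grounding_ocr --bbox 100 200 400 500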