# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import annotations

import copy
import re
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
from PIL import Image

from ....utils import logging
from ....utils.deps import pipeline_requires_extra
from ...common.batch_sampler import ImageBatchSampler
from ...common.reader import ReadImage
from ...models.object_detection.result import DetResult
from ...utils.hpi import HPIConfig
from ...utils.pp_option import PaddlePredictorOption
from ..base import BasePipeline
from ..ocr.result import OCRResult
from .result_v2 import LayoutParsingBlock, LayoutParsingRegion, LayoutParsingResultV2
from .utils import (
    caculate_bbox_area,
    calculate_minimum_enclosing_bbox,
    calculate_overlap_ratio,
    convert_formula_res_to_ocr_format,
    format_line,
    gather_imgs,
    get_bbox_intersection,
    get_sub_regions_ocr_res,
    group_boxes_into_lines,
    remove_overlap_blocks,
    shrink_supplement_region_bbox,
    split_boxes_by_projection,
    update_region_box,
)


@pipeline_requires_extra("ocr")
class LayoutParsingPipelineV2(BasePipeline):
    """Layout Parsing Pipeline V2"""

    entities = ["PP-StructureV3"]

    def __init__(
        self,
        config: dict,
        device: Optional[str] = None,
        pp_option: Optional[PaddlePredictorOption] = None,
        use_hpip: bool = False,
        hpi_config: Optional[Union[Dict[str, Any], HPIConfig]] = None,
    ) -> None:
        """Initializes the layout parsing pipeline.

        Args:
            config (dict): Configuration dictionary containing various settings.
            device (str, optional): Device to run the predictions on. Defaults to None.
            pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
            use_hpip (bool, optional): Whether to use the high-performance
                inference plugin (HPIP) by default. Defaults to False.
            hpi_config (Optional[Union[Dict[str, Any], HPIConfig]], optional):
                The default high-performance inference configuration dictionary.
                Defaults to None.
        """
        super().__init__(
            device=device,
            pp_option=pp_option,
            use_hpip=use_hpip,
            hpi_config=hpi_config,
        )
        self.initialize_predictor(config)
        self.batch_sampler = ImageBatchSampler(batch_size=1)
        self.img_reader = ReadImage(format="BGR")

    def initialize_predictor(self, config: dict) -> None:
        """Initializes the predictors and sub-pipelines based on the provided configuration.

        Args:
            config (dict): A dictionary containing the configuration for the predictor.

        Returns:
            None
        """
        self.use_doc_preprocessor = config.get("use_doc_preprocessor", True)
        self.use_general_ocr = config.get("use_general_ocr", True)
        self.use_table_recognition = config.get("use_table_recognition", True)
        self.use_seal_recognition = config.get("use_seal_recognition", True)
        self.use_region_detection = config.get(
            "use_region_detection",
            False,
        )
        self.use_formula_recognition = config.get(
            "use_formula_recognition",
            True,
        )

        if self.use_doc_preprocessor:
            doc_preprocessor_config = config.get("SubPipelines", {}).get(
                "DocPreprocessor",
                {
                    "pipeline_config_error": "config error for doc_preprocessor_pipeline!",
                },
            )
            self.doc_preprocessor_pipeline = self.create_pipeline(
                doc_preprocessor_config,
            )

        if self.use_region_detection:
            region_detection_config = config.get("SubModules", {}).get(
                "RegionDetection",
                {
                    "model_config_error": "config error for block_region_detection_model!"
                },
            )
            self.region_detection_model = self.create_model(
                region_detection_config,
            )

        layout_det_config = config.get("SubModules", {}).get(
            "LayoutDetection",
            {"model_config_error": "config error for layout_det_model!"},
        )
        layout_kwargs = {}
        if (threshold := layout_det_config.get("threshold", None)) is not None:
            layout_kwargs["threshold"] = threshold
        if (layout_nms := layout_det_config.get("layout_nms", None)) is not None:
            layout_kwargs["layout_nms"] = layout_nms
        if (
            layout_unclip_ratio := layout_det_config.get("layout_unclip_ratio", None)
        ) is not None:
            layout_kwargs["layout_unclip_ratio"] = layout_unclip_ratio
        if (
            layout_merge_bboxes_mode := layout_det_config.get(
                "layout_merge_bboxes_mode", None
            )
        ) is not None:
            layout_kwargs["layout_merge_bboxes_mode"] = layout_merge_bboxes_mode
        self.layout_det_model = self.create_model(layout_det_config, **layout_kwargs)

        if self.use_general_ocr or self.use_table_recognition:
            general_ocr_config = config.get("SubPipelines", {}).get(
                "GeneralOCR",
                {"pipeline_config_error": "config error for general_ocr_pipeline!"},
            )
            self.general_ocr_pipeline = self.create_pipeline(
                general_ocr_config,
            )

        if self.use_seal_recognition:
            seal_recognition_config = config.get("SubPipelines", {}).get(
                "SealRecognition",
                {
                    "pipeline_config_error": "config error for seal_recognition_pipeline!",
                },
            )
            self.seal_recognition_pipeline = self.create_pipeline(
                seal_recognition_config,
            )

        if self.use_table_recognition:
            table_recognition_config = config.get("SubPipelines", {}).get(
                "TableRecognition",
                {
                    "pipeline_config_error": "config error for table_recognition_pipeline!",
                },
            )
            self.table_recognition_pipeline = self.create_pipeline(
                table_recognition_config,
            )

        if self.use_formula_recognition:
            formula_recognition_config = config.get("SubPipelines", {}).get(
                "FormulaRecognition",
                {
                    "pipeline_config_error": "config error for formula_recognition_pipeline!",
                },
            )
            self.formula_recognition_pipeline = self.create_pipeline(
                formula_recognition_config,
            )

        return

    def get_text_paragraphs_ocr_res(
        self,
        overall_ocr_res: OCRResult,
        layout_det_res: DetResult,
    ) -> OCRResult:
        """
        Retrieves the OCR results for text paragraphs, excluding those of formulas, tables, and seals.

        Args:
            overall_ocr_res (OCRResult): The overall OCR result containing text information.
            layout_det_res (DetResult): The detection result containing the layout information of the document.

        Returns:
            OCRResult: The OCR result for text paragraphs after excluding formulas, tables, and seals.
        """
        object_boxes = []
        for box_info in layout_det_res["boxes"]:
            if box_info["label"].lower() in ["formula", "table", "seal"]:
                object_boxes.append(box_info["coordinate"])
        object_boxes = np.array(object_boxes)
        sub_regions_ocr_res = get_sub_regions_ocr_res(
            overall_ocr_res, object_boxes, flag_within=False
        )
        return sub_regions_ocr_res

    def check_model_settings_valid(self, input_params: dict) -> bool:
        """
        Check if the input parameters are valid based on the initialized models.

        Args:
            input_params (dict): A dictionary containing input parameters.

        Returns:
            bool: True if all required models are initialized according to input parameters, False otherwise.
        """
        if input_params["use_doc_preprocessor"] and not self.use_doc_preprocessor:
            logging.error(
                "Set use_doc_preprocessor, but the models for doc preprocessor are not initialized.",
            )
            return False
        if input_params["use_general_ocr"] and not self.use_general_ocr:
            logging.error(
                "Set use_general_ocr, but the models for general OCR are not initialized.",
            )
            return False
        if input_params["use_seal_recognition"] and not self.use_seal_recognition:
            logging.error(
                "Set use_seal_recognition, but the models for seal recognition are not initialized.",
            )
            return False
        if input_params["use_table_recognition"] and not self.use_table_recognition:
            logging.error(
                "Set use_table_recognition, but the models for table recognition are not initialized.",
            )
            return False
        return True

    def standardized_data(
        self,
        image: np.ndarray,
        parameters_config: dict,
        block_label_mapping: dict,
        region_det_res: DetResult,
        layout_det_res: DetResult,
        overall_ocr_res: OCRResult,
        formula_res_list: list,
        text_rec_model: Any,
        text_rec_score_thresh: Union[float, None] = None,
    ) -> tuple:
        """
        Standardizes the layout detection, region detection, and OCR results, and builds the mappings between them.

        Args:
            image (np.ndarray): The input image.
            parameters_config (dict): The parameter configuration for layout parsing.
            block_label_mapping (dict): The mapping of block labels to label categories.
            region_det_res (DetResult): The region detection results.
            layout_det_res (DetResult): An object containing the layout detection results, including detected layout boxes and their labels. The structure is expected to have:
                - "boxes": A list of dictionaries with keys "coordinate" for box coordinates and "label" for the type of content.
            overall_ocr_res (OCRResult): An object containing the overall OCR results, including detected text boxes and recognized text. The structure is expected to have:
                - "input_img": The image on which OCR was performed.
                - "dt_boxes": A list of detected text box coordinates.
                - "rec_texts": A list of recognized text corresponding to the detected boxes.
            formula_res_list (list): A list of formula recognition results.
            text_rec_model (Any): The text recognition model.
            text_rec_score_thresh (Optional[float], optional): The score threshold for text recognition. Defaults to None.

        Returns:
            tuple: A (region_block_ocr_idx_map, region_det_res, layout_det_res) tuple, where region_block_ocr_idx_map
                maps regions to layout blocks and layout blocks to OCR indices.
        """
        matched_ocr_dict = {}
        region_to_block_map = {}
        block_to_ocr_map = {}
        object_boxes = []
        footnote_list = []
        paragraph_title_list = []
        bottom_text_y_max = 0
        max_block_area = 0.0
        doc_title_num = 0
        base_region_bbox = [65535, 65535, 0, 0]
        layout_det_res = remove_overlap_blocks(
            layout_det_res,
            threshold=0.5,
            smaller=True,
        )

        # convert formula_res_list to OCRResult format
        convert_formula_res_to_ocr_format(formula_res_list, overall_ocr_res)

        # match layout boxes with OCR boxes and collect statistics for the label fixes below
        for box_idx, box_info in enumerate(layout_det_res["boxes"]):
            box = box_info["coordinate"]
            label = box_info["label"].lower()
            object_boxes.append(box)
            _, _, _, y2 = box

            # update the region box and max_block_area according to the layout boxes
            base_region_bbox = update_region_box(box, base_region_bbox)
            max_block_area = max(max_block_area, caculate_bbox_area(box))

            if label == "footnote":
                footnote_list.append(box_idx)
            elif label == "paragraph_title":
                paragraph_title_list.append(box_idx)
            if label == "text":
                bottom_text_y_max = max(y2, bottom_text_y_max)
            if label == "doc_title":
                doc_title_num += 1

            if label not in ["formula", "table", "seal"]:
                _, matched_idxes = get_sub_regions_ocr_res(
                    overall_ocr_res, [box], return_match_idx=True
                )
                block_to_ocr_map[box_idx] = matched_idxes
                for matched_idx in matched_idxes:
                    if matched_ocr_dict.get(matched_idx, None) is None:
                        matched_ocr_dict[matched_idx] = [box_idx]
                    else:
                        matched_ocr_dict[matched_idx].append(box_idx)

        # relabel footnotes that sit above the lowest text box as text
        for footnote_idx in footnote_list:
            if (
                layout_det_res["boxes"][footnote_idx]["coordinate"][3]
                < bottom_text_y_max
            ):
                layout_det_res["boxes"][footnote_idx]["label"] = "text"

        # check if there is only one paragraph title and no doc_title
        only_one_paragraph_title = len(paragraph_title_list) == 1 and doc_title_num == 0
        if only_one_paragraph_title:
            paragraph_title_block_area = caculate_bbox_area(
                layout_det_res["boxes"][paragraph_title_list[0]]["coordinate"]
            )
            title_area_max_block_threshold = parameters_config["block"].get(
                "title_conversion_area_ratio_threshold", 0.3
            )
            if (
                paragraph_title_block_area
                > max_block_area * title_area_max_block_threshold
            ):
                layout_det_res["boxes"][paragraph_title_list[0]]["label"] = "doc_title"

        # re-recognize OCR boxes that straddle multiple layout blocks ("hurdles"):
        # crop the intersection with each block and run text recognition per block
        for overall_ocr_idx, layout_box_ids in matched_ocr_dict.items():
            if len(layout_box_ids) > 1:
                matched_no = 0
                overall_ocr_box = copy.deepcopy(
                    overall_ocr_res["rec_boxes"][overall_ocr_idx]
                )
                overall_ocr_dt_poly = copy.deepcopy(
                    overall_ocr_res["dt_polys"][overall_ocr_idx]
                )
                for box_idx in layout_box_ids:
                    layout_box = layout_det_res["boxes"][box_idx]["coordinate"]
                    crop_box = get_bbox_intersection(overall_ocr_box, layout_box)
                    for ocr_idx in block_to_ocr_map[box_idx]:
                        ocr_box = overall_ocr_res["rec_boxes"][ocr_idx]
                        iou = calculate_overlap_ratio(ocr_box, crop_box, "small")
                        if iou > 0.8:
                            overall_ocr_res["rec_texts"][ocr_idx] = ""
                    x1, y1, x2, y2 = [int(i) for i in crop_box]
                    crop_img = np.array(image)[y1:y2, x1:x2]
                    crop_img_rec_res = next(text_rec_model([crop_img]))
                    crop_img_dt_poly = get_bbox_intersection(
                        overall_ocr_dt_poly, layout_box, return_format="poly"
                    )
                    crop_img_rec_score = crop_img_rec_res["rec_score"]
                    crop_img_rec_text = crop_img_rec_res["rec_text"]
                    text_rec_score_thresh = (
                        text_rec_score_thresh
                        if text_rec_score_thresh is not None
                        else self.general_ocr_pipeline.text_rec_score_thresh
                    )
                    if crop_img_rec_score >= text_rec_score_thresh:
                        matched_no += 1
                        if matched_no == 1:
                            # the first match replaces the original OCR entry
                            overall_ocr_res["dt_polys"][overall_ocr_idx] = (
                                crop_img_dt_poly
                            )
                            overall_ocr_res["rec_boxes"][overall_ocr_idx] = crop_box
                            overall_ocr_res["rec_polys"][overall_ocr_idx] = (
                                crop_img_dt_poly
                            )
                            overall_ocr_res["rec_scores"][overall_ocr_idx] = (
                                crop_img_rec_score
                            )
                            overall_ocr_res["rec_texts"][overall_ocr_idx] = (
                                crop_img_rec_text
                            )
                        else:
                            # subsequent matches are appended to the overall OCR result
                            overall_ocr_res["dt_polys"].append(crop_img_dt_poly)
                            overall_ocr_res["rec_boxes"] = np.vstack(
                                (overall_ocr_res["rec_boxes"], crop_box)
                            )
                            overall_ocr_res["rec_polys"].append(crop_img_dt_poly)
                            overall_ocr_res["rec_scores"].append(crop_img_rec_score)
                            overall_ocr_res["rec_texts"].append(crop_img_rec_text)
                            overall_ocr_res["rec_labels"].append("text")
                            block_to_ocr_map[box_idx].remove(overall_ocr_idx)
                            block_to_ocr_map[box_idx].append(
                                len(overall_ocr_res["rec_texts"]) - 1
                            )

        # use the layout bbox to run OCR recognition when a block matched no OCR result
        for layout_box_idx, overall_ocr_idxes in block_to_ocr_map.items():
            has_text = False
            for idx in overall_ocr_idxes:
                if overall_ocr_res["rec_texts"][idx] != "":
                    has_text = True
                    break
            if not has_text and layout_det_res["boxes"][layout_box_idx][
                "label"
            ] not in block_label_mapping.get("vision_labels", []):
                crop_box = layout_det_res["boxes"][layout_box_idx]["coordinate"]
                x1, y1, x2, y2 = [int(i) for i in crop_box]
                crop_img = np.array(image)[y1:y2, x1:x2]
                crop_img_rec_res = next(text_rec_model([crop_img]))
                crop_img_dt_poly = get_bbox_intersection(
                    crop_box, crop_box, return_format="poly"
                )
                crop_img_rec_score = crop_img_rec_res["rec_score"]
                crop_img_rec_text = crop_img_rec_res["rec_text"]
                text_rec_score_thresh = (
                    text_rec_score_thresh
                    if text_rec_score_thresh is not None
                    else self.general_ocr_pipeline.text_rec_score_thresh
                )
                if crop_img_rec_score >= text_rec_score_thresh:
                    overall_ocr_res["rec_boxes"] = np.vstack(
                        (overall_ocr_res["rec_boxes"], crop_box)
                    )
                    overall_ocr_res["rec_polys"].append(crop_img_dt_poly)
                    overall_ocr_res["rec_scores"].append(crop_img_rec_score)
                    overall_ocr_res["rec_texts"].append(crop_img_rec_text)
                    overall_ocr_res["rec_labels"].append("text")
                    block_to_ocr_map[layout_box_idx].append(
                        len(overall_ocr_res["rec_texts"]) - 1
                    )

        # when there is no layout detection result but there are OCR results,
        # convert the OCR detection results into layout detection results
        if len(layout_det_res["boxes"]) == 0 and len(overall_ocr_res["rec_boxes"]) > 0:
            for idx, ocr_rec_box in enumerate(overall_ocr_res["rec_boxes"]):
                base_region_bbox = update_region_box(ocr_rec_box, base_region_bbox)
                layout_det_res["boxes"].append(
                    {
                        "label": "text",
                        "coordinate": ocr_rec_box,
                        "score": overall_ocr_res["rec_scores"][idx],
                    }
                )
                block_to_ocr_map[idx] = [idx]
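
        # Assign each layout block to a region. Regions are visited from the
        # smallest area upward (sorted below), so a block covered by several
        # candidate regions is claimed by the tightest one first; any blocks
        # left over are wrapped into synthetic "SupplementaryBlock" regions.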
        block_bboxes = [box["coordinate"] for box in layout_det_res["boxes"]]
        region_det_res["boxes"] = sorted(
            region_det_res["boxes"],
            key=lambda item: caculate_bbox_area(item["coordinate"]),
        )
        if len(region_det_res["boxes"]) == 0:
            region_det_res["boxes"] = [
                {
                    "coordinate": base_region_bbox,
                    "label": "SupplementaryBlock",
                    "score": 1,
                }
            ]
            region_to_block_map[0] = range(len(block_bboxes))
        else:
            block_idxes_set = set(range(len(block_bboxes)))
            # match each block to a region
            for region_idx, region_info in enumerate(region_det_res["boxes"]):
                matched_idxes = []
                region_to_block_map[region_idx] = []
                region_bbox = region_info["coordinate"]
                for block_idx in block_idxes_set:
                    overlap_ratio = calculate_overlap_ratio(
                        region_bbox, block_bboxes[block_idx], mode="small"
                    )
                    if overlap_ratio > parameters_config["region"].get(
                        "match_block_overlap_ratio_threshold", 0.8
                    ):
                        region_to_block_map[region_idx].append(block_idx)
                        matched_idxes.append(block_idx)
                if len(matched_idxes) > 0:
                    for block_idx in matched_idxes:
                        block_idxes_set.remove(block_idx)
                    matched_bboxes = [block_bboxes[idx] for idx in matched_idxes]
                    new_region_bbox = calculate_minimum_enclosing_bbox(matched_bboxes)
                    region_det_res["boxes"][region_idx]["coordinate"] = new_region_bbox
            # supplement a region when some blocks matched no existing region
            if len(block_idxes_set) > 0:
                while len(block_idxes_set) > 0:
                    matched_idxes = []
                    unmatched_bboxes = [block_bboxes[idx] for idx in block_idxes_set]
                    supplement_region_bbox = calculate_minimum_enclosing_bbox(
                        unmatched_bboxes
                    )
                    # if the new region bbox overlaps another region bbox, shrink it
                    for region_info in region_det_res["boxes"]:
                        region_bbox = region_info["coordinate"]
                        overlap_ratio = calculate_overlap_ratio(
                            supplement_region_bbox, region_bbox
                        )
                        if overlap_ratio > 0:
                            supplement_region_bbox, matched_idxes = (
                                shrink_supplement_region_bbox(
                                    supplement_region_bbox,
                                    region_bbox,
                                    image.shape[1],
                                    image.shape[0],
                                    block_idxes_set,
                                    block_bboxes,
                                    parameters_config,
                                )
                            )
                    if len(matched_idxes) == 0:
                        matched_idxes = list(block_idxes_set)
                    region_idx = len(region_det_res["boxes"])
                    region_to_block_map[region_idx] = list(matched_idxes)
                    for block_idx in matched_idxes:
                        block_idxes_set.remove(block_idx)
                    region_det_res["boxes"].append(
                        {
                            "coordinate": supplement_region_bbox,
                            "label": "SupplementaryBlock",
                            "score": 1,
                        }
                    )

        region_block_ocr_idx_map = dict(
            region_to_block_map=region_to_block_map,
            block_to_ocr_map=block_to_ocr_map,
        )
        return region_block_ocr_idx_map, region_det_res, layout_det_res

    def sort_line_by_projection(
        self,
        line: List[List[Union[List[int], str]]],
        input_img: np.ndarray,
        text_rec_model: Any,
        text_rec_score_thresh: Union[float, None] = None,
        orientation: str = "vertical",
    ) -> list:
        """
        Sort a line of text spans along the projection axis, re-recognizing spans that were split.

        Args:
            line (list): A list of spans, where each span is a list containing a bounding box, text, and label.
            input_img (np.ndarray): The input image used for OCR.
            text_rec_model (Any): The text recognition model.
            text_rec_score_thresh (Union[float, None], optional): The score threshold for text recognition. Defaults to None.
            orientation (str, optional): The line orientation, "horizontal" or "vertical". Defaults to "vertical".

        Returns:
            list: The sorted line of text spans.
        """
        sort_index = 0 if orientation == "horizontal" else 1
        split_boxes = split_boxes_by_projection(line, orientation)
        split_lines = []
        if len(line) != len(split_boxes):
            split_boxes.sort(key=lambda span: span[0][sort_index])
            for span in split_boxes:
                bbox, text, label = span
                if label == "text":
                    # re-run text recognition on the cropped span region
                    crop_img = input_img[
                        int(bbox[1]) : int(bbox[3]),
                        int(bbox[0]) : int(bbox[2]),
                    ]
                    crop_img_rec_res = next(text_rec_model([crop_img]))
                    crop_img_rec_score = crop_img_rec_res["rec_score"]
                    crop_img_rec_text = crop_img_rec_res["rec_text"]
                    text = (
                        crop_img_rec_text
                        if crop_img_rec_score >= text_rec_score_thresh
                        else ""
                    )
                span[1] = text
                split_lines.append(span)
        else:
            split_lines = line
        return split_lines

    def get_block_rec_content(
        self,
        image: np.ndarray,
        line_parameters_config: dict,
        ocr_rec_res: dict,
        block: LayoutParsingBlock,
        text_rec_model: Any,
        text_rec_score_thresh: Union[float, None] = None,
    ) -> LayoutParsingBlock:
        if len(ocr_rec_res["rec_texts"]) == 0:
            block.content = ""
            return block

        lines, text_orientation = group_boxes_into_lines(
            ocr_rec_res,
            line_parameters_config.get("line_height_iou_threshold", 0.8),
        )
        if block.label == "reference":
            rec_boxes = ocr_rec_res["boxes"]
            block_right_coordinate = max([box[2] for box in rec_boxes])
            last_line_span_limit = 20
        else:
            block_right_coordinate = block.bbox[2]
            last_line_span_limit = 10

        # format line
        text_lines = []
        need_new_line_num = 0
        sort_index = 0 if text_orientation == "horizontal" else 1
        for idx, line in enumerate(lines):
            line.sort(key=lambda span: span[0][sort_index])

            # merge formula and text
            ocr_labels = [span[2] for span in line]
            if "formula" in ocr_labels:
                line = self.sort_line_by_projection(
                    line, image, text_rec_model, text_rec_score_thresh, text_orientation
                )

            line_text, need_new_line = format_line(
                line,
                block_right_coordinate,
                last_line_span_limit=last_line_span_limit,
                block_label=block.label,
            )
            if need_new_line:
                need_new_line_num += 1
            if idx == 0:
                block.seg_start_coordinate = line[0][0][0]
            elif idx == len(lines) - 1:
                block.seg_end_coordinate = line[-1][0][2]
            text_lines.append(line_text)
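
        # Join the lines with the delimiter configured for this block label;
        # when more than half of the lines were flagged by format_line as
        # needing an explicit break and no delimiter is configured, fall back
        # to joining with newlines so the breaks are preserved.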
        delim = line_parameters_config["delimiter_map"].get(block.label, "")
        if need_new_line_num > len(text_lines) * 0.5 and delim == "":
            delim = "\n"
        block.content = delim.join(text_lines)
        block.num_of_lines = len(text_lines)
        block.orientation = text_orientation

        return block

    def get_layout_parsing_blocks(
        self,
        image: np.ndarray,
        parameters_config: dict,
        block_label_mapping: dict,
        region_block_ocr_idx_map: dict,
        region_det_res: DetResult,
        overall_ocr_res: OCRResult,
        layout_det_res: DetResult,
        table_res_list: list,
        seal_res_list: list,
        text_rec_model: Any,
        text_rec_score_thresh: Union[float, None] = None,
    ) -> list:
        """
        Extract structured information from OCR and layout detection results.

        Args:
            image (np.ndarray): The input image.
            parameters_config (dict): The parameter configuration for layout parsing.
            block_label_mapping (dict): The mapping of block labels to label categories.
            region_block_ocr_idx_map (dict): Mappings from regions to layout blocks and from blocks to OCR indices.
            region_det_res (DetResult): The region detection results.
            overall_ocr_res (OCRResult): An object containing the overall OCR results, including detected text boxes and recognized text. The structure is expected to have:
                - "input_img": The image on which OCR was performed.
                - "dt_boxes": A list of detected text box coordinates.
                - "rec_texts": A list of recognized text corresponding to the detected boxes.
            layout_det_res (DetResult): An object containing the layout detection results, including detected layout boxes and their labels. The structure is expected to have:
                - "boxes": A list of dictionaries with keys "coordinate" for box coordinates and "block_label" for the type of content.
            table_res_list (list): A list of table recognition results, where each item is a dictionary containing:
                - "block_bbox": The bounding box of the table layout.
                - "pred_html": The predicted HTML representation of the table.
            seal_res_list (list): A list of seal recognition results. The details of each item depend on the specific application context.
            text_rec_model (Any): A model for text recognition.
            text_rec_score_thresh (Union[float, None]): The minimum score required for a recognized character to be considered valid. If None, use the default value specified during initialization. Defaults to None.

        Returns:
            list: A list of LayoutParsingRegion objects in reading order, each holding its LayoutParsingBlock objects.
        """
        table_index = 0
        seal_index = 0
        layout_parsing_blocks: List[LayoutParsingBlock] = []

        for box_idx, box_info in enumerate(layout_det_res["boxes"]):
            label = box_info["label"]
            block_bbox = box_info["coordinate"]
            rec_res = {"boxes": [], "rec_texts": [], "rec_labels": []}
            block = LayoutParsingBlock(label=label, bbox=block_bbox)
            if label == "table" and len(table_res_list) > 0:
                block.content = table_res_list[table_index]["pred_html"]
                table_index += 1
            elif label == "seal" and len(seal_res_list) > 0:
                block.content = seal_res_list[seal_index]["rec_texts"]
                seal_index += 1
            else:
                if label == "formula":
                    _, ocr_idx_list = get_sub_regions_ocr_res(
                        overall_ocr_res, [block_bbox], return_match_idx=True
                    )
                    region_block_ocr_idx_map["block_to_ocr_map"][box_idx] = ocr_idx_list
                else:
                    ocr_idx_list = region_block_ocr_idx_map["block_to_ocr_map"].get(
                        box_idx, []
                    )
                for box_no in ocr_idx_list:
                    rec_res["boxes"].append(overall_ocr_res["rec_boxes"][box_no])
                    rec_res["rec_texts"].append(
                        overall_ocr_res["rec_texts"][box_no],
                    )
                    rec_res["rec_labels"].append(
                        overall_ocr_res["rec_labels"][box_no],
                    )
                block = self.get_block_rec_content(
                    image=image,
                    block=block,
                    line_parameters_config=parameters_config["line"],
                    ocr_rec_res=rec_res,
                    text_rec_model=text_rec_model,
                    text_rec_score_thresh=text_rec_score_thresh,
                )

            if label in ["chart", "image"]:
                x_min, y_min, x_max, y_max = list(map(int, block_bbox))
                img_path = f"imgs/img_in_table_box_{x_min}_{y_min}_{x_max}_{y_max}.jpg"
                img = Image.fromarray(image[y_min:y_max, x_min:x_max, ::-1])
                block.image = {img_path: img}

            layout_parsing_blocks.append(block)

        region_list: List[LayoutParsingRegion] = []
        for region_idx, region_info in enumerate(region_det_res["boxes"]):
            region_bbox = region_info["coordinate"]
            region_blocks = [
                layout_parsing_blocks[idx]
                for idx in region_block_ocr_idx_map["region_to_block_map"][region_idx]
            ]
            region = LayoutParsingRegion(
                region_bbox=region_bbox,
                blocks=region_blocks,
                block_label_mapping=block_label_mapping,
            )
            region_list.append(region)
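
        # Sort regions into reading order: r.euclidean_distance is quantized
        # into 50-pixel bands so nearby regions compare equal, and
        # r.center_euclidean_distance then breaks the tie (both attributes are
        # computed in LayoutParsingRegion, see result_v2).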
        region_list = sorted(
            region_list,
            key=lambda r: (r.euclidean_distance // 50, r.center_euclidean_distance),
        )
        return region_list

    def get_layout_parsing_res(
        self,
        image: np.ndarray,
        region_det_res: DetResult,
        layout_det_res: DetResult,
        overall_ocr_res: OCRResult,
        table_res_list: list,
        seal_res_list: list,
        formula_res_list: list,
        text_rec_score_thresh: Union[float, None] = None,
    ) -> list:
        """
        Retrieves the layout parsing result based on the layout detection result, OCR result, and other recognition results.

        Args:
            image (np.ndarray): The input image.
            region_det_res (DetResult): The detection result containing the region information of the document.
            layout_det_res (DetResult): The detection result containing the layout information of the document.
            overall_ocr_res (OCRResult): The overall OCR result containing text information.
            table_res_list (list): A list of table recognition results.
            seal_res_list (list): A list of seal recognition results.
            formula_res_list (list): A list of formula recognition results.
            text_rec_score_thresh (Optional[float], optional): The score threshold for text recognition. Defaults to None.

        Returns:
            list: A list of LayoutParsingBlock objects in reading order.
        """
        from .setting import block_label_mapping, parameters_config

        # Standardize data
        region_block_ocr_idx_map, region_det_res, layout_det_res = (
            self.standardized_data(
                image=image,
                parameters_config=parameters_config,
                block_label_mapping=block_label_mapping,
                region_det_res=region_det_res,
                layout_det_res=layout_det_res,
                overall_ocr_res=overall_ocr_res,
                formula_res_list=formula_res_list,
                text_rec_model=self.general_ocr_pipeline.text_rec_model,
                text_rec_score_thresh=text_rec_score_thresh,
            )
        )

        # Format layout parsing block
        region_list = self.get_layout_parsing_blocks(
            image=image,
            parameters_config=parameters_config,
            block_label_mapping=block_label_mapping,
            region_block_ocr_idx_map=region_block_ocr_idx_map,
            region_det_res=region_det_res,
            overall_ocr_res=overall_ocr_res,
            layout_det_res=layout_det_res,
            table_res_list=table_res_list,
            seal_res_list=seal_res_list,
            text_rec_model=self.general_ocr_pipeline.text_rec_model,
            text_rec_score_thresh=self.general_ocr_pipeline.text_rec_score_thresh,
        )

        parsing_res_list = []
        for region in region_list:
            parsing_res_list.extend(region.sort())

        visualize_index_labels = block_label_mapping["visualize_index_labels"]
        index = 1
        for block in parsing_res_list:
            if block.label in visualize_index_labels:
                block.index = index
                index += 1

        return parsing_res_list

    def get_model_settings(
        self,
        use_doc_orientation_classify: Union[bool, None],
        use_doc_unwarping: Union[bool, None],
        use_general_ocr: Union[bool, None],
        use_seal_recognition: Union[bool, None],
        use_table_recognition: Union[bool, None],
        use_formula_recognition: Union[bool, None],
        use_chart_recognition: Union[bool, None],
        use_region_detection: Union[bool, None],
        is_pretty_markdown: Union[bool, None],
    ) -> dict:
        """
        Get the model settings based on the provided parameters or default values.

        Args:
            use_doc_orientation_classify (Union[bool, None]): Enables document orientation classification if True. Defaults to system setting if None.
            use_doc_unwarping (Union[bool, None]): Enables document unwarping if True. Defaults to system setting if None.
            use_general_ocr (Union[bool, None]): Enables general OCR if True. Defaults to system setting if None.
            use_seal_recognition (Union[bool, None]): Enables seal recognition if True. Defaults to system setting if None.
            use_table_recognition (Union[bool, None]): Enables table recognition if True. Defaults to system setting if None.
            use_formula_recognition (Union[bool, None]): Enables formula recognition if True. Defaults to system setting if None.
            use_chart_recognition (Union[bool, None]): Enables chart recognition if True. Defaults to system setting if None.
            use_region_detection (Union[bool, None]): Enables region detection if True. Defaults to system setting if None.
            is_pretty_markdown (Union[bool, None]): Enables prettified Markdown output if True. Passed through unchanged.

        Returns:
            dict: A dictionary containing the model settings.
        """
        if use_doc_orientation_classify is None and use_doc_unwarping is None:
            use_doc_preprocessor = self.use_doc_preprocessor
        elif use_doc_orientation_classify is True or use_doc_unwarping is True:
            use_doc_preprocessor = True
        else:
            use_doc_preprocessor = False
        if use_general_ocr is None:
            use_general_ocr = self.use_general_ocr
        if use_seal_recognition is None:
            use_seal_recognition = self.use_seal_recognition
        if use_table_recognition is None:
            use_table_recognition = self.use_table_recognition
        if use_formula_recognition is None:
            use_formula_recognition = self.use_formula_recognition
        if use_region_detection is None:
            use_region_detection = self.use_region_detection
        return dict(
            use_doc_preprocessor=use_doc_preprocessor,
            use_general_ocr=use_general_ocr,
            use_seal_recognition=use_seal_recognition,
            use_table_recognition=use_table_recognition,
            use_formula_recognition=use_formula_recognition,
            use_chart_recognition=use_chart_recognition,
            use_region_detection=use_region_detection,
            is_pretty_markdown=is_pretty_markdown,
        )

    def predict(
        self,
        input: Union[str, List[str], np.ndarray, List[np.ndarray]],
        use_doc_orientation_classify: Union[bool, None] = None,
        use_doc_unwarping: Union[bool, None] = None,
        use_textline_orientation: Optional[bool] = None,
        use_general_ocr: Union[bool, None] = None,
        use_seal_recognition: Union[bool, None] = None,
        use_table_recognition: Union[bool, None] = None,
        use_formula_recognition: Union[bool, None] = None,
        use_chart_recognition: Union[bool, None] = None,
        use_region_detection: Union[bool, None] = None,
        layout_threshold: Optional[Union[float, dict]] = None,
        layout_nms: Optional[bool] = None,
        layout_unclip_ratio: Optional[Union[float, Tuple[float, float], dict]] = None,
        layout_merge_bboxes_mode: Optional[str] = None,
        text_det_limit_side_len: Union[int, None] = None,
        text_det_limit_type: Union[str, None] = None,
        text_det_thresh: Union[float, None] = None,
        text_det_box_thresh: Union[float, None] = None,
        text_det_unclip_ratio: Union[float, None] = None,
        text_rec_score_thresh: Union[float, None] = None,
        seal_det_limit_side_len: Union[int, None] = None,
        seal_det_limit_type: Union[str, None] = None,
        seal_det_thresh: Union[float, None] = None,
        seal_det_box_thresh: Union[float, None] = None,
        seal_det_unclip_ratio: Union[float, None] = None,
        seal_rec_score_thresh: Union[float, None] = None,
        use_table_cells_ocr_results: bool = False,
        use_e2e_wired_table_rec_model: bool = False,
        use_e2e_wireless_table_rec_model: bool = True,
        is_pretty_markdown: Union[bool, None] = None,
        use_layout_gt: bool = False,
        layout_gt_dir: Union[str, None] = None,
        **kwargs,
    ) -> LayoutParsingResultV2:
        """
        Predicts the layout parsing result for the given input.

        Args:
            input (Union[str, List[str], np.ndarray, List[np.ndarray]]): The input image(s) or path(s) to process.
            use_doc_orientation_classify (Optional[bool]): Whether to use document orientation classification.
            use_doc_unwarping (Optional[bool]): Whether to use document unwarping.
            use_textline_orientation (Optional[bool]): Whether to use textline orientation prediction.
            use_general_ocr (Optional[bool]): Whether to use general OCR.
            use_seal_recognition (Optional[bool]): Whether to use seal recognition.
            use_table_recognition (Optional[bool]): Whether to use table recognition.
            use_formula_recognition (Optional[bool]): Whether to use formula recognition.
            use_chart_recognition (Optional[bool]): Whether to use chart recognition.
            use_region_detection (Optional[bool]): Whether to use region detection.
            layout_threshold (Optional[float]): The threshold value to filter out low-confidence predictions. Defaults to None.
            layout_nms (Optional[bool]): Whether to use layout-aware NMS. Defaults to None.
            layout_unclip_ratio (Optional[Union[float, Tuple[float, float]]], optional): The ratio of unclipping the bounding box.
                Defaults to None.
                If it's a single number, then both width and height are used.
                If it's a tuple of two numbers, then they are used separately for width and height respectively.
                If it's None, then no unclipping will be performed.
            layout_merge_bboxes_mode (Optional[str], optional): The mode for merging bounding boxes. Defaults to None.
            text_det_limit_side_len (Optional[int]): Maximum side length for text detection.
            text_det_limit_type (Optional[str]): Type of limit to apply for text detection.
            text_det_thresh (Optional[float]): Threshold for text detection.
            text_det_box_thresh (Optional[float]): Threshold for text detection boxes.
            text_det_unclip_ratio (Optional[float]): Ratio for unclipping text detection boxes.
            text_rec_score_thresh (Optional[float]): Score threshold for text recognition.
            seal_det_limit_side_len (Optional[int]): Maximum side length for seal detection.
            seal_det_limit_type (Optional[str]): Type of limit to apply for seal detection.
            seal_det_thresh (Optional[float]): Threshold for seal detection.
            seal_det_box_thresh (Optional[float]): Threshold for seal detection boxes.
            seal_det_unclip_ratio (Optional[float]): Ratio for unclipping seal detection boxes.
            seal_rec_score_thresh (Optional[float]): Score threshold for seal recognition.
            use_table_cells_ocr_results (bool): Whether to use OCR results with cells.
            use_e2e_wired_table_rec_model (bool): Whether to use the end-to-end wired table recognition model.
            use_e2e_wireless_table_rec_model (bool): Whether to use the end-to-end wireless table recognition model.
            is_pretty_markdown (Optional[bool]): Whether to prettify the generated Markdown output.
            use_layout_gt (bool): Whether to read layout boxes from ground-truth annotations instead of running layout detection.
            layout_gt_dir (Optional[str]): Directory holding the ground-truth annotations (notes.json and labels/). Required when use_layout_gt is True.
            **kwargs (Any): Additional settings to extend functionality.

        Returns:
            LayoutParsingResultV2: The predicted layout parsing result.
        """
        model_settings = self.get_model_settings(
            use_doc_orientation_classify,
            use_doc_unwarping,
            use_general_ocr,
            use_seal_recognition,
            use_table_recognition,
            use_formula_recognition,
            use_chart_recognition,
            use_region_detection,
            is_pretty_markdown,
        )
        if not self.check_model_settings_valid(model_settings):
            yield {"error": "the input params for model settings are invalid!"}
            return

        for batch_data in self.batch_sampler(input):
            image_array = self.img_reader(batch_data.instances)[0]

            if model_settings["use_doc_preprocessor"]:
                doc_preprocessor_res = next(
                    self.doc_preprocessor_pipeline(
                        image_array,
                        use_doc_orientation_classify=use_doc_orientation_classify,
                        use_doc_unwarping=use_doc_unwarping,
                    ),
                )
            else:
                doc_preprocessor_res = {"output_img": image_array}
            doc_preprocessor_image = doc_preprocessor_res["output_img"]

            if not use_layout_gt:
                layout_det_res = next(
                    self.layout_det_model(
                        doc_preprocessor_image,
                        threshold=layout_threshold,
                        layout_nms=layout_nms,
                        layout_unclip_ratio=layout_unclip_ratio,
                        layout_merge_bboxes_mode=layout_merge_bboxes_mode,
                    )
                )
            else:
                # Build a DetResult from ground-truth annotations instead of
                # running the layout detection model (debug/evaluation path).
                import json
                import os

                from ...models.object_detection.result import DetResult

                label_dir = layout_gt_dir
                notes_path = f"{label_dir}/notes.json"
                labels = f"{label_dir}/labels"
                # assumes `input` is a single image path whose label file shares its stem
                gt_file = os.path.basename(input)[:-4] + ".txt"
                gt_path = f"{labels}/{gt_file}"
                with open(notes_path, "r") as f:
                    notes = json.load(f)
                categories_map = {}
                for category in notes["categories"]:
                    categories_map[int(category["id"])] = category["name"]
                with open(gt_path, "r") as f:
                    lines = f.readlines()
                layout_det_res_dic = {
                    "input_img": doc_preprocessor_image,
                    "page_index": None,
                    "boxes": [],
                }
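
                # Each label line follows the YOLO detection format:
                # "class_id cx cy w h", with the center point and size
                # normalized to [0, 1]; convert back to absolute
                # top-left/bottom-right pixel coordinates.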
                for line in lines:
                    line = line.strip().split(" ")
                    category_id = int(line[0])
                    label = categories_map[category_id]
                    img_h, img_w = doc_preprocessor_image.shape[:2]
                    center_x = float(line[1]) * img_w
                    center_y = float(line[2]) * img_h
                    w = float(line[3]) * img_w
                    h = float(line[4]) * img_h
                    x0 = center_x - w / 2
                    y0 = center_y - h / 2
                    x1 = center_x + w / 2
                    y1 = center_y + h / 2
                    box = [x0, y0, x1, y1]
                    layout_det_res_dic["boxes"].append(
                        {
                            "cls_id": category_id,
                            "label": label,
                            "coordinate": box,
                            "score": 1.0,
                        }
                    )
                layout_det_res = DetResult(layout_det_res_dic)

            imgs_in_doc = gather_imgs(doc_preprocessor_image, layout_det_res["boxes"])

            if model_settings["use_region_detection"]:
                region_det_res = next(
                    self.region_detection_model(
                        doc_preprocessor_image,
                        layout_nms=True,
                        layout_merge_bboxes_mode="small",
                    ),
                )
            else:
                region_det_res = {"boxes": []}

            if model_settings["use_formula_recognition"]:
                formula_res_all = next(
                    self.formula_recognition_pipeline(
                        doc_preprocessor_image,
                        use_layout_detection=False,
                        use_doc_orientation_classify=False,
                        use_doc_unwarping=False,
                        layout_det_res=layout_det_res,
                    ),
                )
                formula_res_list = formula_res_all["formula_res_list"]
            else:
                formula_res_list = []
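
            # Blank out formula regions before running OCR so the text
            # detector does not produce duplicate boxes on top of the formula
            # recognition results; the original pixels are restored after
            # parsing (see below).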
            for formula_res in formula_res_list:
                x_min, y_min, x_max, y_max = list(map(int, formula_res["dt_polys"]))
                doc_preprocessor_image[y_min:y_max, x_min:x_max, :] = 255.0

            if (
                model_settings["use_general_ocr"]
                or model_settings["use_table_recognition"]
            ):
                overall_ocr_res = next(
                    self.general_ocr_pipeline(
                        doc_preprocessor_image,
                        use_textline_orientation=use_textline_orientation,
                        text_det_limit_side_len=text_det_limit_side_len,
                        text_det_limit_type=text_det_limit_type,
                        text_det_thresh=text_det_thresh,
                        text_det_box_thresh=text_det_box_thresh,
                        text_det_unclip_ratio=text_det_unclip_ratio,
                        text_rec_score_thresh=text_rec_score_thresh,
                    ),
                )
            else:
                overall_ocr_res = {}

            # guard with .get(): when OCR is skipped, overall_ocr_res is an empty dict
            overall_ocr_res["rec_labels"] = ["text"] * len(
                overall_ocr_res.get("rec_texts", [])
            )

            if model_settings["use_table_recognition"]:
                table_contents = copy.deepcopy(overall_ocr_res)
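                # Inject formula results (as $...$ LaTeX) and extracted images
                # (as <img> tags) into a copy of the OCR result so that table
                # recognition can place them inside the predicted table cells.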
                for formula_res in formula_res_list:
                    x_min, y_min, x_max, y_max = list(map(int, formula_res["dt_polys"]))
                    poly_points = [
                        (x_min, y_min),
                        (x_max, y_min),
                        (x_max, y_max),
                        (x_min, y_max),
                    ]
                    table_contents["dt_polys"].append(poly_points)
                    table_contents["rec_texts"].append(
                        f"${formula_res['rec_formula']}$"
                    )
                    table_contents["rec_boxes"] = np.vstack(
                        (table_contents["rec_boxes"], [formula_res["dt_polys"]])
                    )
                    table_contents["rec_polys"].append(poly_points)
                    table_contents["rec_scores"].append(1)

                for img in imgs_in_doc:
                    img_path = img["path"]
                    x_min, y_min, x_max, y_max = img["coordinate"]
                    poly_points = [
                        (x_min, y_min),
                        (x_max, y_min),
                        (x_max, y_max),
                        (x_min, y_max),
                    ]
                    table_contents["dt_polys"].append(poly_points)
                    table_contents["rec_texts"].append(
                        f'<div style="text-align: center;"><img src="{img_path}" alt="Image" /></div>'
                    )
                    if table_contents["rec_boxes"].size == 0:
                        table_contents["rec_boxes"] = np.array([img["coordinate"]])
                    else:
                        table_contents["rec_boxes"] = np.vstack(
                            (table_contents["rec_boxes"], img["coordinate"])
                        )
                    table_contents["rec_polys"].append(poly_points)
                    table_contents["rec_scores"].append(img["score"])

                table_res_all = next(
                    self.table_recognition_pipeline(
                        doc_preprocessor_image,
                        use_doc_orientation_classify=False,
                        use_doc_unwarping=False,
                        use_layout_detection=False,
                        use_ocr_model=False,
                        overall_ocr_res=table_contents,
                        layout_det_res=layout_det_res,
                        cell_sort_by_y_projection=True,
                        use_table_cells_ocr_results=use_table_cells_ocr_results,
                        use_e2e_wired_table_rec_model=use_e2e_wired_table_rec_model,
                        use_e2e_wireless_table_rec_model=use_e2e_wireless_table_rec_model,
                    ),
                )
                table_res_list = table_res_all["table_res_list"]
            else:
                table_res_list = []

            if model_settings["use_seal_recognition"]:
                seal_res_all = next(
                    self.seal_recognition_pipeline(
                        doc_preprocessor_image,
                        use_doc_orientation_classify=False,
                        use_doc_unwarping=False,
                        use_layout_detection=False,
                        layout_det_res=layout_det_res,
                        seal_det_limit_side_len=seal_det_limit_side_len,
                        seal_det_limit_type=seal_det_limit_type,
                        seal_det_thresh=seal_det_thresh,
                        seal_det_box_thresh=seal_det_box_thresh,
                        seal_det_unclip_ratio=seal_det_unclip_ratio,
                        seal_rec_score_thresh=seal_rec_score_thresh,
                    ),
                )
                seal_res_list = seal_res_all["seal_res_list"]
            else:
                seal_res_list = []

            parsing_res_list = self.get_layout_parsing_res(
                doc_preprocessor_image,
                region_det_res=region_det_res,
                layout_det_res=layout_det_res,
                overall_ocr_res=overall_ocr_res,
                table_res_list=table_res_list,
                seal_res_list=seal_res_list,
                formula_res_list=formula_res_list,
                text_rec_score_thresh=text_rec_score_thresh,
            )
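
            # Restore the original pixels of the formula regions that were
            # blanked out before OCR.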
            for formula_res in formula_res_list:
                x_min, y_min, x_max, y_max = list(map(int, formula_res["dt_polys"]))
                doc_preprocessor_image[y_min:y_max, x_min:x_max, :] = formula_res[
                    "input_img"
                ]

            single_img_res = {
                "input_path": batch_data.input_paths[0],
                "page_index": batch_data.page_indexes[0],
                "doc_preprocessor_res": doc_preprocessor_res,
                "layout_det_res": layout_det_res,
                "region_det_res": region_det_res,
                "overall_ocr_res": overall_ocr_res,
                "table_res_list": table_res_list,
                "seal_res_list": seal_res_list,
                "formula_res_list": formula_res_list,
                "parsing_res_list": parsing_res_list,
                "imgs_in_doc": imgs_in_doc,
                "model_settings": model_settings,
            }
            yield LayoutParsingResultV2(single_img_res)

    def concatenate_markdown_pages(self, markdown_list: list) -> str:
        """
        Concatenate Markdown content from multiple pages into a single document.

        Args:
            markdown_list (list): A list containing Markdown data for each page.

        Returns:
            str: The concatenated Markdown text.
        """
        markdown_texts = ""
        previous_page_last_element_paragraph_end_flag = True

        for res in markdown_list:
            # Get the paragraph flags for the current page
            page_first_element_paragraph_start_flag: bool = res[
                "page_continuation_flags"
            ][0]
            page_last_element_paragraph_end_flag: bool = res["page_continuation_flags"][
                1
            ]

            # Determine whether to add a space or a newline
            if (
                not page_first_element_paragraph_start_flag
                and not previous_page_last_element_paragraph_end_flag
            ):
                last_char_of_markdown = markdown_texts[-1] if markdown_texts else ""
                first_char_of_handler = (
                    res["markdown_texts"][0] if res["markdown_texts"] else ""
                )
                # Check if the last character and the first character are Chinese characters
                last_is_chinese_char = (
                    re.match(r"[\u4e00-\u9fff]", last_char_of_markdown)
                    if last_char_of_markdown
                    else False
                )
                first_is_chinese_char = (
                    re.match(r"[\u4e00-\u9fff]", first_char_of_handler)
                    if first_char_of_handler
                    else False
                )
                if not (last_is_chinese_char or first_is_chinese_char):
                    markdown_texts += " " + res["markdown_texts"]
                else:
                    markdown_texts += res["markdown_texts"]
            else:
                markdown_texts += "\n\n" + res["markdown_texts"]
            previous_page_last_element_paragraph_end_flag = (
                page_last_element_paragraph_end_flag
            )

        return markdown_texts
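

# A minimal usage sketch (illustrative, not part of the pipeline module).
# It assumes PaddleX is installed and that the high-level `create_pipeline`
# entry point resolves the "PP-StructureV3" name registered above via
# `entities`; the input path and output directory below are placeholders.
#
#     from paddlex import create_pipeline
#
#     pipeline = create_pipeline(pipeline="PP-StructureV3")
#     for res in pipeline.predict("document_page.png", use_table_recognition=True):
#         res.print()
#         res.save_to_json(save_path="output/")
#         res.save_to_markdown(save_path="output/")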