# main.py

import html
import logging
import os
import time
import traceback
from dataclasses import dataclass, asdict
from typing import List, Optional, Union, Dict, Any

import numpy as np
import cv2
from PIL import Image
from loguru import logger
from bs4 import BeautifulSoup

from mineru.utils.span_pre_proc import calculate_contrast
from .table_structure_unet import TSRUnet
from mineru.utils.enum_class import ModelPath
from mineru.utils.models_download_utils import auto_download_and_get_model_root_path
from .table_recover import TableRecover
from .utils import InputType, LoadImage, VisTable
from .utils_table_recover import (
    match_ocr_cell,
    plot_html_table,
    box_4_2_poly_to_box_4_1,
    sorted_ocr_boxes,
    gather_ocr_list_by_row,
)


@dataclass
class WiredTableInput:
    model_path: str
    device: str = "cpu"


@dataclass
class WiredTableOutput:
    pred_html: Optional[str] = None
    cell_bboxes: Optional[np.ndarray] = None
    logic_points: Optional[np.ndarray] = None
    elapse: Optional[float] = None


class WiredTableRecognition:
    def __init__(self, config: WiredTableInput, ocr_engine=None):
        self.table_structure = TSRUnet(asdict(config))
        self.load_img = LoadImage()
        self.table_recover = TableRecover()
        self.ocr_engine = ocr_engine

    def __call__(
        self,
        img: InputType,
        ocr_result: Optional[List[Union[List[List[float]], str, str]]] = None,
        **kwargs,
    ) -> WiredTableOutput:
        s = time.perf_counter()
        need_ocr = True
        col_threshold = 15
        row_threshold = 10
        if kwargs:
            need_ocr = kwargs.get("need_ocr", True)
            col_threshold = kwargs.get("col_threshold", 15)
            row_threshold = kwargs.get("row_threshold", 10)
        img = self.load_img(img)
        polygons, rotated_polygons = self.table_structure(img, **kwargs)
        if polygons is None:
            # logging.warning("polygons is None.")
            return WiredTableOutput("", None, None, 0.0)

        try:
            table_res, logi_points = self.table_recover(
                rotated_polygons, row_threshold, col_threshold
            )
            # Convert the corner order from counter-clockwise to clockwise so the later
            # steps line up with the wireless-table pipeline.
            polygons[:, 1, :], polygons[:, 3, :] = (
                polygons[:, 3, :].copy(),
                polygons[:, 1, :].copy(),
            )
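            # (Assuming the first corner is the top-left point, swapping corners 1 and 3
            # turns [top-left, bottom-left, bottom-right, top-right] into
            # [top-left, top-right, bottom-right, bottom-left], i.e. clockwise order.)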
            if not need_ocr:
                sorted_polygons, idx_list = sorted_ocr_boxes(
                    [box_4_2_poly_to_box_4_1(box) for box in polygons]
                )
                return WiredTableOutput(
                    "",
                    sorted_polygons,
                    logi_points[idx_list],
                    time.perf_counter() - s,
                )
            cell_box_det_map, not_match_orc_boxes = match_ocr_cell(ocr_result, polygons)
            # If some detected cells have no OCR result, run recognition on them directly.
            cell_box_det_map = self.fill_blank_rec(img, polygons, cell_box_det_map)
            # Convert to an intermediate format: fix the box coordinates and merge the
            # physical boxes, logical boxes and OCR boxes into one dict for later steps.
            t_rec_ocr_list = self.transform_res(cell_box_det_map, polygons, logi_points)
            # Sort the OCR results inside each cell and merge those on the same row, so the
            # output HTML keeps the line breaks of the original text.
            t_rec_ocr_list = self.sort_and_gather_ocr_res(t_rec_ocr_list)
            logi_points = [t_box_ocr["t_logic_box"] for t_box_ocr in t_rec_ocr_list]
            cell_box_det_map = {
                i: [ocr_box_and_text[1] for ocr_box_and_text in t_box_ocr["t_ocr_res"]]
                for i, t_box_ocr in enumerate(t_rec_ocr_list)
            }
            pred_html = plot_html_table(logi_points, cell_box_det_map)
            polygons = np.array(polygons).reshape(-1, 8)
            logi_points = np.array(logi_points)
            elapse = time.perf_counter() - s
        except Exception:
            logging.warning(traceback.format_exc())
            return WiredTableOutput("", None, None, 0.0)
        return WiredTableOutput(pred_html, polygons, logi_points, elapse)

    def transform_res(
        self,
        cell_box_det_map: Dict[int, List[Any]],
        polygons: np.ndarray,
        logi_points: List[np.ndarray],
    ) -> List[Dict[str, Any]]:
        res = []
        for i in range(len(polygons)):
            ocr_res_list = cell_box_det_map.get(i)
            if not ocr_res_list:
                continue
            xmin = min([ocr_box[0][0][0] for ocr_box in ocr_res_list])
            ymin = min([ocr_box[0][0][1] for ocr_box in ocr_res_list])
            xmax = max([ocr_box[0][2][0] for ocr_box in ocr_res_list])
            ymax = max([ocr_box[0][2][1] for ocr_box in ocr_res_list])
            dict_res = {
                # xmin, ymin, xmax, ymax
                "t_box": [xmin, ymin, xmax, ymax],
                # row_start, row_end, col_start, col_end
                "t_logic_box": logi_points[i].tolist(),
                # [[xmin, ymin, xmax, ymax], text]
                "t_ocr_res": [
                    [box_4_2_poly_to_box_4_1(ocr_det[0]), ocr_det[1]]
                    for ocr_det in ocr_res_list
                ],
            }
            res.append(dict_res)
        return res

    def sort_and_gather_ocr_res(self, res):
        for dict_res in res:
            _, sorted_idx = sorted_ocr_boxes(
                [ocr_det[0] for ocr_det in dict_res["t_ocr_res"]], threhold=0.3
            )
            dict_res["t_ocr_res"] = [dict_res["t_ocr_res"][i] for i in sorted_idx]
            dict_res["t_ocr_res"] = gather_ocr_list_by_row(
                dict_res["t_ocr_res"], threhold=0.3
            )
        return res

    # def fill_blank_rec(
    #     self,
    #     img: np.ndarray,
    #     sorted_polygons: np.ndarray,
    #     cell_box_map: Dict[int, List[str]],
    # ) -> Dict[int, List[Any]]:
    #     """Find cells whose poly has no OCR result and feed the poly region directly to recognition."""
    #     for i in range(sorted_polygons.shape[0]):
    #         if cell_box_map.get(i):
    #             continue
    #         box = sorted_polygons[i]
    #         cell_box_map[i] = [[box, "", 1]]
    #         continue
    #     return cell_box_map

    def fill_blank_rec(
        self,
        img: np.ndarray,
        sorted_polygons: np.ndarray,
        cell_box_map: Dict[int, List[str]],
    ) -> Dict[int, List[Any]]:
        """Find cells whose poly has no OCR result and feed the poly region directly to recognition."""
        bgr_img = cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
        img_crop_info_list = []
        img_crop_list = []
        for i in range(sorted_polygons.shape[0]):
            if cell_box_map.get(i):
                continue
            box = sorted_polygons[i]
            if self.ocr_engine is None:
                logger.warning(f"No OCR engine provided for box {i}: {box}")
                continue
            # Crop the corresponding region from the image, shrinking it by one pixel on each side.
            x1, y1, x2, y2 = int(box[0][0]) + 1, int(box[0][1]) + 1, int(box[2][0]) - 1, int(box[2][1]) - 1
            if x1 >= x2 or y1 >= y2 or x1 < 0 or y1 < 0:
                # logger.warning(f"Invalid box coordinates: {x1, y1, x2, y2}")
                continue
            # Skip cells with an extreme aspect ratio.
            if (x2 - x1) / (y2 - y1) > 20 or (y2 - y1) / (x2 - x1) > 20:
                # logger.warning(f"Box {i} has invalid aspect ratio: {x1, y1, x2, y2}")
                continue
            img_crop = bgr_img[int(y1):int(y2), int(x1):int(x2)]
            # Compute the contrast of the crop; cells at or below 0.17 are treated as empty and skipped.
            if calculate_contrast(img_crop, img_mode='bgr') <= 0.17:
                cell_box_map[i] = [[box, "", 0.1]]
                # logger.debug(f"Box {i} skipped due to low contrast.")
                continue
            img_crop_list.append(img_crop)
            img_crop_info_list.append([i, box])

        if len(img_crop_list) > 0:
            # Run text recognition on all crops in one batch.
            ocr_result = self.ocr_engine.ocr(img_crop_list, det=False)
            # ocr_result = [[]]
            # for crop_img in img_crop_list:
            #     tmp_ocr_result = self.ocr_engine.ocr(crop_img)
            #     if tmp_ocr_result[0] and len(tmp_ocr_result[0]) > 0 and isinstance(tmp_ocr_result[0], list) and len(tmp_ocr_result[0][0]) == 2:
            #         ocr_result[0].append(tmp_ocr_result[0][0][1])
            #     else:
            #         ocr_result[0].append(("", 0.0))
            if not ocr_result or not isinstance(ocr_result, list) or len(ocr_result) == 0:
                logger.warning("OCR engine returned no results or invalid result for image crops.")
                return cell_box_map
            ocr_res_list = ocr_result[0]
            if not isinstance(ocr_res_list, list) or len(ocr_res_list) != len(img_crop_list):
                logger.warning("OCR result list length does not match image crop list length.")
                return cell_box_map
            for j, ocr_res in enumerate(ocr_res_list):
                img_crop_info_list[j].append(ocr_res)
            for i, box, ocr_res in img_crop_info_list:
                # Process the OCR result for this crop.
                ocr_text, ocr_score = ocr_res
                # logger.debug(f"OCR result for box {i}: {ocr_text} with score {ocr_score}")
                if ocr_score < 0.6 or ocr_text in ['1', '口', '■', '(204号', '(20', '(2', '(2号', '(20号', '号', '(204']:
                    # logger.warning(f"Low confidence OCR result for box {i}: {ocr_text} with score {ocr_score}")
                    box = sorted_polygons[i]
                    cell_box_map[i] = [[box, "", 0.1]]
                    continue
                cell_box_map[i] = [[box, ocr_text, ocr_score]]
        return cell_box_map


def escape_html(input_string):
    """Escape HTML entities."""
    return html.escape(input_string)


def count_table_cells_physical(html_code):
    """Count the physical cells of a table (a merged cell counts as one)."""
    if not html_code:
        return 0
    # Simply count the <td> and <th> tags.
    html_lower = html_code.lower()
    td_count = html_lower.count('<td')
    th_count = html_lower.count('<th')
    return td_count + th_count
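# Illustrative example (not from the original source): for a 2x2 table whose first row is a
# single merged header cell,
#   <table><tr><td colspan="2">h</td></tr><tr><td>a</td><td>b</td></tr></table>
# count_table_cells_physical() returns 3, because the merged header is one physical <td>.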


class UnetTableModel:
    def __init__(self, ocr_engine):
        model_path = os.path.join(auto_download_and_get_model_root_path(ModelPath.unet_structure), ModelPath.unet_structure)
        wired_input_args = WiredTableInput(model_path=model_path)
        self.wired_table_model = WiredTableRecognition(wired_input_args, ocr_engine)
        self.ocr_engine = ocr_engine

    def predict(self, input_img, ocr_result, wireless_html_code):
        if isinstance(input_img, Image.Image):
            np_img = np.asarray(input_img)
        elif isinstance(input_img, np.ndarray):
            np_img = input_img
        else:
            raise ValueError("Input must be a pillow object or a numpy array.")
        bgr_img = cv2.cvtColor(np_img, cv2.COLOR_RGB2BGR)

        if ocr_result is None:
            ocr_result = self.ocr_engine.ocr(bgr_img)[0]
        ocr_result = [
            [item[0], escape_html(item[1][0]), item[1][1]]
            for item in ocr_result
            if len(item) == 2 and isinstance(item[1], tuple)
        ]

        try:
            wired_table_results = self.wired_table_model(np_img, ocr_result)
            # viser = VisTable()
            # save_html_path = f"outputs/output.html"
            # save_drawed_path = f"outputs/output_table_vis.jpg"
            # save_logic_path = (
            #     f"outputs/output_table_vis_logic.jpg"
            # )
            # vis_imged = viser(
            #     np_img, wired_table_results, save_html_path, save_drawed_path, save_logic_path
            # )
            wired_html_code = wired_table_results.pred_html
            wired_len = count_table_cells_physical(wired_html_code)
            wireless_len = count_table_cells_physical(wireless_html_code)
            # Difference between the cell counts detected by the two models.
            gap_of_len = wireless_len - wired_len
            # logger.debug(f"wired table cell bboxes: {wired_len}, wireless table cell bboxes: {wireless_len}")

            # Use the OCR result to count how many recognized texts each model actually filled in.
            wireless_text_count = 0
            wired_text_count = 0
            for ocr_res in ocr_result:
                if ocr_res[1] in wireless_html_code:
                    wireless_text_count += 1
                if ocr_res[1] in wired_html_code:
                    wired_text_count += 1
            # logger.debug(f"wireless table ocr text count: {wireless_text_count}, wired table ocr text count: {wired_text_count}")

            # Use an HTML parser to count empty cells.
            wireless_soup = BeautifulSoup(wireless_html_code, 'html.parser') if wireless_html_code else BeautifulSoup("", 'html.parser')
            wired_soup = BeautifulSoup(wired_html_code, 'html.parser') if wired_html_code else BeautifulSoup("", 'html.parser')
            # A cell counts as empty when it has no text or only whitespace.
            wireless_blank_count = sum(1 for cell in wireless_soup.find_all(['td', 'th']) if not cell.text.strip())
            wired_blank_count = sum(1 for cell in wired_soup.find_all(['td', 'th']) if not cell.text.strip())
            # logger.debug(f"wireless table blank cell count: {wireless_blank_count}, wired table blank cell count: {wired_blank_count}")

            # Count non-empty cells.
            wireless_non_blank_count = wireless_len - wireless_blank_count
            wired_non_blank_count = wired_len - wired_blank_count

            # Only consider switching when the wireless table has more non-empty cells than the wired table.
            switch_flag = False
            if wireless_non_blank_count > wired_non_blank_count:
                # Assume the non-empty table is roughly square and use the square root of the
                # non-empty cell count as an estimate of the table scale.
                wired_table_scale = round(wired_non_blank_count ** 0.5)
                # logger.debug(f"wireless non-blank cell count: {wireless_non_blank_count}, wired non-blank cell count: {wired_non_blank_count}, wired table scale: {wired_table_scale}")
                # Switch to the wireless table only if it has at least one extra column (or more) of non-empty cells.
                wired_scale_plus_2_cols = wired_non_blank_count + (wired_table_scale * 2)
                wired_scale_squared_plus_2_rows = wired_table_scale * (wired_table_scale + 2)
                if (wireless_non_blank_count + 3) >= max(wired_scale_plus_2_cols, wired_scale_squared_plus_2_rows):
                    switch_flag = True
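                # Worked example (illustrative numbers, not from the original source): with
                # wired_non_blank_count = 12, wired_table_scale = round(12 ** 0.5) = 3, so the
                # thresholds are 12 + 3 * 2 = 18 and 3 * (3 + 2) = 15; switching then requires
                # wireless_non_blank_count + 3 >= 18, i.e. at least 15 non-empty wireless cells.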

            # Decide whether to use the wireless table model's result instead.
            if (
                switch_flag
                or (0 <= gap_of_len <= 5 and wired_len <= round(wireless_len * 0.75))  # the counts are close but the wired model found noticeably fewer cells
                or (gap_of_len == 0 and wired_len <= 4)  # the counts are identical and the table has at most 4 cells
                or (wired_text_count <= wireless_text_count * 0.6 and wireless_text_count >= 10)  # the wired model filled in far less text than the wireless model
            ):
                # logger.debug("fall back to wireless table model")
                html_code = wireless_html_code
            else:
                html_code = wired_html_code
            return html_code
        except Exception as e:
            logger.warning(e)
            return wireless_html_code
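

# Minimal usage sketch (illustrative, not part of the original module). It assumes an OCR engine
# object compatible with the ``ocr_engine.ocr(...)`` calls above and an HTML string produced by a
# wireless-table model; the engine and image path below are placeholders.
#
#     ocr_engine = ...  # any engine exposing .ocr(img) and .ocr(img_list, det=False)
#     model = UnetTableModel(ocr_engine)
#     img = Image.open("table_sample.jpg")  # hypothetical input image
#     html_code = model.predict(img, ocr_result=None, wireless_html_code="")
#     print(html_code)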