ocr_utils.py

# Copyright (c) Opendatalab. All rights reserved.
import copy

import cv2
import numpy as np

from magic_pdf.pre_proc.ocr_dict_merge import merge_spans_to_line
from magic_pdf.libs.boxbase import __is_overlaps_y_exceeds_threshold


def img_decode(content: bytes):
    """Decode raw image bytes into an OpenCV image, keeping any alpha channel."""
    np_arr = np.frombuffer(content, dtype=np.uint8)
    return cv2.imdecode(np_arr, cv2.IMREAD_UNCHANGED)


def check_img(img):
    """Normalize the input to a BGR ndarray: decode bytes and expand grayscale."""
    if isinstance(img, bytes):
        img = img_decode(img)
    if isinstance(img, np.ndarray) and len(img.shape) == 2:
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    return img


def alpha_to_color(img, alpha_color=(255, 255, 255)):
    """Blend a BGRA image onto a solid background color and drop the alpha channel."""
    if len(img.shape) == 3 and img.shape[2] == 4:
        B, G, R, A = cv2.split(img)
        alpha = A / 255
        R = (alpha_color[0] * (1 - alpha) + R * alpha).astype(np.uint8)
        G = (alpha_color[1] * (1 - alpha) + G * alpha).astype(np.uint8)
        B = (alpha_color[2] * (1 - alpha) + B * alpha).astype(np.uint8)
        img = cv2.merge((B, G, R))
    return img


def preprocess_image(_image):
    """Flatten transparency against a white background before OCR."""
    alpha_color = (255, 255, 255)
    _image = alpha_to_color(_image, alpha_color)
    return _image
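

# Illustrative sketch, not part of the original module: exercises img_decode,
# check_img and preprocess_image on synthetic arrays. A fully transparent RGBA
# image should come out as a plain white BGR image after alpha flattening.
def _example_preprocess_helpers():
    ok, buf = cv2.imencode('.png', np.zeros((2, 2, 3), dtype=np.uint8))
    assert ok and img_decode(buf.tobytes()).shape == (2, 2, 3)

    gray = np.full((8, 8), 128, dtype=np.uint8)
    assert check_img(gray).shape == (8, 8, 3)  # grayscale is expanded to BGR

    rgba = np.zeros((4, 4, 4), dtype=np.uint8)  # alpha channel is all zeros
    flattened = preprocess_image(rgba)
    assert flattened.shape == (4, 4, 3) and flattened.min() == 255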


def sorted_boxes(dt_boxes):
    """
    Sort text boxes in reading order: top to bottom, then left to right.
    args:
        dt_boxes(array): detected text boxes with shape [N, 4, 2]
    return:
        sorted boxes(list) of [4, 2] arrays
    """
    num_boxes = dt_boxes.shape[0]
    sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))
    _boxes = list(sorted_boxes)
    for i in range(num_boxes - 1):
        for j in range(i, -1, -1):
            # Boxes whose top edges are within 10 px are treated as the same line
            # and re-ordered by their left x-coordinate.
            if abs(_boxes[j + 1][0][1] - _boxes[j][0][1]) < 10 and \
                    (_boxes[j + 1][0][0] < _boxes[j][0][0]):
                tmp = _boxes[j]
                _boxes[j] = _boxes[j + 1]
                _boxes[j + 1] = tmp
            else:
                break
    return _boxes
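

# Illustrative sketch, not part of the original module: three boxes, two of which
# share a line (top edges within 10 px), come back in reading order.
def _example_sorted_boxes():
    boxes = np.array([
        [[60, 12], [120, 12], [120, 30], [60, 30]],  # line 1, right box
        [[10, 10], [50, 10], [50, 28], [10, 28]],    # line 1, left box
        [[15, 60], [80, 60], [80, 80], [15, 80]],    # line 2
    ], dtype=np.float32)
    ordered = sorted_boxes(boxes)
    assert [tuple(b[0]) for b in ordered] == [(10, 10), (60, 12), (15, 60)]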


def bbox_to_points(bbox):
    """Convert a bbox [x0, y0, x1, y1] into an array of four corner points."""
    x0, y0, x1, y1 = bbox
    return np.array([[x0, y0], [x1, y0], [x1, y1], [x0, y1]]).astype('float32')


def points_to_bbox(points):
    """Convert an array of four corner points back into bbox format [x0, y0, x1, y1]."""
    x0, y0 = points[0]
    x1, _ = points[1]
    _, y1 = points[2]
    return [x0, y0, x1, y1]
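

# Illustrative sketch, not part of the original module: bbox_to_points and
# points_to_bbox are inverse conversions for an axis-aligned box.
def _example_bbox_points_round_trip():
    pts = bbox_to_points([10, 20, 110, 60])
    assert pts.shape == (4, 2)
    assert points_to_bbox(pts) == [10, 20, 110, 60]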


def merge_intervals(intervals):
    # Sort the intervals based on the start value
    intervals.sort(key=lambda x: x[0])
    merged = []
    for interval in intervals:
        # If the list of merged intervals is empty or if the current
        # interval does not overlap with the previous, simply append it.
        if not merged or merged[-1][1] < interval[0]:
            merged.append(interval)
        else:
            # Otherwise, there is overlap, so we merge the current and previous intervals.
            merged[-1][1] = max(merged[-1][1], interval[1])
    return merged


def remove_intervals(original, masks):
    # Merge all mask intervals
    merged_masks = merge_intervals(masks)
    result = []
    original_start, original_end = original
    for mask in merged_masks:
        mask_start, mask_end = mask
        # If the mask starts after the original range, ignore it
        if mask_start > original_end:
            continue
        # If the mask ends before the original range starts, ignore it
        if mask_end < original_start:
            continue
        # Remove the masked part from the original range
        if original_start < mask_start:
            result.append([original_start, mask_start - 1])
        original_start = max(mask_end + 1, original_start)
    # Add the remaining part of the original range, if any
    if original_start <= original_end:
        result.append([original_start, original_end])
    return result
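

# Illustrative sketch, not part of the original module: merge_intervals collapses
# overlapping x-ranges, and remove_intervals subtracts them from a larger range
# (endpoints are inclusive, hence the -1/+1 around each mask).
def _example_interval_helpers():
    assert merge_intervals([[1, 3], [2, 6], [8, 10]]) == [[1, 6], [8, 10]]
    assert remove_intervals([0, 100], [[20, 30], [50, 60]]) == [[0, 19], [31, 49], [61, 100]]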


def update_det_boxes(dt_boxes, mfd_res):
    # Split text detection boxes around detected formula regions so that
    # formula areas are not sent to the text recognizer.
    new_dt_boxes = []
    angle_boxes_list = []
    for text_box in dt_boxes:
        # Rotated boxes are kept as-is and appended back at the end.
        if calculate_is_angle(text_box):
            angle_boxes_list.append(text_box)
            continue
        text_bbox = points_to_bbox(text_box)
        masks_list = []
        for mf_box in mfd_res:
            mf_bbox = mf_box['bbox']
            if __is_overlaps_y_exceeds_threshold(text_bbox, mf_bbox):
                masks_list.append([mf_bbox[0], mf_bbox[2]])
        text_x_range = [text_bbox[0], text_bbox[2]]
        text_remove_mask_range = remove_intervals(text_x_range, masks_list)
        temp_dt_box = []
        for text_remove_mask in text_remove_mask_range:
            temp_dt_box.append(bbox_to_points([text_remove_mask[0], text_bbox[1], text_remove_mask[1], text_bbox[3]]))
        if len(temp_dt_box) > 0:
            new_dt_boxes.extend(temp_dt_box)
    new_dt_boxes.extend(angle_boxes_list)
    return new_dt_boxes
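

# Illustrative sketch, not part of the original module: a text box whose x-range
# is pierced by a formula box gets split into the two remaining segments. This
# assumes the imported __is_overlaps_y_exceeds_threshold treats a full y-overlap
# as exceeding its threshold.
def _example_update_det_boxes():
    text_boxes = [bbox_to_points([0, 0, 200, 20])]
    mfd_res = [{'bbox': [80, 0, 120, 20]}]
    split = update_det_boxes(text_boxes, mfd_res)
    assert [points_to_bbox(b) for b in split] == [[0, 0, 79, 20], [121, 0, 200, 20]]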


def merge_overlapping_spans(spans):
    """
    Merges overlapping spans on the same line.
    :param spans: A list of span coordinates [(x1, y1, x2, y2), ...]
    :return: A list of merged spans
    """
    # Return an empty list if the input spans list is empty
    if not spans:
        return []
    # Sort spans by their starting x-coordinate
    spans.sort(key=lambda x: x[0])
    # Initialize the list of merged spans
    merged = []
    for span in spans:
        # Unpack span coordinates
        x1, y1, x2, y2 = span
        # If the merged list is empty or there's no horizontal overlap, add the span directly
        if not merged or merged[-1][2] < x1:
            merged.append(span)
        else:
            # If there is horizontal overlap, merge the current span with the previous one
            last_span = merged.pop()
            # Update the merged span's top-left corner to the smaller (x1, y1) and bottom-right to the larger (x2, y2)
            x1 = min(last_span[0], x1)
            y1 = min(last_span[1], y1)
            x2 = max(last_span[2], x2)
            y2 = max(last_span[3], y2)
            # Add the merged span back to the list
            merged.append((x1, y1, x2, y2))
    # Return the list of merged spans
    return merged
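

# Illustrative sketch, not part of the original module: two spans that overlap
# horizontally are merged into their bounding span; a disjoint span is kept.
def _example_merge_overlapping_spans():
    spans = [(10, 5, 40, 20), (35, 6, 70, 22), (80, 5, 120, 21)]
    assert merge_overlapping_spans(spans) == [(10, 5, 70, 22), (80, 5, 120, 21)]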


def merge_det_boxes(dt_boxes):
    """
    Merge detection boxes.

    This function takes a list of detected bounding boxes, each represented by four corner points.
    The goal is to merge these bounding boxes into larger text regions.

    Parameters:
    dt_boxes (list): A list containing multiple text detection boxes, where each box is defined by four corner points.

    Returns:
    list: A list containing the merged text regions, where each region is represented by four corner points.
    """
    # Convert the detection boxes into a dictionary format with bounding boxes and type
    dt_boxes_dict_list = []
    angle_boxes_list = []
    for text_box in dt_boxes:
        text_bbox = points_to_bbox(text_box)
        if calculate_is_angle(text_box):
            angle_boxes_list.append(text_box)
            continue
        text_box_dict = {
            'bbox': text_bbox,
            'type': 'text',
        }
        dt_boxes_dict_list.append(text_box_dict)

    # Merge adjacent text regions into lines
    lines = merge_spans_to_line(dt_boxes_dict_list)

    # Initialize a new list for storing the merged text regions
    new_dt_boxes = []
    for line in lines:
        line_bbox_list = []
        for span in line:
            line_bbox_list.append(span['bbox'])
        # Merge overlapping text regions within the same line
        merged_spans = merge_overlapping_spans(line_bbox_list)
        # Convert the merged text regions back to point format and add them to the new detection box list
        for span in merged_spans:
            new_dt_boxes.append(bbox_to_points(span))
    new_dt_boxes.extend(angle_boxes_list)
    return new_dt_boxes
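

# Illustrative sketch, not part of the original module: with a single axis-aligned
# detection box there is nothing to merge, so the box survives unchanged. This
# assumes merge_spans_to_line (imported from magic_pdf) returns a single-span line
# for a single input span.
def _example_merge_det_boxes():
    box = bbox_to_points([10, 10, 110, 30])
    merged = merge_det_boxes([box])
    assert len(merged) == 1 and points_to_bbox(merged[0]) == [10, 10, 110, 30]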


def get_adjusted_mfdetrec_res(single_page_mfdetrec_res, useful_list):
    paste_x, paste_y, xmin, ymin, xmax, ymax, new_width, new_height = useful_list
    # Adjust the coordinates of the formula area
    adjusted_mfdetrec_res = []
    for mf_res in single_page_mfdetrec_res:
        mf_xmin, mf_ymin, mf_xmax, mf_ymax = mf_res["bbox"]
        # Adjust the coordinates of the formula area to the coordinates relative to the cropping area
        x0 = mf_xmin - xmin + paste_x
        y0 = mf_ymin - ymin + paste_y
        x1 = mf_xmax - xmin + paste_x
        y1 = mf_ymax - ymin + paste_y
        # Filter formula blocks outside the graph
        if any([x1 < 0, y1 < 0]) or any([x0 > new_width, y0 > new_height]):
            continue
        else:
            adjusted_mfdetrec_res.append({
                "bbox": [x0, y0, x1, y1],
            })
    return adjusted_mfdetrec_res
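

# Illustrative sketch, not part of the original module, with made-up crop values:
# useful_list is (paste_x, paste_y, xmin, ymin, xmax, ymax, new_width, new_height).
# A formula inside the crop is shifted into crop coordinates; one far outside is dropped.
def _example_get_adjusted_mfdetrec_res():
    useful_list = [50, 50, 100, 200, 900, 800, 800, 600]
    mfdet_res = [
        {'bbox': [150, 250, 300, 290]},      # inside the cropped region
        {'bbox': [2000, 2000, 2100, 2050]},  # outside, filtered out
    ]
    adjusted = get_adjusted_mfdetrec_res(mfdet_res, useful_list)
    assert adjusted == [{'bbox': [100, 100, 250, 140]}]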


def get_ocr_result_list(ocr_res, useful_list, ocr_enable, new_image):
    paste_x, paste_y, xmin, ymin, xmax, ymax, new_width, new_height = useful_list
    ocr_result_list = []
    ori_im = new_image.copy()
    for box_ocr_res in ocr_res:
        if len(box_ocr_res) == 2:
            p1, p2, p3, p4 = box_ocr_res[0]
            text, score = box_ocr_res[1]
            # logger.info(f"text: {text}, score: {score}")
            if score < 0.6:  # Filter out low-confidence results
                continue
        else:
            p1, p2, p3, p4 = box_ocr_res
            text, score = "", 1

        if ocr_enable:
            tmp_box = copy.deepcopy(np.array([p1, p2, p3, p4]).astype('float32'))
            img_crop = get_rotate_crop_image(ori_im, tmp_box)

        # average_angle_degrees = calculate_angle_degrees(box_ocr_res[0])
        # if average_angle_degrees > 0.5:
        poly = [p1, p2, p3, p4]
        if calculate_is_angle(poly):
            # logger.info(f"average_angle_degrees: {average_angle_degrees}, text: {text}")
            # The box's angle to the x-axis exceeds the threshold (about 0.5 degrees),
            # so correct its boundaries: compute the geometric center and rebuild an
            # axis-aligned box around it.
            x_center = sum(point[0] for point in poly) / 4
            y_center = sum(point[1] for point in poly) / 4
            new_height = ((p4[1] - p1[1]) + (p3[1] - p2[1])) / 2
            new_width = p3[0] - p1[0]
            p1 = [x_center - new_width / 2, y_center - new_height / 2]
            p2 = [x_center + new_width / 2, y_center - new_height / 2]
            p3 = [x_center + new_width / 2, y_center + new_height / 2]
            p4 = [x_center - new_width / 2, y_center + new_height / 2]

        # Convert the coordinates back to the original coordinate system
        p1 = [p1[0] - paste_x + xmin, p1[1] - paste_y + ymin]
        p2 = [p2[0] - paste_x + xmin, p2[1] - paste_y + ymin]
        p3 = [p3[0] - paste_x + xmin, p3[1] - paste_y + ymin]
        p4 = [p4[0] - paste_x + xmin, p4[1] - paste_y + ymin]

        if ocr_enable:
            ocr_result_list.append({
                'category_id': 15,
                'poly': p1 + p2 + p3 + p4,
                'score': float(round(score, 2)),
                'text': text,
                'np_img': img_crop,
            })
        else:
            ocr_result_list.append({
                'category_id': 15,
                'poly': p1 + p2 + p3 + p4,
                'score': float(round(score, 2)),
                'text': text,
            })
    return ocr_result_list
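

# Illustrative sketch, not part of the original module, using a zero-offset crop
# (paste at (0, 0), crop origin (0, 0)) so coordinates map back unchanged. With
# ocr_enable=False no crop image is attached, and the 0.6 score filter drops the
# low-confidence hit.
def _example_get_ocr_result_list():
    useful_list = [0, 0, 0, 0, 640, 480, 640, 480]
    canvas = np.zeros((480, 640, 3), dtype=np.uint8)
    ocr_res = [
        [[[10, 10], [110, 10], [110, 30], [10, 30]], ('hello', 0.95)],
        [[[10, 40], [110, 40], [110, 60], [10, 60]], ('noise', 0.30)],  # filtered
    ]
    results = get_ocr_result_list(ocr_res, useful_list, ocr_enable=False, new_image=canvas)
    assert len(results) == 1
    assert results[0]['text'] == 'hello' and results[0]['category_id'] == 15
    assert results[0]['poly'] == [10, 10, 110, 10, 110, 30, 10, 30]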


def calculate_is_angle(poly):
    # Heuristic: if the vertical span from p1 (top-left) to p3 (bottom-right)
    # deviates from the average left/right edge height by more than 20%,
    # the box is treated as rotated.
    p1, p2, p3, p4 = poly
    height = ((p4[1] - p1[1]) + (p3[1] - p2[1])) / 2
    if 0.8 * height <= (p3[1] - p1[1]) <= 1.2 * height:
        return False
    else:
        # logger.info((p3[1] - p1[1])/height)
        return True
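

# Illustrative sketch, not part of the original module: an axis-aligned quad is not
# flagged, while a visibly tilted quad is.
def _example_calculate_is_angle():
    axis_aligned = [[0, 0], [100, 0], [100, 20], [0, 20]]
    tilted = [[0, 0], [100, 30], [100, 50], [0, 20]]
    assert calculate_is_angle(axis_aligned) is False
    assert calculate_is_angle(tilted) is True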


def get_rotate_crop_image(img, points):
    '''
    img_height, img_width = img.shape[0:2]
    left = int(np.min(points[:, 0]))
    right = int(np.max(points[:, 0]))
    top = int(np.min(points[:, 1]))
    bottom = int(np.max(points[:, 1]))
    img_crop = img[top:bottom, left:right, :].copy()
    points[:, 0] = points[:, 0] - left
    points[:, 1] = points[:, 1] - top
    '''
    assert len(points) == 4, "shape of points must be 4*2"
    img_crop_width = int(
        max(
            np.linalg.norm(points[0] - points[1]),
            np.linalg.norm(points[2] - points[3])))
    img_crop_height = int(
        max(
            np.linalg.norm(points[0] - points[3]),
            np.linalg.norm(points[1] - points[2])))
    pts_std = np.float32([[0, 0], [img_crop_width, 0],
                          [img_crop_width, img_crop_height],
                          [0, img_crop_height]])
    M = cv2.getPerspectiveTransform(points, pts_std)
    dst_img = cv2.warpPerspective(
        img,
        M, (img_crop_width, img_crop_height),
        borderMode=cv2.BORDER_REPLICATE,
        flags=cv2.INTER_CUBIC)
    dst_img_height, dst_img_width = dst_img.shape[0:2]
    if dst_img_height * 1.0 / dst_img_width >= 1.5:
        dst_img = np.rot90(dst_img)
    return dst_img
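

# Illustrative sketch, not part of the original module: cropping an axis-aligned
# quad from a synthetic image yields a patch whose size matches the quad, and wide
# patches are not rotated by the aspect-ratio check.
def _example_get_rotate_crop_image():
    img = np.zeros((100, 200, 3), dtype=np.uint8)
    img[20:40, 30:130] = 255  # white block standing in for a text line
    quad = np.float32([[30, 20], [130, 20], [130, 40], [30, 40]])
    crop = get_rotate_crop_image(img, quad)
    assert crop.shape[:2] == (20, 100)
    assert crop.mean() > 200  # the crop is dominated by the white block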