# self_modify.py
  1. import time
  2. import copy
  3. import base64
  4. import cv2
  5. import numpy as np
  6. from io import BytesIO
  7. from PIL import Image
  8. from paddleocr import PaddleOCR
  9. from paddleocr.ppocr.utils.logging import get_logger
  10. from paddleocr.ppocr.utils.utility import check_and_read, alpha_to_color, binarize_img
  11. from paddleocr.tools.infer.utility import draw_ocr_box_txt, get_rotate_crop_image, get_minarea_rect_crop
  12. from magic_pdf.libs.boxbase import __is_overlaps_y_exceeds_threshold
  13. from magic_pdf.pre_proc.ocr_dict_merge import merge_spans_to_line
  14. logger = get_logger()
  15. def img_decode(content: bytes):
  16. np_arr = np.frombuffer(content, dtype=np.uint8)
  17. return cv2.imdecode(np_arr, cv2.IMREAD_UNCHANGED)
  18. def check_img(img):
  19. if isinstance(img, bytes):
  20. img = img_decode(img)
  21. if isinstance(img, str):
  22. image_file = img
  23. img, flag_gif, flag_pdf = check_and_read(image_file)
  24. if not flag_gif and not flag_pdf:
  25. with open(image_file, 'rb') as f:
  26. img_str = f.read()
  27. img = img_decode(img_str)
  28. if img is None:
  29. try:
  30. buf = BytesIO()
  31. image = BytesIO(img_str)
  32. im = Image.open(image)
  33. rgb = im.convert('RGB')
  34. rgb.save(buf, 'jpeg')
  35. buf.seek(0)
  36. image_bytes = buf.read()
  37. data_base64 = str(base64.b64encode(image_bytes),
  38. encoding="utf-8")
  39. image_decode = base64.b64decode(data_base64)
  40. img_array = np.frombuffer(image_decode, np.uint8)
  41. img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
  42. except:
  43. logger.error("error in loading image:{}".format(image_file))
  44. return None
  45. if img is None:
  46. logger.error("error in loading image:{}".format(image_file))
  47. return None
  48. if isinstance(img, np.ndarray) and len(img.shape) == 2:
  49. img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
  50. return img
  51. def sorted_boxes(dt_boxes):
  52. """
  53. Sort text boxes in order from top to bottom, left to right
  54. args:
  55. dt_boxes(array):detected text boxes with shape [4, 2]
  56. return:
  57. sorted boxes(array) with shape [4, 2]
  58. """
  59. num_boxes = dt_boxes.shape[0]
  60. sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))
  61. _boxes = list(sorted_boxes)
  62. for i in range(num_boxes - 1):
  63. for j in range(i, -1, -1):
  64. if abs(_boxes[j + 1][0][1] - _boxes[j][0][1]) < 10 and \
  65. (_boxes[j + 1][0][0] < _boxes[j][0][0]):
  66. tmp = _boxes[j]
  67. _boxes[j] = _boxes[j + 1]
  68. _boxes[j + 1] = tmp
  69. else:
  70. break
  71. return _boxes
  72. def bbox_to_points(bbox):
  73. """ 将bbox格式转换为四个顶点的数组 """
  74. x0, y0, x1, y1 = bbox
  75. return np.array([[x0, y0], [x1, y0], [x1, y1], [x0, y1]]).astype('float32')
  76. def points_to_bbox(points):
  77. """ 将四个顶点的数组转换为bbox格式 """
  78. x0, y0 = points[0]
  79. x1, _ = points[1]
  80. _, y1 = points[2]
  81. return [x0, y0, x1, y1]
  82. def merge_intervals(intervals):
  83. # Sort the intervals based on the start value
  84. intervals.sort(key=lambda x: x[0])
  85. merged = []
  86. for interval in intervals:
  87. # If the list of merged intervals is empty or if the current
  88. # interval does not overlap with the previous, simply append it.
  89. if not merged or merged[-1][1] < interval[0]:
  90. merged.append(interval)
  91. else:
  92. # Otherwise, there is overlap, so we merge the current and previous intervals.
  93. merged[-1][1] = max(merged[-1][1], interval[1])
  94. return merged
  95. def remove_intervals(original, masks):
  96. # Merge all mask intervals
  97. merged_masks = merge_intervals(masks)
  98. result = []
  99. original_start, original_end = original
  100. for mask in merged_masks:
  101. mask_start, mask_end = mask
  102. # If the mask starts after the original range, ignore it
  103. if mask_start > original_end:
  104. continue
  105. # If the mask ends before the original range starts, ignore it
  106. if mask_end < original_start:
  107. continue
  108. # Remove the masked part from the original range
  109. if original_start < mask_start:
  110. result.append([original_start, mask_start - 1])
  111. original_start = max(mask_end + 1, original_start)
  112. # Add the remaining part of the original range, if any
  113. if original_start <= original_end:
  114. result.append([original_start, original_end])
  115. return result
  116. def update_det_boxes(dt_boxes, mfd_res):
  117. new_dt_boxes = []
  118. for text_box in dt_boxes:
  119. text_bbox = points_to_bbox(text_box)
  120. masks_list = []
  121. for mf_box in mfd_res:
  122. mf_bbox = mf_box['bbox']
  123. if __is_overlaps_y_exceeds_threshold(text_bbox, mf_bbox):
  124. masks_list.append([mf_bbox[0], mf_bbox[2]])
  125. text_x_range = [text_bbox[0], text_bbox[2]]
  126. text_remove_mask_range = remove_intervals(text_x_range, masks_list)
  127. temp_dt_box = []
  128. for text_remove_mask in text_remove_mask_range:
  129. temp_dt_box.append(bbox_to_points([text_remove_mask[0], text_bbox[1], text_remove_mask[1], text_bbox[3]]))
  130. if len(temp_dt_box) > 0:
  131. new_dt_boxes.extend(temp_dt_box)
  132. return new_dt_boxes
  133. def merge_overlapping_spans(spans):
  134. """
  135. Merges overlapping spans on the same line.
  136. :param spans: A list of span coordinates [(x1, y1, x2, y2), ...]
  137. :return: A list of merged spans
  138. """
  139. # Return an empty list if the input spans list is empty
  140. if not spans:
  141. return []
  142. # Sort spans by their starting x-coordinate
  143. spans.sort(key=lambda x: x[0])
  144. # Initialize the list of merged spans
  145. merged = []
  146. for span in spans:
  147. # Unpack span coordinates
  148. x1, y1, x2, y2 = span
  149. # If the merged list is empty or there's no horizontal overlap, add the span directly
  150. if not merged or merged[-1][2] < x1:
  151. merged.append(span)
  152. else:
  153. # If there is horizontal overlap, merge the current span with the previous one
  154. last_span = merged.pop()
  155. # Update the merged span's top-left corner to the smaller (x1, y1) and bottom-right to the larger (x2, y2)
  156. x1 = min(last_span[0], x1)
  157. y1 = min(last_span[1], y1)
  158. x2 = max(last_span[2], x2)
  159. y2 = max(last_span[3], y2)
  160. # Add the merged span back to the list
  161. merged.append((x1, y1, x2, y2))
  162. # Return the list of merged spans
  163. return merged
  164. def merge_det_boxes(dt_boxes):
  165. """
  166. Merge detection boxes.
  167. This function takes a list of detected bounding boxes, each represented by four corner points.
  168. The goal is to merge these bounding boxes into larger text regions.
  169. Parameters:
  170. dt_boxes (list): A list containing multiple text detection boxes, where each box is defined by four corner points.
  171. Returns:
  172. list: A list containing the merged text regions, where each region is represented by four corner points.
  173. """
  174. # Convert the detection boxes into a dictionary format with bounding boxes and type
  175. dt_boxes_dict_list = []
  176. for text_box in dt_boxes:
  177. text_bbox = points_to_bbox(text_box)
  178. text_box_dict = {
  179. 'bbox': text_bbox,
  180. 'type': 'text',
  181. }
  182. dt_boxes_dict_list.append(text_box_dict)
  183. # Merge adjacent text regions into lines
  184. lines = merge_spans_to_line(dt_boxes_dict_list)
  185. # Initialize a new list for storing the merged text regions
  186. new_dt_boxes = []
  187. for line in lines:
  188. line_bbox_list = []
  189. for span in line:
  190. line_bbox_list.append(span['bbox'])
  191. # Merge overlapping text regions within the same line
  192. merged_spans = merge_overlapping_spans(line_bbox_list)
  193. # Convert the merged text regions back to point format and add them to the new detection box list
  194. for span in merged_spans:
  195. new_dt_boxes.append(bbox_to_points(span))
  196. return new_dt_boxes
class ModifiedPaddleOCR(PaddleOCR):
    # PaddleOCR subclass that, in __call__, merges detection boxes into text
    # lines and optionally splits them around formula-detection (mfd) results
    # before recognition.

    def ocr(self, img, det=True, rec=True, cls=True, bin=False, inv=False, mfd_res=None, alpha_color=(255, 255, 255)):
        """
        OCR with PaddleOCR
        args:
            img: img for OCR, support ndarray, img_path and list or ndarray
            det: use text detection or not. If False, only rec will be exec. Default is True
            rec: use text recognition or not. If False, only det will be exec. Default is True
            cls: use angle classifier or not. Default is True. If True, the text with rotation of 180 degrees can be recognized. If no text is rotated by 180 degrees, use cls=False to get better performance. Text with rotation of 90 or 270 degrees can be recognized even if cls=False.
            bin: binarize image to black and white. Default is False.
            inv: invert image colors. Default is False.
            alpha_color: set RGB color Tuple for transparent parts replacement. Default is pure white.
            mfd_res: optional formula-detection results forwarded to __call__;
                when given, text boxes are split so they do not cross formulas.
        """
        assert isinstance(img, (np.ndarray, list, str, bytes))
        if isinstance(img, list) and det == True:
            logger.error('When input a list of images, det must be false')
            # NOTE(review): terminates the whole process on a bad argument
            # combination — consider raising ValueError instead.
            exit(0)
        if cls == True and self.use_angle_cls == False:
            pass
            # logger.warning(
            # 'Since the angle classifier is not initialized, it will not be used during the forward process'
            # )
        img = check_img(img)
        # for infer pdf file
        if isinstance(img, list):
            # check_img returned one image per pdf page; clamp to page_num pages.
            if self.page_num > len(img) or self.page_num == 0:
                self.page_num = len(img)
            imgs = img[:self.page_num]
        else:
            imgs = [img]

        def preprocess_image(_image):
            # Flatten transparency, then apply the optional invert/binarize switches.
            _image = alpha_to_color(_image, alpha_color)
            if inv:
                _image = cv2.bitwise_not(_image)
            if bin:
                _image = binarize_img(_image)
            return _image

        if det and rec:
            # Full pipeline: detection + (optional) classification + recognition.
            ocr_res = []
            for idx, img in enumerate(imgs):
                img = preprocess_image(img)
                dt_boxes, rec_res, _ = self.__call__(img, cls, mfd_res=mfd_res)
                if not dt_boxes and not rec_res:
                    ocr_res.append(None)
                    continue
                tmp_res = [[box.tolist(), res]
                           for box, res in zip(dt_boxes, rec_res)]
                ocr_res.append(tmp_res)
            return ocr_res
        elif det and not rec:
            # Detection only: return box coordinates per image (None when nothing found).
            ocr_res = []
            for idx, img in enumerate(imgs):
                img = preprocess_image(img)
                dt_boxes, elapse = self.text_detector(img)
                if not dt_boxes:
                    ocr_res.append(None)
                    continue
                tmp_res = [box.tolist() for box in dt_boxes]
                ocr_res.append(tmp_res)
            return ocr_res
        else:
            # Recognition and/or classification only, on pre-cropped images.
            ocr_res = []
            cls_res = []
            for idx, img in enumerate(imgs):
                if not isinstance(img, list):
                    img = preprocess_image(img)
                    img = [img]
                if self.use_angle_cls and cls:
                    img, cls_res_tmp, elapse = self.text_classifier(img)
                    if not rec:
                        cls_res.append(cls_res_tmp)
                rec_res, elapse = self.text_recognizer(img)
                ocr_res.append(rec_res)
            if not rec:
                return cls_res
            return ocr_res

    def __call__(self, img, cls=True, mfd_res=None):
        # Run det -> sort/merge boxes -> (split by formulas) -> crop -> cls -> rec
        # on a single image. Returns (filtered boxes, filtered rec results,
        # timing dict); (None, None, time_dict) when there is no image or no boxes.
        time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}

        if img is None:
            logger.debug("no valid image provided")
            return None, None, time_dict

        start = time.time()
        # Keep an untouched copy for cropping; the detector may modify `img`.
        ori_im = img.copy()
        dt_boxes, elapse = self.text_detector(img)
        time_dict['det'] = elapse

        if dt_boxes is None:
            logger.debug("no dt_boxes found, elapsed : {}".format(elapse))
            end = time.time()
            time_dict['all'] = end - start
            return None, None, time_dict
        else:
            logger.debug("dt_boxes num : {}, elapsed : {}".format(
                len(dt_boxes), elapse))

        img_crop_list = []
        dt_boxes = sorted_boxes(dt_boxes)
        # Merge boxes belonging to the same text line before cropping.
        dt_boxes = merge_det_boxes(dt_boxes)

        if mfd_res:
            # Split text boxes around detected formulas so formulas are not OCR'd as text.
            bef = time.time()
            dt_boxes = update_det_boxes(dt_boxes, mfd_res)
            aft = time.time()
            logger.debug("split text box by formula, new dt_boxes num : {}, elapsed : {}".format(
                len(dt_boxes), aft - bef))

        for bno in range(len(dt_boxes)):
            # deepcopy: the crop helpers may mutate the box in place.
            tmp_box = copy.deepcopy(dt_boxes[bno])
            if self.args.det_box_type == "quad":
                img_crop = get_rotate_crop_image(ori_im, tmp_box)
            else:
                img_crop = get_minarea_rect_crop(ori_im, tmp_box)
            img_crop_list.append(img_crop)

        if self.use_angle_cls and cls:
            # Angle classifier may rotate crops 180 degrees before recognition.
            img_crop_list, angle_list, elapse = self.text_classifier(
                img_crop_list)
            time_dict['cls'] = elapse
            logger.debug("cls num : {}, elapsed : {}".format(
                len(img_crop_list), elapse))

        rec_res, elapse = self.text_recognizer(img_crop_list)
        time_dict['rec'] = elapse
        logger.debug("rec_res num : {}, elapsed : {}".format(
            len(rec_res), elapse))
        if self.args.save_crop_res:
            self.draw_crop_rec_res(self.args.crop_res_save_dir, img_crop_list,
                                   rec_res)
        # Drop results whose recognition score is below the confidence threshold.
        filter_boxes, filter_rec_res = [], []
        for box, rec_result in zip(dt_boxes, rec_res):
            text, score = rec_result
            if score >= self.drop_score:
                filter_boxes.append(box)
                filter_rec_res.append(rec_result)
        end = time.time()
        time_dict['all'] = end - start
        return filter_boxes, filter_rec_res, time_dict