# self_modify.py

import time
import copy
import base64
import cv2
import numpy as np
from io import BytesIO
from PIL import Image
from paddleocr import PaddleOCR
from paddleocr.ppocr.utils.logging import get_logger
from paddleocr.ppocr.utils.utility import check_and_read, alpha_to_color, binarize_img
from paddleocr.tools.infer.utility import draw_ocr_box_txt, get_rotate_crop_image, get_minarea_rect_crop

from magic_pdf.libs.boxbase import __is_overlaps_y_exceeds_threshold

logger = get_logger()


def img_decode(content: bytes):
    np_arr = np.frombuffer(content, dtype=np.uint8)
    return cv2.imdecode(np_arr, cv2.IMREAD_UNCHANGED)


def check_img(img):
    if isinstance(img, bytes):
        img = img_decode(img)
    if isinstance(img, str):
        image_file = img
        img, flag_gif, flag_pdf = check_and_read(image_file)
        if not flag_gif and not flag_pdf:
            with open(image_file, 'rb') as f:
                img_str = f.read()
                img = img_decode(img_str)
            if img is None:
                # cv2 could not decode the raw bytes; fall back to PIL,
                # re-encode as JPEG, and try decoding again.
                try:
                    buf = BytesIO()
                    image = BytesIO(img_str)
                    im = Image.open(image)
                    rgb = im.convert('RGB')
                    rgb.save(buf, 'jpeg')
                    buf.seek(0)
                    image_bytes = buf.read()
                    data_base64 = str(base64.b64encode(image_bytes),
                                      encoding="utf-8")
                    image_decode = base64.b64decode(data_base64)
                    img_array = np.frombuffer(image_decode, np.uint8)
                    img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
                except Exception:
                    logger.error("error in loading image:{}".format(image_file))
                    return None
        if img is None:
            logger.error("error in loading image:{}".format(image_file))
            return None
    if isinstance(img, np.ndarray) and len(img.shape) == 2:
        # Promote grayscale input to 3-channel BGR for the detector.
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
    return img
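
# A minimal sketch of what check_img normalizes (the path below is
# hypothetical): a single image given as bytes, a path, or an ndarray comes
# back as a 3-channel BGR ndarray (or None on failure); a PDF path yields a
# list of page images via check_and_read.
#
#   img = check_img('page_1.png')                      # path
#   img = check_img(open('page_1.png', 'rb').read())   # raw bytes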


def sorted_boxes(dt_boxes):
    """
    Sort text boxes in order from top to bottom, left to right.
    args:
        dt_boxes(array): detected text boxes with shape (N, 4, 2)
    return:
        sorted boxes(list): boxes with shape (4, 2), in reading order
    """
    num_boxes = dt_boxes.shape[0]
    sorted_boxes = sorted(dt_boxes, key=lambda x: (x[0][1], x[0][0]))
    _boxes = list(sorted_boxes)

    for i in range(num_boxes - 1):
        for j in range(i, -1, -1):
            # Boxes whose top edges are within 10 px belong to the same
            # text line; order them by x instead of y.
            if abs(_boxes[j + 1][0][1] - _boxes[j][0][1]) < 10 and \
                    (_boxes[j + 1][0][0] < _boxes[j][0][0]):
                tmp = _boxes[j]
                _boxes[j] = _boxes[j + 1]
                _boxes[j + 1] = tmp
            else:
                break
    return _boxes
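
# Illustrative trace of sorted_boxes (coordinates invented): top edges
# within 10 px are treated as one text line and ordered left to right.
#
#   boxes = np.array([
#       [[120, 33], [200, 33], [200, 55], [120, 55]],   # right box, y = 33
#       [[10, 30], [90, 30], [90, 52], [10, 52]],       # left box,  y = 30
#   ], dtype=np.float32)
#   sorted_boxes(boxes)  # -> left box first, then the right box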


def bbox_to_points(bbox):
    """Convert a bbox [x0, y0, x1, y1] to an array of its four corner points."""
    x0, y0, x1, y1 = bbox
    return np.array([[x0, y0], [x1, y0], [x1, y1], [x0, y1]]).astype('float32')


def points_to_bbox(points):
    """Convert an array of four corner points back to bbox [x0, y0, x1, y1]."""
    x0, y0 = points[0]
    x1, _ = points[1]
    _, y1 = points[2]
    return [x0, y0, x1, y1]
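
# Round trip between the two geometries used below (values illustrative):
#
#   bbox_to_points([10, 20, 110, 40])
#   # -> [[10., 20.], [110., 20.], [110., 40.], [10., 40.]] (float32)
#   points_to_bbox(bbox_to_points([10, 20, 110, 40]))
#   # -> [10.0, 20.0, 110.0, 40.0]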


def merge_intervals(intervals):
    # Sort the intervals based on the start value
    intervals.sort(key=lambda x: x[0])

    merged = []
    for interval in intervals:
        # If the list of merged intervals is empty or if the current
        # interval does not overlap with the previous, simply append it.
        if not merged or merged[-1][1] < interval[0]:
            merged.append(interval)
        else:
            # Otherwise, there is overlap, so we merge the current and
            # previous intervals.
            merged[-1][1] = max(merged[-1][1], interval[1])
    return merged
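
# Worked example (intervals invented): overlapping spans are fused,
# disjoint spans are kept.
#
#   merge_intervals([[1, 3], [2, 6], [8, 10]])  # -> [[1, 6], [8, 10]]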


def remove_intervals(original, masks):
    # Merge all mask intervals
    merged_masks = merge_intervals(masks)

    result = []
    original_start, original_end = original

    for mask in merged_masks:
        mask_start, mask_end = mask

        # If the mask starts after the original range, ignore it
        if mask_start > original_end:
            continue

        # If the mask ends before the original range starts, ignore it
        if mask_end < original_start:
            continue

        # Remove the masked part from the original range
        if original_start < mask_start:
            result.append([original_start, mask_start - 1])

        original_start = max(mask_end + 1, original_start)

    # Add the remaining part of the original range, if any
    if original_start <= original_end:
        result.append([original_start, original_end])

    return result
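
# Worked example (numbers invented): subtracting two formula spans from a
# text line's x-range leaves the uncovered sub-ranges.
#
#   remove_intervals([0, 100], [[20, 30], [50, 60]])
#   # -> [[0, 19], [31, 49], [61, 100]]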


def update_det_boxes(dt_boxes, mfd_res):
    new_dt_boxes = []
    for text_box in dt_boxes:
        text_bbox = points_to_bbox(text_box)
        masks_list = []
        for mf_box in mfd_res:
            mf_bbox = mf_box['bbox']
            # Collect the x-ranges of formulas that share a line with this text box.
            if __is_overlaps_y_exceeds_threshold(text_bbox, mf_bbox):
                masks_list.append([mf_bbox[0], mf_bbox[2]])
        text_x_range = [text_bbox[0], text_bbox[2]]
        text_remove_mask_range = remove_intervals(text_x_range, masks_list)
        # Re-emit one text box per uncovered x-range, keeping the original y-range.
        temp_dt_box = []
        for text_remove_mask in text_remove_mask_range:
            temp_dt_box.append(bbox_to_points([text_remove_mask[0], text_bbox[1],
                                               text_remove_mask[1], text_bbox[3]]))
        if len(temp_dt_box) > 0:
            new_dt_boxes.extend(temp_dt_box)
    return new_dt_boxes
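
# Sketch of the mfd_res shape this function expects (inferred from the
# usage above, not a documented contract): a list of dicts with a 'bbox'
# key in [x0, y0, x1, y1] form.
#
#   mfd_res = [{'bbox': [40, 10, 60, 30]}]
#   # any text box sharing that y-range is split into the pieces left and
#   # right of x-range [40, 60]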


class ModifiedPaddleOCR(PaddleOCR):

    def ocr(self, img, det=True, rec=True, cls=True, bin=False, inv=False,
            mfd_res=None, alpha_color=(255, 255, 255)):
        """
        OCR with PaddleOCR

        args:
            img: image for OCR; supports an ndarray, an image path, or a list of ndarrays
            det: use text detection or not. If False, only rec will be executed. Default is True.
            rec: use text recognition or not. If False, only det will be executed. Default is True.
            cls: use angle classifier or not. Default is True. If True, text rotated by 180
                degrees can be recognized. If no text is rotated by 180 degrees, use cls=False
                for better performance. Text rotated by 90 or 270 degrees can be recognized
                even if cls=False.
            bin: binarize image to black and white. Default is False.
            inv: invert image colors. Default is False.
            mfd_res: formula-detection results; text boxes overlapping these regions are
                split so formula pixels are not fed to the recognizer.
            alpha_color: RGB color tuple used to replace transparent parts. Default is pure white.
        """
        assert isinstance(img, (np.ndarray, list, str, bytes))
        if isinstance(img, list) and det == True:
            logger.error('When the input is a list of images, det must be False')
            exit(0)
        if cls == True and self.use_angle_cls == False:
            pass
            # logger.warning(
            #     'Since the angle classifier is not initialized, it will not be used during the forward process'
            # )

        img = check_img(img)
        # for inferring a PDF file
        if isinstance(img, list):
            if self.page_num > len(img) or self.page_num == 0:
                self.page_num = len(img)
            imgs = img[:self.page_num]
        else:
            imgs = [img]

        def preprocess_image(_image):
            _image = alpha_to_color(_image, alpha_color)
            if inv:
                _image = cv2.bitwise_not(_image)
            if bin:
                _image = binarize_img(_image)
            return _image

        if det and rec:
            ocr_res = []
            for idx, img in enumerate(imgs):
                img = preprocess_image(img)
                dt_boxes, rec_res, _ = self.__call__(img, cls, mfd_res=mfd_res)
                if not dt_boxes and not rec_res:
                    ocr_res.append(None)
                    continue
                tmp_res = [[box.tolist(), res]
                           for box, res in zip(dt_boxes, rec_res)]
                ocr_res.append(tmp_res)
            return ocr_res
        elif det and not rec:
            ocr_res = []
            for idx, img in enumerate(imgs):
                img = preprocess_image(img)
                dt_boxes, elapse = self.text_detector(img)
                # text_detector returns an ndarray; avoid the ambiguous
                # truth-value check on a multi-element array.
                if dt_boxes is None or len(dt_boxes) == 0:
                    ocr_res.append(None)
                    continue
                tmp_res = [box.tolist() for box in dt_boxes]
                ocr_res.append(tmp_res)
            return ocr_res
        else:
            ocr_res = []
            cls_res = []
            for idx, img in enumerate(imgs):
                if not isinstance(img, list):
                    img = preprocess_image(img)
                    img = [img]
                if self.use_angle_cls and cls:
                    img, cls_res_tmp, elapse = self.text_classifier(img)
                    if not rec:
                        cls_res.append(cls_res_tmp)
                rec_res, elapse = self.text_recognizer(img)
                ocr_res.append(rec_res)
            if not rec:
                return cls_res
            return ocr_res
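
    # Hedged usage sketch (the image path is a placeholder; the constructor
    # arguments follow the upstream PaddleOCR API):
    #
    #   ocr_engine = ModifiedPaddleOCR(use_angle_cls=True)
    #   result = ocr_engine.ocr('page_1.png')            # det + rec
    #   boxes = ocr_engine.ocr('page_1.png', rec=False)  # det only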

    def __call__(self, img, cls=True, mfd_res=None):
        time_dict = {'det': 0, 'rec': 0, 'cls': 0, 'all': 0}

        if img is None:
            logger.debug("no valid image provided")
            return None, None, time_dict

        start = time.time()
        ori_im = img.copy()

        # Text detection
        dt_boxes, elapse = self.text_detector(img)
        time_dict['det'] = elapse
        if dt_boxes is None:
            logger.debug("no dt_boxes found, elapsed : {}".format(elapse))
            end = time.time()
            time_dict['all'] = end - start
            return None, None, time_dict
        else:
            logger.debug("dt_boxes num : {}, elapsed : {}".format(
                len(dt_boxes), elapse))

        img_crop_list = []
        dt_boxes = sorted_boxes(dt_boxes)

        if mfd_res:
            # Split text boxes around detected formula regions so the
            # recognizer never sees formula pixels.
            bef = time.time()
            dt_boxes = update_det_boxes(dt_boxes, mfd_res)
            aft = time.time()
            logger.debug("split text box by formula, new dt_boxes num : {}, elapsed : {}".format(
                len(dt_boxes), aft - bef))

        for bno in range(len(dt_boxes)):
            tmp_box = copy.deepcopy(dt_boxes[bno])
            if self.args.det_box_type == "quad":
                img_crop = get_rotate_crop_image(ori_im, tmp_box)
            else:
                img_crop = get_minarea_rect_crop(ori_im, tmp_box)
            img_crop_list.append(img_crop)

        # Optional angle classification on the cropped text lines
        if self.use_angle_cls and cls:
            img_crop_list, angle_list, elapse = self.text_classifier(
                img_crop_list)
            time_dict['cls'] = elapse
            logger.debug("cls num : {}, elapsed : {}".format(
                len(img_crop_list), elapse))

        # Text recognition
        rec_res, elapse = self.text_recognizer(img_crop_list)
        time_dict['rec'] = elapse
        logger.debug("rec_res num : {}, elapsed : {}".format(
            len(rec_res), elapse))

        if self.args.save_crop_res:
            self.draw_crop_rec_res(self.args.crop_res_save_dir, img_crop_list,
                                   rec_res)

        # Drop results below the confidence threshold
        filter_boxes, filter_rec_res = [], []
        for box, rec_result in zip(dt_boxes, rec_res):
            text, score = rec_result
            if score >= self.drop_score:
                filter_boxes.append(box)
                filter_rec_res.append(rec_result)

        end = time.time()
        time_dict['all'] = end - start
        return filter_boxes, filter_rec_res, time_dict
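
# A minimal end-to-end sketch of the formula-aware path (the image path and
# mfd_res are placeholders; a real mfd_res would come from a formula-detection
# model):
#
#   engine = ModifiedPaddleOCR(use_angle_cls=True)
#   img = check_img('page_1.png')
#   mfd_res = [{'bbox': [40, 10, 60, 30]}]
#   boxes, recs, timings = engine(img, cls=True, mfd_res=mfd_res)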