db_postprocess.py

  1. """
  2. This code is refered from:
  3. https://github.com/WenmuZhou/DBNet.pytorch/blob/master/post_processing/seg_detector_representer.py
  4. """
  5. from __future__ import absolute_import
  6. from __future__ import division
  7. from __future__ import print_function
  8. import numpy as np
  9. import cv2
  10. import torch
  11. from shapely.geometry import Polygon
  12. import pyclipper
class DBPostProcess(object):
    """
    The post process for Differentiable Binarization (DB).
    """

    def __init__(self,
                 thresh=0.3,
                 box_thresh=0.7,
                 max_candidates=1000,
                 unclip_ratio=2.0,
                 use_dilation=False,
                 score_mode="fast",
                 **kwargs):
        self.thresh = thresh
        self.box_thresh = box_thresh
        self.max_candidates = max_candidates
        self.unclip_ratio = unclip_ratio
        self.min_size = 3
        self.score_mode = score_mode
        assert score_mode in [
            "slow", "fast"
        ], "Score mode must be in [slow, fast] but got: {}".format(score_mode)

        self.dilation_kernel = None if not use_dilation else np.array(
            [[1, 1], [1, 1]])

    def boxes_from_bitmap(self, pred, _bitmap, dest_width, dest_height):
        '''
        _bitmap: single map with shape (H, W),
            whose values are binarized as {0, 1}
        '''
        bitmap = _bitmap
        height, width = bitmap.shape

        # cv2.findContours returns 3 values in OpenCV 3.x and 2 values in 4.x.
        outs = cv2.findContours((bitmap * 255).astype(np.uint8), cv2.RETR_LIST,
                                cv2.CHAIN_APPROX_SIMPLE)
        if len(outs) == 3:
            img, contours, _ = outs[0], outs[1], outs[2]
        elif len(outs) == 2:
            contours, _ = outs[0], outs[1]

        num_contours = min(len(contours), self.max_candidates)

        boxes = []
        scores = []
        for index in range(num_contours):
            contour = contours[index]
            points, sside = self.get_mini_boxes(contour)
            if sside < self.min_size:
                continue
            points = np.array(points)
            if self.score_mode == "fast":
                score = self.box_score_fast(pred, points.reshape(-1, 2))
            else:
                score = self.box_score_slow(pred, contour)
            if self.box_thresh > score:
                continue

            # Expand the shrunk polygon back to full text size, then take its
            # minimum-area rectangle as the final box.
            box = self.unclip(points).reshape(-1, 1, 2)
            box, sside = self.get_mini_boxes(box)
            if sside < self.min_size + 2:
                continue
            box = np.array(box)

            # Rescale from the bitmap resolution to the original image size.
            box[:, 0] = np.clip(
                np.round(box[:, 0] / width * dest_width), 0, dest_width)
            box[:, 1] = np.clip(
                np.round(box[:, 1] / height * dest_height), 0, dest_height)
            boxes.append(box.astype(np.int16))
            scores.append(score)
        return np.array(boxes, dtype=np.int16), scores

    def unclip(self, box):
        unclip_ratio = self.unclip_ratio
        poly = Polygon(box)
        distance = poly.area * unclip_ratio / poly.length
        offset = pyclipper.PyclipperOffset()
        offset.AddPath(box, pyclipper.JT_ROUND, pyclipper.ET_CLOSEDPOLYGON)
        expanded = np.array(offset.Execute(distance))
        return expanded

    def get_mini_boxes(self, contour):
        bounding_box = cv2.minAreaRect(contour)
        points = sorted(list(cv2.boxPoints(bounding_box)), key=lambda x: x[0])

        index_1, index_2, index_3, index_4 = 0, 1, 2, 3
        if points[1][1] > points[0][1]:
            index_1 = 0
            index_4 = 1
        else:
            index_1 = 1
            index_4 = 0
        if points[3][1] > points[2][1]:
            index_2 = 2
            index_3 = 3
        else:
            index_2 = 3
            index_3 = 2

        box = [
            points[index_1], points[index_2], points[index_3], points[index_4]
        ]
        return box, min(bounding_box[1])

    def box_score_fast(self, bitmap, _box):
        '''
        box_score_fast: use the mean score inside the bounding box as the box score
        '''
        h, w = bitmap.shape[:2]
        box = _box.copy()
        xmin = np.clip(np.floor(box[:, 0].min()).astype(np.int32), 0, w - 1)
        xmax = np.clip(np.ceil(box[:, 0].max()).astype(np.int32), 0, w - 1)
        ymin = np.clip(np.floor(box[:, 1].min()).astype(np.int32), 0, h - 1)
        ymax = np.clip(np.ceil(box[:, 1].max()).astype(np.int32), 0, h - 1)

        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
        box[:, 0] = box[:, 0] - xmin
        box[:, 1] = box[:, 1] - ymin
        cv2.fillPoly(mask, box.reshape(1, -1, 2).astype(np.int32), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def box_score_slow(self, bitmap, contour):
        '''
        box_score_slow: use the mean score inside the polygon as the box score
        '''
        h, w = bitmap.shape[:2]
        contour = contour.copy()
        contour = np.reshape(contour, (-1, 2))

        xmin = np.clip(np.min(contour[:, 0]), 0, w - 1)
        xmax = np.clip(np.max(contour[:, 0]), 0, w - 1)
        ymin = np.clip(np.min(contour[:, 1]), 0, h - 1)
        ymax = np.clip(np.max(contour[:, 1]), 0, h - 1)

        mask = np.zeros((ymax - ymin + 1, xmax - xmin + 1), dtype=np.uint8)
        contour[:, 0] = contour[:, 0] - xmin
        contour[:, 1] = contour[:, 1] - ymin

        cv2.fillPoly(mask, contour.reshape(1, -1, 2).astype(np.int32), 1)
        return cv2.mean(bitmap[ymin:ymax + 1, xmin:xmax + 1], mask)[0]

    def __call__(self, outs_dict, shape_list):
        pred = outs_dict['maps']
        if isinstance(pred, torch.Tensor):
            pred = pred.cpu().numpy()
        # Channel 0 of the network output is the text probability map.
        pred = pred[:, 0, :, :]
        segmentation = pred > self.thresh

        boxes_batch = []
        for batch_index in range(pred.shape[0]):
            src_h, src_w, ratio_h, ratio_w = shape_list[batch_index]
            if self.dilation_kernel is not None:
                mask = cv2.dilate(
                    np.array(segmentation[batch_index]).astype(np.uint8),
                    self.dilation_kernel)
            else:
                mask = segmentation[batch_index]
            boxes, scores = self.boxes_from_bitmap(pred[batch_index], mask,
                                                   src_w, src_h)

            boxes_batch.append({'points': boxes})
        return boxes_batch
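

# ---------------------------------------------------------------------------
# Minimal usage sketch (not part of the original file): it feeds a synthetic
# probability map through DBPostProcess to show the expected input layout.
# The (N, 1, H, W) map shape, the 'maps' key, and the shape_list layout
# follow the conventions used above; the synthetic blob, image size, and
# threshold values are illustrative assumptions only.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # Fake network output: batch of 1, single channel, 160 x 160 probability
    # map with a bright rectangular region standing in for a text line.
    prob_map = np.zeros((1, 1, 160, 160), dtype=np.float32)
    prob_map[0, 0, 60:80, 30:130] = 0.9

    # shape_list carries (src_h, src_w, ratio_h, ratio_w) per image; here the
    # "source" image has the same size as the map, so the ratios are 1.
    shape_list = [(160, 160, 1.0, 1.0)]

    post_process = DBPostProcess(thresh=0.3, box_thresh=0.5, unclip_ratio=1.5)
    result = post_process({'maps': torch.from_numpy(prob_map)}, shape_list)

    # Each batch entry holds an array of quadrilaterals, one 4 x 2 box per
    # detected region.
    print(result[0]['points'].shape)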