processors.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import os.path as osp
from typing import List
import re
import numpy as np
from PIL import Image
import cv2
import math
import json
import tempfile
from tokenizers import Tokenizer as TokenizerFast

from ....utils import logging

class OCRReisizeNormImg:
    """For OCR image resize and normalization."""

    def __init__(self, rec_image_shape=[3, 48, 320]):
        super().__init__()
        self.rec_image_shape = rec_image_shape

    def resize_norm_img(self, img, max_wh_ratio):
        """Resize and normalize the image."""
        imgC, imgH, imgW = self.rec_image_shape
        assert imgC == img.shape[2]
        imgW = int(imgH * max_wh_ratio)
        h, w = img.shape[:2]
        ratio = w / float(h)
        if math.ceil(imgH * ratio) > imgW:
            resized_w = imgW
        else:
            resized_w = int(math.ceil(imgH * ratio))
        resized_image = cv2.resize(img, (resized_w, imgH))
        resized_image = resized_image.astype("float32")
        # HWC -> CHW and rescale pixel values from [0, 255] to [-1, 1]
        resized_image = resized_image.transpose((2, 0, 1)) / 255
        resized_image -= 0.5
        resized_image /= 0.5
        # pad on the right so every image reaches width imgW
        padding_im = np.zeros((imgC, imgH, imgW), dtype=np.float32)
        padding_im[:, :, 0:resized_w] = resized_image
        return padding_im

    def __call__(self, imgs):
        """apply"""
        return [self.resize(img) for img in imgs]

    def resize(self, img):
        """Resize a single image, widening the target ratio if the crop is wider."""
        imgC, imgH, imgW = self.rec_image_shape
        max_wh_ratio = imgW / imgH
        h, w = img.shape[:2]
        wh_ratio = w * 1.0 / h
        max_wh_ratio = max(max_wh_ratio, wh_ratio)
        img = self.resize_norm_img(img, max_wh_ratio)
        return img
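
# Note (not in the original source): a worked example of the resize step above.
# With the default rec_image_shape [3, 48, 320], a 32x100 BGR crop has
# wh_ratio = 100 / 32 = 3.125 < 320 / 48, so the target width stays 320 and the
# crop is resized to 48 x ceil(48 * 3.125) = 48 x 150; columns 150..319 are
# zero-padded. Pixel values are mapped from [0, 255] to [-1, 1] via
# (p / 255 - 0.5) / 0.5.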

class BaseRecLabelDecode:
    """Convert between text-label and text-index."""

    def __init__(self, character_str=None, use_space_char=True):
        super().__init__()
        self.reverse = False
        character_list = (
            list(character_str)
            if character_str is not None
            else list("0123456789abcdefghijklmnopqrstuvwxyz")
        )
        if use_space_char:
            character_list.append(" ")
        character_list = self.add_special_char(character_list)
        self.dict = {}
        for i, char in enumerate(character_list):
            self.dict[char] = i
        self.character = character_list

    def pred_reverse(self, pred):
        """Reverse the prediction for right-to-left (e.g. Arabic) text, keeping
        runs of Latin letters, digits, and common symbols in their original order."""
        pred_re = []
        c_current = ""
        for c in pred:
            if not bool(re.search("[a-zA-Z0-9 :*./%+-]", c)):
                if c_current != "":
                    pred_re.append(c_current)
                pred_re.append(c)
                c_current = ""
            else:
                c_current += c
        if c_current != "":
            pred_re.append(c_current)
        return "".join(pred_re[::-1])

    def add_special_char(self, character_list):
        """add_special_char"""
        return character_list

    def decode(self, text_index, text_prob=None, is_remove_duplicate=False):
        """Convert text-index into text-label."""
        result_list = []
        ignored_tokens = self.get_ignored_tokens()
        batch_size = len(text_index)
        for batch_idx in range(batch_size):
            selection = np.ones(len(text_index[batch_idx]), dtype=bool)
            if is_remove_duplicate:
                # CTC-style collapsing: drop positions that repeat the previous index
                selection[1:] = text_index[batch_idx][1:] != text_index[batch_idx][:-1]
            for ignored_token in ignored_tokens:
                selection &= text_index[batch_idx] != ignored_token
            char_list = [
                self.character[text_id] for text_id in text_index[batch_idx][selection]
            ]
            if text_prob is not None:
                conf_list = text_prob[batch_idx][selection]
            else:
                conf_list = [1] * len(selection)
            if len(conf_list) == 0:
                conf_list = [0]
            text = "".join(char_list)
            if self.reverse:  # for arabic rec
                text = self.pred_reverse(text)
            result_list.append((text, np.mean(conf_list).tolist()))
        return result_list

    def get_ignored_tokens(self):
        """get_ignored_tokens"""
        return [0]  # for ctc blank

    def __call__(self, pred):
        """apply"""
        if isinstance(pred, (tuple, list)):
            # take the last output when the predictor returns multiple tensors
            pred = pred[-1]
        preds = np.array(pred)
        preds_idx = preds.argmax(axis=-1)
        preds_prob = preds.max(axis=-1)
        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
        texts = []
        scores = []
        for t in text:
            texts.append(t[0])
            scores.append(t[1])
        return texts, scores
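
# Note (not in the original source): a small worked example of decode() with the
# vocabulary built by CTCLabelDecode below ("blank" at index 0, then "0"-"9",
# "a"-"z", " "). For a single sequence of argmax indices [11, 11, 0, 12, 12],
# is_remove_duplicate=True keeps positions 0 and 3 (the repeat and the blank are
# dropped), so the decoded text is "ab" and the score is the mean of the kept
# probabilities.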

class CTCLabelDecode(BaseRecLabelDecode):
    """Convert between text-label and text-index."""

    def __init__(self, character_list=None, use_space_char=True):
        super().__init__(character_list, use_space_char=use_space_char)

    def __call__(self, pred):
        """apply"""
        preds = np.array(pred[0])
        preds_idx = preds.argmax(axis=-1)
        preds_prob = preds.max(axis=-1)
        text = self.decode(preds_idx, preds_prob, is_remove_duplicate=True)
        texts = []
        scores = []
        for t in text:
            texts.append(t[0])
            scores.append(t[1])
        return texts, scores

    def add_special_char(self, character_list):
        """Prepend the CTC blank token so that index 0 is ignored during decoding."""
        character_list = ["blank"] + character_list
        return character_list
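
# Note (not in the original source): CTCLabelDecode.__call__ reads the first
# element of `pred` (the recognition head's output tensor), whereas the base
# class falls back to the last element of a tuple/list. Prepending "blank" above
# keeps the CTC blank at index 0, which is exactly what get_ignored_tokens()
# filters out in decode().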

class ToBatch:
    """A class for batching and padding images to a uniform width."""

    def __pad_imgs(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
        """Pad images to the maximum width in the batch.

        Args:
            imgs (list of np.ndarray): List of images to pad.

        Returns:
            list of np.ndarray: List of padded images.
        """
        max_width = max(img.shape[2] for img in imgs)
        padded_imgs = []
        for img in imgs:
            _, height, width = img.shape
            pad_width = max_width - width
            # pad only along the width axis (CHW layout), filling with zeros
            padded_img = np.pad(
                img,
                ((0, 0), (0, 0), (0, pad_width)),
                mode="constant",
                constant_values=0,
            )
            padded_imgs.append(padded_img)
        return padded_imgs

    def __call__(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
        """Pad images and stack them into a batch.

        Args:
            imgs (list of np.ndarray): List of images to process.

        Returns:
            list of np.ndarray: List containing a stacked tensor of the padded images.
        """
        imgs = self.__pad_imgs(imgs)
        return [np.stack(imgs, axis=0).astype(dtype=np.float32, copy=False)]
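

if __name__ == "__main__":
    # Minimal smoke-test sketch (not part of the original module): runs two dummy
    # crops through resizing and batching, then decodes random logits with
    # CTCLabelDecode. The shapes and data here are illustrative assumptions only,
    # not real model output.
    rng = np.random.default_rng(0)
    dummy_crops = [
        rng.integers(0, 256, size=(32, 100, 3), dtype=np.uint8),
        rng.integers(0, 256, size=(48, 240, 3), dtype=np.uint8),
    ]

    resize_op = OCRReisizeNormImg(rec_image_shape=[3, 48, 320])
    resized = resize_op(dummy_crops)  # list of (3, 48, 320) float32 arrays
    batch = ToBatch()(resized)[0]     # (2, 3, 48, 320) float32 batch

    decoder = CTCLabelDecode()  # default 0-9, a-z, space vocabulary plus "blank"
    fake_logits = rng.random((2, 40, len(decoder.character)), dtype=np.float32)
    texts, scores = decoder([fake_logits])
    print(batch.shape, texts, scores)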