# processors.py
  1. # Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import json
  15. import math
  16. import re
  17. from typing import Any, Dict, List, Optional, Tuple, Union
  18. import numpy as np
  19. from PIL import Image, ImageOps
  20. from ....utils.deps import class_requires_deps, is_dep_available
  21. from ...utils.benchmark import benchmark
  22. if is_dep_available("opencv-contrib-python"):
  23. import cv2
  24. if is_dep_available("tokenizers"):
  25. from tokenizers import AddedToken
  26. from tokenizers import Tokenizer as TokenizerFast
  27. @benchmark.timeit
  28. @class_requires_deps("opencv-contrib-python")
  29. class MinMaxResize:
  30. """Class for resizing images to be within specified minimum and maximum dimensions, with padding and normalization."""
  31. def __init__(
  32. self,
  33. min_dimensions: Optional[List[int]] = [32, 32],
  34. max_dimensions: Optional[List[int]] = [672, 192],
  35. **kwargs,
  36. ) -> None:
  37. """Initializes the MinMaxResize class with minimum and maximum dimensions.
  38. Args:
  39. min_dimensions (list of int, optional): Minimum dimensions (width, height). Defaults to [32, 32].
  40. max_dimensions (list of int, optional): Maximum dimensions (width, height). Defaults to [672, 192].
  41. **kwargs: Additional keyword arguments for future expansion.
  42. """
  43. self.min_dimensions = min_dimensions
  44. self.max_dimensions = max_dimensions
  45. def pad_(self, img: Image.Image, divable: int = 32) -> Image.Image:
  46. """Pads the image to ensure its dimensions are divisible by a specified value.
  47. Args:
  48. img (PIL.Image.Image): The input image.
  49. divable (int, optional): The value by which the dimensions should be divisible. Defaults to 32.
  50. Returns:
  51. PIL.Image.Image: The padded image.
  52. """
  53. threshold = 128
  54. data = np.array(img.convert("LA"))
  55. if data[..., -1].var() == 0:
  56. data = (data[..., 0]).astype(np.uint8)
  57. else:
  58. data = (255 - data[..., -1]).astype(np.uint8)
  59. data = (data - data.min()) / (data.max() - data.min()) * 255
  60. if data.mean() > threshold:
  61. # To invert the text to white
  62. gray = 255 * (data < threshold).astype(np.uint8)
  63. else:
  64. gray = 255 * (data > threshold).astype(np.uint8)
  65. data = 255 - data
  66. coords = cv2.findNonZero(gray) # Find all non-zero points (text)
  67. a, b, w, h = cv2.boundingRect(coords) # Find minimum spanning bounding box
  68. rect = data[b : b + h, a : a + w]
  69. im = Image.fromarray(rect).convert("L")
  70. dims = []
  71. for x in [w, h]:
  72. div, mod = divmod(x, divable)
  73. dims.append(divable * (div + (1 if mod > 0 else 0)))
  74. padded = Image.new("L", dims, 255)
  75. padded.paste(im, (0, 0, im.size[0], im.size[1]))
  76. return padded
  77. def minmax_size_(
  78. self,
  79. img: Image.Image,
  80. max_dimensions: Optional[List[int]],
  81. min_dimensions: Optional[List[int]],
  82. ) -> Image.Image:
  83. """Resizes the image to be within the specified minimum and maximum dimensions.
  84. Args:
  85. img (PIL.Image.Image): The input image.
  86. max_dimensions (list of int or None): Maximum dimensions (width, height).
  87. min_dimensions (list of int or None): Minimum dimensions (width, height).
  88. Returns:
  89. PIL.Image.Image: The resized image.
  90. """
  91. if max_dimensions is not None:
  92. ratios = [a / b for a, b in zip(img.size, max_dimensions)]
  93. if any([r > 1 for r in ratios]):
  94. size = np.array(img.size) // max(ratios)
  95. img = img.resize(tuple(size.astype(int)), Image.BILINEAR)
  96. if min_dimensions is not None:
  97. # hypothesis: there is a dim in img smaller than min_dimensions, and return a proper dim >= min_dimensions
  98. padded_size = [
  99. max(img_dim, min_dim)
  100. for img_dim, min_dim in zip(img.size, min_dimensions)
  101. ]
  102. if padded_size != list(img.size): # assert hypothesis
  103. padded_im = Image.new("L", padded_size, 255)
  104. padded_im.paste(img, img.getbbox())
  105. img = padded_im
  106. return img
  107. def resize(self, img: np.ndarray) -> np.ndarray:
  108. """Resizes the input image according to the specified minimum and maximum dimensions.
  109. Args:
  110. img (np.ndarray): The input image as a numpy array.
  111. Returns:
  112. np.ndarray: The resized image as a numpy array with three channels.
  113. """
  114. h, w = img.shape[:2]
  115. if (
  116. self.min_dimensions[0] <= w <= self.max_dimensions[0]
  117. and self.min_dimensions[1] <= h <= self.max_dimensions[1]
  118. ):
  119. return img
  120. else:
  121. img = Image.fromarray(np.uint8(img))
  122. img = self.minmax_size_(
  123. self.pad_(img), self.max_dimensions, self.min_dimensions
  124. )
  125. img = np.array(img)
  126. img = np.dstack((img, img, img))
  127. return img
  128. def __call__(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
  129. """Applies the resize method to a list of images.
  130. Args:
  131. imgs (list of np.ndarray): The list of input images as numpy arrays.
  132. Returns:
  133. list of np.ndarray: The list of resized images as numpy arrays with three channels.
  134. """
  135. return [self.resize(img) for img in imgs]
  136. @benchmark.timeit
  137. @class_requires_deps("opencv-contrib-python")
  138. class LatexTestTransform:
  139. """
  140. A transform class for processing images according to Latex test requirements.
  141. """
  142. def __init__(self, **kwargs) -> None:
  143. """
  144. Initialize the transform with default number of output channels.
  145. """
  146. super().__init__()
  147. self.num_output_channels = 3
  148. def transform(self, img: np.ndarray) -> np.ndarray:
  149. """
  150. Convert the input image to grayscale, squeeze it, and merge to create an output
  151. image with the specified number of output channels.
  152. Parameters:
  153. img (np.array): The input image.
  154. Returns:
  155. np.array: The transformed image.
  156. """
  157. grayscale_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  158. squeezed = np.squeeze(grayscale_image)
  159. return cv2.merge([squeezed] * self.num_output_channels)
  160. def __call__(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
  161. """
  162. Apply the transform to a list of images.
  163. Parameters:
  164. imgs (list of np.array): The list of input images.
  165. Returns:
  166. list of np.array: The list of transformed images.
  167. """
  168. return [self.transform(img) for img in imgs]
  169. @benchmark.timeit
  170. class LatexImageFormat:
  171. """Class for formatting images to a specific format suitable for LaTeX."""
  172. def __init__(self, **kwargs) -> None:
  173. """Initializes the LatexImageFormat class with optional keyword arguments."""
  174. super().__init__()
  175. def format(self, img: np.ndarray) -> np.ndarray:
  176. """Formats a single image to the LaTeX-compatible format.
  177. Args:
  178. img (numpy.ndarray): The input image as a numpy array.
  179. Returns:
  180. numpy.ndarray: The formatted image as a numpy array with an added dimension for color.
  181. """
  182. im_h, im_w = img.shape[:2]
  183. divide_h = math.ceil(im_h / 16) * 16
  184. divide_w = math.ceil(im_w / 16) * 16
  185. img = img[:, :, 0]
  186. img = np.pad(
  187. img, ((0, divide_h - im_h), (0, divide_w - im_w)), constant_values=(1, 1)
  188. )
  189. img_expanded = img[:, :, np.newaxis].transpose(2, 0, 1)
  190. return img_expanded[np.newaxis, :]
  191. def __call__(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
  192. """Applies the format method to a list of images.
  193. Args:
  194. imgs (list of numpy.ndarray): A list of input images as numpy arrays.
  195. Returns:
  196. list of numpy.ndarray: A list of formatted images as numpy arrays.
  197. """
  198. return [self.format(img) for img in imgs]
  199. @benchmark.timeit
  200. class NormalizeImage(object):
  201. """Normalize an image by subtracting the mean and dividing by the standard deviation.
  202. Args:
  203. scale (float or str): The scale factor to apply to the image. If a string is provided, it will be evaluated as a Python expression.
  204. mean (list of float): The mean values to subtract from each channel. Defaults to [0.485, 0.456, 0.406].
  205. std (list of float): The standard deviation values to divide by for each channel. Defaults to [0.229, 0.224, 0.225].
  206. order (str): The order of dimensions for the mean and std. 'chw' for channels-height-width, 'hwc' for height-width-channels. Defaults to 'chw'.
  207. **kwargs: Additional keyword arguments that may be used by subclasses.
  208. Attributes:
  209. scale (float): The scale factor applied to the image.
  210. mean (numpy.ndarray): The mean values reshaped according to the specified order.
  211. std (numpy.ndarray): The standard deviation values reshaped according to the specified order.
  212. """
  213. def __init__(
  214. self,
  215. scale: Optional[Union[float, str]] = None,
  216. mean: Optional[List[float]] = None,
  217. std: Optional[List[float]] = None,
  218. order: str = "chw",
  219. **kwargs,
  220. ) -> None:
  221. if isinstance(scale, str):
  222. scale = eval(scale)
  223. self.scale = np.float32(scale if scale is not None else 1.0 / 255.0)
  224. mean = mean if mean is not None else [0.485, 0.456, 0.406]
  225. std = std if std is not None else [0.229, 0.224, 0.225]
  226. shape = (3, 1, 1) if order == "chw" else (1, 1, 3)
  227. self.mean = np.array(mean).reshape(shape).astype("float32")
  228. self.std = np.array(std).reshape(shape).astype("float32")
  229. def normalize(self, img: Union[np.ndarray, Image.Image]) -> np.ndarray:
  230. from PIL import Image
  231. if isinstance(img, Image.Image):
  232. img = np.array(img)
  233. assert isinstance(img, np.ndarray), "invalid input 'img' in NormalizeImage"
  234. img = (img.astype("float32") * self.scale - self.mean) / self.std
  235. return img
  236. def __call__(self, imgs: List[Union[np.ndarray, Image.Image]]) -> List[np.ndarray]:
  237. """Apply normalization to a list of images."""
  238. return [self.normalize(img) for img in imgs]
  239. @benchmark.timeit
  240. class ToBatch(object):
  241. """A class for batching images."""
  242. def __init__(self, **kwargs) -> None:
  243. """Initializes the ToBatch object."""
  244. super(ToBatch, self).__init__()
  245. def __call__(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
  246. """Concatenates a list of images into a single batch.
  247. Args:
  248. imgs (list): A list of image arrays to be concatenated.
  249. Returns:
  250. list: A list containing the concatenated batch of images wrapped in another list (to comply with common batch processing formats).
  251. """
  252. batch_imgs = np.concatenate(imgs)
  253. batch_imgs = batch_imgs.copy()
  254. x = [batch_imgs]
  255. return x
  256. @benchmark.timeit
  257. @class_requires_deps("tokenizers")
  258. class LaTeXOCRDecode(object):
  259. """Class for decoding LaTeX OCR tokens based on a provided character list."""
  260. def __init__(self, character_list: List[str], **kwargs) -> None:
  261. """Initializes the LaTeXOCRDecode object.
  262. Args:
  263. character_list (list): The list of characters to use for tokenization.
  264. **kwargs: Additional keyword arguments for initialization.
  265. """
  266. super(LaTeXOCRDecode, self).__init__()
  267. fast_tokenizer_str = json.dumps(character_list)
  268. fast_tokenizer_buffer = fast_tokenizer_str.encode("utf-8")
  269. self.tokenizer = TokenizerFast.from_buffer(fast_tokenizer_buffer)
  270. def post_process(self, s: str) -> str:
  271. """Post-processes the decoded LaTeX string.
  272. Args:
  273. s (str): The decoded LaTeX string to post-process.
  274. Returns:
  275. str: The post-processed LaTeX string.
  276. """
  277. text_reg = r"(\\(operatorname|mathrm|text|mathbf)\s?\*? {.*?})"
  278. letter = "[a-zA-Z]"
  279. noletter = "[\W_^\d]"
  280. names = [x[0].replace(" ", "") for x in re.findall(text_reg, s)]
  281. s = re.sub(text_reg, lambda match: str(names.pop(0)), s)
  282. news = s
  283. while True:
  284. s = news
  285. news = re.sub(r"(?!\\ )(%s)\s+?(%s)" % (noletter, noletter), r"\1\2", s)
  286. news = re.sub(r"(?!\\ )(%s)\s+?(%s)" % (noletter, letter), r"\1\2", news)
  287. news = re.sub(r"(%s)\s+?(%s)" % (letter, noletter), r"\1\2", news)
  288. if news == s:
  289. break
  290. return s
  291. def decode(self, tokens: np.ndarray) -> List[str]:
  292. """Decodes the provided tokens into LaTeX strings.
  293. Args:
  294. tokens (np.array): The tokens to decode.
  295. Returns:
  296. list: The decoded LaTeX strings.
  297. """
  298. if len(tokens.shape) == 1:
  299. tokens = tokens[None, :]
  300. dec = [self.tokenizer.decode(tok) for tok in tokens]
  301. dec_str_list = [
  302. "".join(detok.split(" "))
  303. .replace("臓", " ")
  304. .replace("[EOS]", "")
  305. .replace("[BOS]", "")
  306. .replace("[PAD]", "")
  307. .strip()
  308. for detok in dec
  309. ]
  310. return [self.post_process(dec_str) for dec_str in dec_str_list]
  311. def __call__(
  312. self,
  313. preds: np.ndarray,
  314. label: Optional[np.ndarray] = None,
  315. mode: str = "eval",
  316. *args,
  317. **kwargs,
  318. ) -> Tuple[List[str], List[str]]:
  319. """Calls the object with the provided predictions and label.
  320. Args:
  321. preds (np.array): The predictions to decode.
  322. label (np.array, optional): The labels to decode. Defaults to None.
  323. mode (str): The mode to run in, either 'train' or 'eval'. Defaults to 'eval'.
  324. *args: Positional arguments to pass.
  325. **kwargs: Keyword arguments to pass.
  326. Returns:
  327. tuple or list: The decoded text and optionally the decoded label.
  328. """
  329. if mode == "train":
  330. preds_idx = np.array(preds.argmax(axis=2))
  331. text = self.decode(preds_idx)
  332. else:
  333. text = self.decode(np.array(preds))
  334. if label is None:
  335. return text
  336. label = self.decode(np.array(label))
  337. return text, label
@benchmark.timeit
@class_requires_deps("opencv-contrib-python")
class UniMERNetImgDecode(object):
    """Class for decoding images for UniMERNet, including cropping margins, resizing, and padding."""

    def __init__(
        self, input_size: Tuple[int, int], random_padding: bool = False, **kwargs
    ) -> None:
        """Initializes the UniMERNetImgDecode class with input size and random padding options.

        Args:
            input_size (tuple): The desired input size for the images (height, width).
            random_padding (bool): Whether to use random padding for resizing.
            **kwargs: Additional keyword arguments."""
        self.input_size = input_size
        self.random_padding = random_padding

    def crop_margin(self, img: Image.Image) -> Image.Image:
        """Crops the margin of the image based on grayscale thresholding.

        Args:
            img (PIL.Image.Image): The input image.

        Returns:
            PIL.Image.Image: The cropped image."""
        data = np.array(img.convert("L"))
        data = data.astype(np.uint8)
        max_val = data.max()
        min_val = data.min()
        # Uniform image: nothing to crop (also avoids dividing by zero below).
        if max_val == min_val:
            return img
        data = (data - min_val) / (max_val - min_val) * 255
        # After the contrast stretch, pixels darker than 200 count as content.
        gray = 255 * (data < 200).astype(np.uint8)
        coords = cv2.findNonZero(gray)  # Find all non-zero points (text)
        a, b, w, h = cv2.boundingRect(coords)  # Find minimum spanning bounding box
        return img.crop((a, b, w + a, h + b))

    def get_dimensions(self, img: Union[Image.Image, np.ndarray]) -> List[int]:
        """Gets the dimensions of the image.

        Args:
            img (PIL.Image.Image or numpy.ndarray): The input image.

        Returns:
            list: A list containing the number of channels, height, and width."""
        if hasattr(img, "getbands"):
            channels = len(img.getbands())
        else:
            # NOTE(review): non-PIL inputs are assumed to expose `.channels`
            # and `.size` attributes — confirm against actual callers.
            channels = img.channels
        width, height = img.size
        return [channels, height, width]

    def _compute_resized_output_size(
        self,
        image_size: Tuple[int, int],
        size: Union[int, Tuple[int, int]],
        max_size: Optional[int] = None,
    ) -> List[int]:
        """Computes the resized output size of the image.

        Args:
            image_size (tuple): The original size of the image (height, width).
            size (int or tuple): The desired size for the smallest edge or both height and width.
            max_size (int, optional): The maximum allowed size for the longer edge.

        Returns:
            list: A list containing the new height and width."""
        if len(size) == 1:  # specified size only for the smallest edge
            h, w = image_size
            short, long = (w, h) if w <= h else (h, w)
            requested_new_short = size if isinstance(size, int) else size[0]
            # Scale the long edge proportionally to preserve the aspect ratio.
            new_short, new_long = requested_new_short, int(
                requested_new_short * long / short
            )
            if max_size is not None:
                if max_size <= requested_new_short:
                    raise ValueError(
                        f"max_size = {max_size} must be strictly greater than the requested "
                        f"size for the smaller edge size = {size}"
                    )
                if new_long > max_size:
                    new_short, new_long = int(max_size * new_short / new_long), max_size
            new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short)
        else:  # specified both h and w
            new_w, new_h = size[1], size[0]
        return [new_h, new_w]

    def resize(
        self, img: Image.Image, size: Union[int, Tuple[int, int]]
    ) -> Image.Image:
        """Resizes the image to the specified size.

        Args:
            img (PIL.Image.Image): The input image.
            size (int or tuple): The desired size for the smallest edge or both height and width.

        Returns:
            PIL.Image.Image: The resized image."""
        _, image_height, image_width = self.get_dimensions(img)
        if isinstance(size, int):
            size = [size]
        max_size = None
        output_size = self._compute_resized_output_size(
            (image_height, image_width), size, max_size
        )
        # PIL expects (width, height); resample=2 is bilinear interpolation.
        img = img.resize(tuple(output_size[::-1]), resample=2)
        return img

    def img_decode(self, img: np.ndarray) -> Optional[np.ndarray]:
        """Decodes the image by cropping margins, resizing, and adding padding.

        Args:
            img (numpy.ndarray): The input image array.

        Returns:
            numpy.ndarray: The decoded image array, or None for unreadable or
            empty inputs."""
        try:
            img = self.crop_margin(Image.fromarray(img).convert("RGB"))
        except OSError:
            # Corrupt / truncated image data: signal failure with None.
            return
        if img.height == 0 or img.width == 0:
            return
        img = self.resize(img, min(self.input_size))
        # Shrink in place so the image fits within (width, height) bounds.
        img.thumbnail((self.input_size[1], self.input_size[0]))
        delta_width = self.input_size[1] - img.width
        delta_height = self.input_size[0] - img.height
        if self.random_padding:
            # Training-style augmentation: place the image at a random offset.
            pad_width = np.random.randint(low=0, high=delta_width + 1)
            pad_height = np.random.randint(low=0, high=delta_height + 1)
        else:
            # Center the image on the target canvas.
            pad_width = delta_width // 2
            pad_height = delta_height // 2
        padding = (
            pad_width,
            pad_height,
            delta_width - pad_width,
            delta_height - pad_height,
        )
        return np.array(ImageOps.expand(img, padding))

    def __call__(self, imgs: List[np.ndarray]) -> List[Optional[np.ndarray]]:
        """Calls the img_decode method on a list of images.

        Args:
            imgs (list of numpy.ndarray): The list of input image arrays.

        Returns:
            list of numpy.ndarray: The list of decoded image arrays."""
        return [self.img_decode(img) for img in imgs]
  467. @benchmark.timeit
  468. @class_requires_deps("tokenizers")
  469. class UniMERNetDecode(object):
  470. """Class for decoding tokenized inputs using UniMERNet tokenizer.
  471. Attributes:
  472. SPECIAL_TOKENS_ATTRIBUTES (List[str]): List of special token attributes.
  473. model_input_names (List[str]): List of model input names.
  474. max_seq_len (int): Maximum sequence length.
  475. pad_token_id (int): ID for the padding token.
  476. bos_token_id (int): ID for the beginning-of-sequence token.
  477. eos_token_id (int): ID for the end-of-sequence token.
  478. padding_side (str): Padding side, either 'left' or 'right'.
  479. pad_token (str): Padding token.
  480. pad_token_type_id (int): Type ID for the padding token.
  481. pad_to_multiple_of (Optional[int]): If set, pad to a multiple of this value.
  482. tokenizer (TokenizerFast): Fast tokenizer instance.
  483. Args:
  484. character_list (Dict[str, Any]): Dictionary containing tokenizer configuration.
  485. **kwargs: Additional keyword arguments.
  486. """
  487. SPECIAL_TOKENS_ATTRIBUTES = [
  488. "bos_token",
  489. "eos_token",
  490. "unk_token",
  491. "sep_token",
  492. "pad_token",
  493. "cls_token",
  494. "mask_token",
  495. "additional_special_tokens",
  496. ]
  497. def __init__(
  498. self,
  499. character_list: Dict[str, Any],
  500. **kwargs,
  501. ) -> None:
  502. """Initializes the UniMERNetDecode class.
  503. Args:
  504. character_list (Dict[str, Any]): Dictionary containing tokenizer configuration.
  505. **kwargs: Additional keyword arguments.
  506. """
  507. self._unk_token = "<unk>"
  508. self._bos_token = "<s>"
  509. self._eos_token = "</s>"
  510. self._pad_token = "<pad>"
  511. self._sep_token = None
  512. self._cls_token = None
  513. self._mask_token = None
  514. self._additional_special_tokens = []
  515. self.model_input_names = ["input_ids", "token_type_ids", "attention_mask"]
  516. self.max_seq_len = 2048
  517. self.pad_token_id = 1
  518. self.bos_token_id = 0
  519. self.eos_token_id = 2
  520. self.padding_side = "right"
  521. self.pad_token_id = 1
  522. self.pad_token = "<pad>"
  523. self.pad_token_type_id = 0
  524. self.pad_to_multiple_of = None
  525. fast_tokenizer_str = json.dumps(character_list["fast_tokenizer_file"])
  526. fast_tokenizer_buffer = fast_tokenizer_str.encode("utf-8")
  527. self.tokenizer = TokenizerFast.from_buffer(fast_tokenizer_buffer)
  528. tokenizer_config = (
  529. character_list["tokenizer_config_file"]
  530. if "tokenizer_config_file" in character_list
  531. else None
  532. )
  533. added_tokens_decoder = {}
  534. added_tokens_map = {}
  535. if tokenizer_config is not None:
  536. init_kwargs = tokenizer_config
  537. if "added_tokens_decoder" in init_kwargs:
  538. for idx, token in init_kwargs["added_tokens_decoder"].items():
  539. if isinstance(token, dict):
  540. token = AddedToken(**token)
  541. if isinstance(token, AddedToken):
  542. added_tokens_decoder[int(idx)] = token
  543. added_tokens_map[str(token)] = token
  544. else:
  545. raise ValueError(
  546. f"Found a {token.__class__} in the saved `added_tokens_decoder`, should be a dictionary or an AddedToken instance"
  547. )
  548. init_kwargs["added_tokens_decoder"] = added_tokens_decoder
  549. added_tokens_decoder = init_kwargs.pop("added_tokens_decoder", {})
  550. tokens_to_add = [
  551. token
  552. for index, token in sorted(
  553. added_tokens_decoder.items(), key=lambda x: x[0]
  554. )
  555. if token not in added_tokens_decoder
  556. ]
  557. added_tokens_encoder = self.added_tokens_encoder(added_tokens_decoder)
  558. encoder = list(added_tokens_encoder.keys()) + [
  559. str(token) for token in tokens_to_add
  560. ]
  561. tokens_to_add += [
  562. token
  563. for token in self.all_special_tokens_extended
  564. if token not in encoder and token not in tokens_to_add
  565. ]
  566. if len(tokens_to_add) > 0:
  567. is_last_special = None
  568. tokens = []
  569. special_tokens = self.all_special_tokens
  570. for token in tokens_to_add:
  571. is_special = (
  572. (token.special or str(token) in special_tokens)
  573. if isinstance(token, AddedToken)
  574. else str(token) in special_tokens
  575. )
  576. if is_last_special is None or is_last_special == is_special:
  577. tokens.append(token)
  578. else:
  579. self._add_tokens(tokens, special_tokens=is_last_special)
  580. tokens = [token]
  581. is_last_special = is_special
  582. if tokens:
  583. self._add_tokens(tokens, special_tokens=is_last_special)
  584. def _add_tokens(
  585. self, new_tokens: "List[Union[AddedToken, str]]", special_tokens: bool = False
  586. ) -> "List[Union[AddedToken, str]]":
  587. """Adds new tokens to the tokenizer.
  588. Args:
  589. new_tokens (List[Union[AddedToken, str]]): Tokens to be added.
  590. special_tokens (bool): Indicates whether the tokens are special tokens.
  591. Returns:
  592. List[Union[AddedToken, str]]: added tokens.
  593. """
  594. if special_tokens:
  595. return self.tokenizer.add_special_tokens(new_tokens)
  596. return self.tokenizer.add_tokens(new_tokens)
  597. def added_tokens_encoder(
  598. self, added_tokens_decoder: "Dict[int, AddedToken]"
  599. ) -> Dict[str, int]:
  600. """Creates an encoder dictionary from added tokens.
  601. Args:
  602. added_tokens_decoder (Dict[int, AddedToken]): Dictionary mapping token IDs to tokens.
  603. Returns:
  604. Dict[str, int]: Dictionary mapping token strings to IDs.
  605. """
  606. return {
  607. k.content: v
  608. for v, k in sorted(added_tokens_decoder.items(), key=lambda item: item[0])
  609. }
  610. @property
  611. def all_special_tokens(self) -> List[str]:
  612. """Retrieves all special tokens.
  613. Returns:
  614. List[str]: List of all special tokens as strings.
  615. """
  616. all_toks = [str(s) for s in self.all_special_tokens_extended]
  617. return all_toks
  618. @property
  619. def all_special_tokens_extended(self) -> "List[Union[str, AddedToken]]":
  620. """Retrieves all special tokens, including extended ones.
  621. Returns:
  622. List[Union[str, AddedToken]]: List of all special tokens.
  623. """
  624. all_tokens = []
  625. seen = set()
  626. for value in self.special_tokens_map_extended.values():
  627. if isinstance(value, (list, tuple)):
  628. tokens_to_add = [token for token in value if str(token) not in seen]
  629. else:
  630. tokens_to_add = [value] if str(value) not in seen else []
  631. seen.update(map(str, tokens_to_add))
  632. all_tokens.extend(tokens_to_add)
  633. return all_tokens
  634. @property
  635. def special_tokens_map_extended(self) -> Dict[str, Union[str, List[str]]]:
  636. """Retrieves the extended map of special tokens.
  637. Returns:
  638. Dict[str, Union[str, List[str]]]: Dictionary mapping special token attributes to their values.
  639. """
  640. set_attr = {}
  641. for attr in self.SPECIAL_TOKENS_ATTRIBUTES:
  642. attr_value = getattr(self, "_" + attr)
  643. if attr_value:
  644. set_attr[attr] = attr_value
  645. return set_attr
  646. def convert_ids_to_tokens(
  647. self, ids: Union[int, List[int]], skip_special_tokens: bool = False
  648. ) -> Union[str, List[str]]:
  649. """Converts token IDs to token strings.
  650. Args:
  651. ids (Union[int, List[int]]): Token ID(s) to convert.
  652. skip_special_tokens (bool): Whether to skip special tokens during conversion.
  653. Returns:
  654. Union[str, List[str]]: Converted token string(s).
  655. """
  656. if isinstance(ids, int):
  657. return self.tokenizer.id_to_token(ids)
  658. tokens = []
  659. for index in ids:
  660. index = int(index)
  661. if skip_special_tokens and index in self.all_special_ids:
  662. continue
  663. tokens.append(self.tokenizer.id_to_token(index))
  664. return tokens
  665. def detokenize(self, tokens: List[List[int]]) -> List[List[str]]:
  666. """Detokenizes a list of token IDs back into strings.
  667. Args:
  668. tokens (List[List[int]]): List of token ID lists.
  669. Returns:
  670. List[List[str]]: List of detokenized strings.
  671. """
  672. self.tokenizer.bos_token = "<s>"
  673. self.tokenizer.eos_token = "</s>"
  674. self.tokenizer.pad_token = "<pad>"
  675. toks = [self.convert_ids_to_tokens(tok) for tok in tokens]
  676. for b in range(len(toks)):
  677. for i in reversed(range(len(toks[b]))):
  678. if toks[b][i] is None:
  679. toks[b][i] = ""
  680. toks[b][i] = toks[b][i].replace("臓", " ").strip()
  681. if toks[b][i] in (
  682. [
  683. self.tokenizer.bos_token,
  684. self.tokenizer.eos_token,
  685. self.tokenizer.pad_token,
  686. ]
  687. ):
  688. del toks[b][i]
  689. return toks
  690. def token2str(self, token_ids: List[List[int]]) -> List[str]:
  691. """Converts a list of token IDs to strings.
  692. Args:
  693. token_ids (List[List[int]]): List of token ID lists.
  694. Returns:
  695. List[str]: List of converted strings.
  696. """
  697. generated_text = []
  698. for tok_id in token_ids:
  699. end_idx = np.argwhere(tok_id == 2)
  700. if len(end_idx) > 0:
  701. end_idx = int(end_idx[0][0])
  702. tok_id = tok_id[: end_idx + 1]
  703. generated_text.append(
  704. self.tokenizer.decode(tok_id, skip_special_tokens=True)
  705. )
  706. generated_text = [self.post_process(text) for text in generated_text]
  707. return generated_text
  708. def normalize(self, s: str) -> str:
  709. """Normalizes a string by removing unnecessary spaces.
  710. Args:
  711. s (str): String to normalize.
  712. Returns:
  713. str: Normalized string.
  714. """
  715. text_reg = r"(\\(operatorname|mathrm|text|mathbf)\s?\*? {.*?})"
  716. letter = "[a-zA-Z]"
  717. noletter = "[\W_^\d]"
  718. names = [x[0].replace(" ", "") for x in re.findall(text_reg, s)]
  719. s = re.sub(text_reg, lambda match: str(names.pop(0)), s)
  720. news = s
  721. while True:
  722. s = news
  723. news = re.sub(r"(?!\\ )(%s)\s+?(%s)" % (noletter, noletter), r"\1\2", s)
  724. news = re.sub(r"(?!\\ )(%s)\s+?(%s)" % (noletter, letter), r"\1\2", news)
  725. news = re.sub(r"(%s)\s+?(%s)" % (letter, noletter), r"\1\2", news)
  726. if news == s:
  727. break
  728. return s
  729. def remove_chinese_text_wrapping(self, formula):
  730. pattern = re.compile(r"\\text\s*{\s*([^}]*?[\u4e00-\u9fff]+[^}]*?)\s*}")
  731. def replacer(match):
  732. return match.group(1)
  733. replaced_formula = pattern.sub(replacer, formula)
  734. return replaced_formula.replace('"', "")
  735. def post_process(self, text: str) -> str:
  736. """Post-processes a string by fixing text and normalizing it.
  737. Args:
  738. text (str): String to post-process.
  739. Returns:
  740. str: Post-processed string.
  741. """
  742. from ftfy import fix_text
  743. text = self.remove_chinese_text_wrapping(text)
  744. text = fix_text(text)
  745. print("=" * 100)
  746. print(text)
  747. text = self.normalize(text)
  748. print(text)
  749. return text
  750. def __call__(
  751. self,
  752. preds: np.ndarray,
  753. label: Optional[np.ndarray] = None,
  754. mode: str = "eval",
  755. *args,
  756. **kwargs,
  757. ) -> Union[List[str], tuple]:
  758. """Processes predictions and optionally labels, returning the decoded text.
  759. Args:
  760. preds (np.ndarray): Model predictions.
  761. label (Optional[np.ndarray]): True labels, if available.
  762. mode (str): Mode of operation, either 'train' or 'eval'.
  763. Returns:
  764. Union[List[str], tuple]: Decoded text, optionally with labels.
  765. """
  766. if mode == "train":
  767. preds_idx = np.array(preds.argmax(axis=2))
  768. text = self.token2str(preds_idx)
  769. else:
  770. text = self.token2str(np.array(preds))
  771. if label is None:
  772. return text
  773. label = self.token2str(np.array(label))
  774. return text, label
  775. @benchmark.timeit
  776. @class_requires_deps("opencv-contrib-python")
  777. class UniMERNetTestTransform:
  778. """
  779. A class for transforming images according to UniMERNet test specifications.
  780. """
  781. def __init__(self, **kwargs) -> None:
  782. """
  783. Initializes the UniMERNetTestTransform class.
  784. """
  785. super().__init__()
  786. self.num_output_channels = 3
  787. def transform(self, img: np.ndarray) -> np.ndarray:
  788. """
  789. Transforms a single image for UniMERNet testing.
  790. Args:
  791. img (numpy.ndarray): The input image.
  792. Returns:
  793. numpy.ndarray: The transformed image.
  794. """
  795. mean = [0.7931, 0.7931, 0.7931]
  796. std = [0.1738, 0.1738, 0.1738]
  797. scale = float(1 / 255.0)
  798. shape = (1, 1, 3)
  799. mean = np.array(mean).reshape(shape).astype("float32")
  800. std = np.array(std).reshape(shape).astype("float32")
  801. img = (img.astype("float32") * scale - mean) / std
  802. grayscale_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
  803. squeezed = np.squeeze(grayscale_image)
  804. img = cv2.merge([squeezed] * self.num_output_channels)
  805. return img
  806. def __call__(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
  807. """
  808. Applies the transform to a list of images.
  809. Args:
  810. imgs (list of numpy.ndarray): The list of input images.
  811. Returns:
  812. list of numpy.ndarray: The list of transformed images.
  813. """
  814. return [self.transform(img) for img in imgs]
  815. @benchmark.timeit
  816. class UniMERNetImageFormat:
  817. """Class for formatting images to UniMERNet's required format."""
  818. def __init__(self, **kwargs) -> None:
  819. """Initializes the UniMERNetImageFormat instance."""
  820. # your init code
  821. def format(self, img: np.ndarray) -> np.ndarray:
  822. """Formats a single image to UniMERNet's required format.
  823. Args:
  824. img (numpy.ndarray): The input image to be formatted.
  825. Returns:
  826. numpy.ndarray: The formatted image.
  827. """
  828. im_h, im_w = img.shape[:2]
  829. divide_h = math.ceil(im_h / 32) * 32
  830. divide_w = math.ceil(im_w / 32) * 32
  831. img = img[:, :, 0]
  832. img = np.pad(
  833. img, ((0, divide_h - im_h), (0, divide_w - im_w)), constant_values=(1, 1)
  834. )
  835. img_expanded = img[:, :, np.newaxis].transpose(2, 0, 1)
  836. return img_expanded[np.newaxis, :]
  837. def __call__(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
  838. """Applies the format method to a list of images.
  839. Args:
  840. imgs (list of numpy.ndarray): The list of input images to be formatted.
  841. Returns:
  842. list of numpy.ndarray: The list of formatted images.
  843. """
  844. return [self.format(img) for img in imgs]