# common.py
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  14. import math
  15. import tempfile
  16. from pathlib import Path
  17. from copy import deepcopy
  18. import numpy as np
  19. import cv2
  20. from .....utils.cache import CACHE_DIR
  21. from ....utils.io import ImageReader, ImageWriter, PDFReader
  22. from ...utils.mixin import BatchSizeMixin
  23. from ...base import BaseComponent
  24. from ..read_data import _BaseRead
  25. from . import funcs as F
# Public API of this module.
# NOTE(review): `GetImageInfo` is defined below but not listed here — confirm
# whether the omission is intentional.
__all__ = [
    "ReadImage",
    "Flip",
    "Crop",
    "Resize",
    "ResizeByLong",
    "ResizeByShort",
    "Pad",
    "Normalize",
    "ToCHWImage",
    "PadStride",
]
  38. def _check_image_size(input_):
  39. """check image size"""
  40. if not (
  41. isinstance(input_, (list, tuple))
  42. and len(input_) == 2
  43. and isinstance(input_[0], int)
  44. and isinstance(input_[1], int)
  45. ):
  46. raise TypeError(f"{input_} cannot represent a valid image size.")
  47. class ReadImage(_BaseRead):
  48. """Load image from the file."""
  49. INPUT_KEYS = ["img"]
  50. OUTPUT_KEYS = ["img", "img_size", "ori_img", "ori_img_size"]
  51. DEAULT_INPUTS = {"img": "img"}
  52. DEAULT_OUTPUTS = {
  53. "img": "img",
  54. "input_path": "input_path",
  55. "img_size": "img_size",
  56. "ori_img": "ori_img",
  57. "ori_img_size": "ori_img_size",
  58. }
  59. _FLAGS_DICT = {
  60. "BGR": cv2.IMREAD_COLOR,
  61. "RGB": cv2.IMREAD_COLOR,
  62. "GRAY": cv2.IMREAD_GRAYSCALE,
  63. }
  64. SUFFIX = ["jpg", "png", "jpeg", "JPEG", "JPG", "bmp", "PDF", "pdf"]
  65. def __init__(self, batch_size=1, format="BGR"):
  66. """
  67. Initialize the instance.
  68. Args:
  69. format (str, optional): Target color format to convert the image to.
  70. Choices are 'BGR', 'RGB', and 'GRAY'. Default: 'BGR'.
  71. """
  72. super().__init__(batch_size)
  73. self.format = format
  74. flags = self._FLAGS_DICT[self.format]
  75. self._img_reader = ImageReader(backend="opencv", flags=flags)
  76. self._pdf_reader = PDFReader()
  77. self._writer = ImageWriter(backend="opencv")
  78. def apply(self, img):
  79. """apply"""
  80. if not isinstance(img, str):
  81. with tempfile.NamedTemporaryFile(suffix=".png", delete=True) as temp_file:
  82. img_path = Path(temp_file.name)
  83. self._writer.write(img_path, img)
  84. yield [
  85. {
  86. "input_path": img_path,
  87. "img": img,
  88. "img_size": [img.shape[1], img.shape[0]],
  89. "ori_img": deepcopy(img),
  90. "ori_img_size": deepcopy([img.shape[1], img.shape[0]]),
  91. }
  92. ]
  93. else:
  94. file_path = img
  95. file_path = self._download_from_url(file_path)
  96. file_list = self._get_files_list(file_path)
  97. batch = []
  98. for file_path in file_list:
  99. img = self._read_img(file_path)
  100. batch.extend(img)
  101. if len(batch) >= self.batch_size:
  102. yield batch
  103. batch = []
  104. if len(batch) > 0:
  105. yield batch
  106. def _read(self, file_path):
  107. if file_path:
  108. return self._read_pdf(file_path)
  109. else:
  110. return self._read_img(file_path)
  111. def _read_img(self, img_path):
  112. blob = self._img_reader.read(img_path)
  113. if blob is None:
  114. raise Exception("Image read Error")
  115. if self.format == "RGB":
  116. if blob.ndim != 3:
  117. raise RuntimeError("Array is not 3-dimensional.")
  118. # BGR to RGB
  119. blob = blob[..., ::-1]
  120. return [
  121. {
  122. "input_path": img_path,
  123. "img": blob,
  124. "img_size": [blob.shape[1], blob.shape[0]],
  125. "ori_img": deepcopy(blob),
  126. "ori_img_size": deepcopy([blob.shape[1], blob.shape[0]]),
  127. }
  128. ]
  129. def _read_pdf(self, pdf_path):
  130. img_list = self._pdf_reader.read(pdf_path)
  131. return [
  132. {
  133. "input_path": pdf_path,
  134. "img": img,
  135. "img_size": [img.shape[1], img.shape[0]],
  136. "ori_img": deepcopy(img),
  137. "ori_img_size": deepcopy([img.shape[1], img.shape[0]]),
  138. }
  139. for img in img_list
  140. ]
  141. class GetImageInfo(BaseComponent):
  142. """Get Image Info"""
  143. INPUT_KEYS = "img"
  144. OUTPUT_KEYS = "img_size"
  145. DEAULT_INPUTS = {"img": "img"}
  146. DEAULT_OUTPUTS = {"img_size": "img_size"}
  147. def __init__(self):
  148. super().__init__()
  149. def apply(self, img):
  150. """apply"""
  151. return {"img_size": [img.shape[1], img.shape[0]]}
  152. class Flip(BaseComponent):
  153. """Flip the image vertically or horizontally."""
  154. INPUT_KEYS = "img"
  155. OUTPUT_KEYS = "img"
  156. DEAULT_INPUTS = {"img": "img"}
  157. DEAULT_OUTPUTS = {"img": "img"}
  158. def __init__(self, mode="H"):
  159. """
  160. Initialize the instance.
  161. Args:
  162. mode (str, optional): 'H' for horizontal flipping and 'V' for vertical
  163. flipping. Default: 'H'.
  164. """
  165. super().__init__()
  166. if mode not in ("H", "V"):
  167. raise ValueError("`mode` should be 'H' or 'V'.")
  168. self.mode = mode
  169. def apply(self, img):
  170. """apply"""
  171. if self.mode == "H":
  172. img = F.flip_h(img)
  173. elif self.mode == "V":
  174. img = F.flip_v(img)
  175. return {"img": img}
  176. class Crop(BaseComponent):
  177. """Crop region from the image."""
  178. INPUT_KEYS = "img"
  179. OUTPUT_KEYS = ["img", "img_size"]
  180. DEAULT_INPUTS = {"img": "img"}
  181. DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}
  182. def __init__(self, crop_size, mode="C"):
  183. """
  184. Initialize the instance.
  185. Args:
  186. crop_size (list|tuple|int): Width and height of the region to crop.
  187. mode (str, optional): 'C' for cropping the center part and 'TL' for
  188. cropping the top left part. Default: 'C'.
  189. """
  190. super().__init__()
  191. if isinstance(crop_size, int):
  192. crop_size = [crop_size, crop_size]
  193. _check_image_size(crop_size)
  194. self.crop_size = crop_size
  195. if mode not in ("C", "TL"):
  196. raise ValueError("Unsupported interpolation method")
  197. self.mode = mode
  198. def apply(self, img):
  199. """apply"""
  200. h, w = img.shape[:2]
  201. cw, ch = self.crop_size
  202. if self.mode == "C":
  203. x1 = max(0, (w - cw) // 2)
  204. y1 = max(0, (h - ch) // 2)
  205. elif self.mode == "TL":
  206. x1, y1 = 0, 0
  207. x2 = min(w, x1 + cw)
  208. y2 = min(h, y1 + ch)
  209. coords = (x1, y1, x2, y2)
  210. if coords == (0, 0, w, h):
  211. raise ValueError(
  212. f"Input image ({w}, {h}) smaller than the target size ({cw}, {ch})."
  213. )
  214. img = F.slice(img, coords=coords)
  215. return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
  216. class _BaseResize(BaseComponent):
  217. _INTERP_DICT = {
  218. "NEAREST": cv2.INTER_NEAREST,
  219. "LINEAR": cv2.INTER_LINEAR,
  220. "CUBIC": cv2.INTER_CUBIC,
  221. "AREA": cv2.INTER_AREA,
  222. "LANCZOS4": cv2.INTER_LANCZOS4,
  223. }
  224. def __init__(self, size_divisor, interp):
  225. super().__init__()
  226. if size_divisor is not None:
  227. assert isinstance(
  228. size_divisor, int
  229. ), "`size_divisor` should be None or int."
  230. self.size_divisor = size_divisor
  231. try:
  232. interp = self._INTERP_DICT[interp]
  233. except KeyError:
  234. raise ValueError(
  235. "`interp` should be one of {}.".format(self._INTERP_DICT.keys())
  236. )
  237. self.interp = interp
  238. @staticmethod
  239. def _rescale_size(img_size, target_size):
  240. """rescale size"""
  241. scale = min(max(target_size) / max(img_size), min(target_size) / min(img_size))
  242. rescaled_size = [round(i * scale) for i in img_size]
  243. return rescaled_size, scale
  244. class Resize(_BaseResize):
  245. """Resize the image."""
  246. INPUT_KEYS = "img"
  247. OUTPUT_KEYS = ["img", "img_size", "scale_factors"]
  248. DEAULT_INPUTS = {"img": "img"}
  249. DEAULT_OUTPUTS = {
  250. "img": "img",
  251. "img_size": "img_size",
  252. "scale_factors": "scale_factors",
  253. }
  254. def __init__(
  255. self, target_size, keep_ratio=False, size_divisor=None, interp="LINEAR"
  256. ):
  257. """
  258. Initialize the instance.
  259. Args:
  260. target_size (list|tuple|int): Target width and height.
  261. keep_ratio (bool, optional): Whether to keep the aspect ratio of resized
  262. image. Default: False.
  263. size_divisor (int|None, optional): Divisor of resized image size.
  264. Default: None.
  265. interp (str, optional): Interpolation method. Choices are 'NEAREST',
  266. 'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
  267. """
  268. super().__init__(size_divisor=size_divisor, interp=interp)
  269. if isinstance(target_size, int):
  270. target_size = [target_size, target_size]
  271. _check_image_size(target_size)
  272. self.target_size = target_size
  273. self.keep_ratio = keep_ratio
  274. def apply(self, img):
  275. """apply"""
  276. target_size = self.target_size
  277. original_size = img.shape[:2]
  278. if self.keep_ratio:
  279. h, w = img.shape[0:2]
  280. target_size, _ = self._rescale_size((h, w), self.target_size)
  281. if self.size_divisor:
  282. target_size = [
  283. math.ceil(i / self.size_divisor) * self.size_divisor
  284. for i in target_size
  285. ]
  286. img_scale_w, img_scale_h = [
  287. target_size[1] / original_size[1],
  288. target_size[0] / original_size[0],
  289. ]
  290. img = F.resize(img, target_size, interp=self.interp)
  291. return {
  292. "img": img,
  293. "img_size": [img.shape[1], img.shape[0]],
  294. "scale_factors": [img_scale_w, img_scale_h],
  295. }
  296. class ResizeByLong(_BaseResize):
  297. """
  298. Proportionally resize the image by specifying the target length of the
  299. longest side.
  300. """
  301. INPUT_KEYS = "img"
  302. OUTPUT_KEYS = ["img", "img_size"]
  303. DEAULT_INPUTS = {"img": "img"}
  304. DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}
  305. def __init__(self, target_long_edge, size_divisor=None, interp="LINEAR"):
  306. """
  307. Initialize the instance.
  308. Args:
  309. target_long_edge (int): Target length of the longest side of image.
  310. size_divisor (int|None, optional): Divisor of resized image size.
  311. Default: None.
  312. interp (str, optional): Interpolation method. Choices are 'NEAREST',
  313. 'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
  314. """
  315. super().__init__(size_divisor=size_divisor, interp=interp)
  316. self.target_long_edge = target_long_edge
  317. def apply(self, img):
  318. """apply"""
  319. h, w = img.shape[:2]
  320. scale = self.target_long_edge / max(h, w)
  321. h_resize = round(h * scale)
  322. w_resize = round(w * scale)
  323. if self.size_divisor is not None:
  324. h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
  325. w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
  326. img = F.resize(img, (w_resize, h_resize), interp=self.interp)
  327. return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
  328. class ResizeByShort(_BaseResize):
  329. """
  330. Proportionally resize the image by specifying the target length of the
  331. shortest side.
  332. """
  333. INPUT_KEYS = "img"
  334. OUTPUT_KEYS = ["img", "img_size"]
  335. DEAULT_INPUTS = {"img": "img"}
  336. DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}
  337. def __init__(self, target_short_edge, size_divisor=None, interp="LINEAR"):
  338. """
  339. Initialize the instance.
  340. Args:
  341. target_short_edge (int): Target length of the shortest side of image.
  342. size_divisor (int|None, optional): Divisor of resized image size.
  343. Default: None.
  344. interp (str, optional): Interpolation method. Choices are 'NEAREST',
  345. 'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
  346. """
  347. super().__init__(size_divisor=size_divisor, interp=interp)
  348. self.target_short_edge = target_short_edge
  349. def apply(self, img):
  350. """apply"""
  351. h, w = img.shape[:2]
  352. scale = self.target_short_edge / min(h, w)
  353. h_resize = round(h * scale)
  354. w_resize = round(w * scale)
  355. if self.size_divisor is not None:
  356. h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
  357. w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
  358. img = F.resize(img, (w_resize, h_resize), interp=self.interp)
  359. return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
  360. class Pad(BaseComponent):
  361. """Pad the image."""
  362. INPUT_KEYS = "img"
  363. OUTPUT_KEYS = ["img", "img_size"]
  364. DEAULT_INPUTS = {"img": "img"}
  365. DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}
  366. def __init__(self, target_size, val=127.5):
  367. """
  368. Initialize the instance.
  369. Args:
  370. target_size (list|tuple|int): Target width and height of the image after
  371. padding.
  372. val (float, optional): Value to fill the padded area. Default: 127.5.
  373. """
  374. super().__init__()
  375. if isinstance(target_size, int):
  376. target_size = [target_size, target_size]
  377. _check_image_size(target_size)
  378. self.target_size = target_size
  379. self.val = val
  380. def apply(self, img):
  381. """apply"""
  382. h, w = img.shape[:2]
  383. tw, th = self.target_size
  384. ph = th - h
  385. pw = tw - w
  386. if ph < 0 or pw < 0:
  387. raise ValueError(
  388. f"Input image ({w}, {h}) smaller than the target size ({tw}, {th})."
  389. )
  390. else:
  391. img = F.pad(img, pad=(0, ph, 0, pw), val=self.val)
  392. return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
  393. class PadStride(BaseComponent):
  394. """padding image for model with FPN , instead PadBatch(pad_to_stride, pad_gt) in original config
  395. Args:
  396. stride (bool): model with FPN need image shape % stride == 0
  397. """
  398. INPUT_KEYS = "img"
  399. OUTPUT_KEYS = "img"
  400. DEAULT_INPUTS = {"img": "img"}
  401. DEAULT_OUTPUTS = {"img": "img"}
  402. def __init__(self, stride=0):
  403. super().__init__()
  404. self.coarsest_stride = stride
  405. def apply(self, img):
  406. """
  407. Args:
  408. im (np.ndarray): image (np.ndarray)
  409. Returns:
  410. im (np.ndarray): processed image (np.ndarray)
  411. """
  412. im = img
  413. coarsest_stride = self.coarsest_stride
  414. if coarsest_stride <= 0:
  415. return {"img": im}
  416. im_c, im_h, im_w = im.shape
  417. pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
  418. pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
  419. padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
  420. padding_im[:, :im_h, :im_w] = im
  421. return {"img": padding_im}
  422. class Normalize(BaseComponent):
  423. """Normalize the image."""
  424. INPUT_KEYS = "img"
  425. OUTPUT_KEYS = "img"
  426. DEAULT_INPUTS = {"img": "img"}
  427. DEAULT_OUTPUTS = {"img": "img"}
  428. def __init__(self, scale=1.0 / 255, mean=0.5, std=0.5, preserve_dtype=False):
  429. """
  430. Initialize the instance.
  431. Args:
  432. scale (float, optional): Scaling factor to apply to the image before
  433. applying normalization. Default: 1/255.
  434. mean (float|tuple|list, optional): Means for each channel of the image.
  435. Default: 0.5.
  436. std (float|tuple|list, optional): Standard deviations for each channel
  437. of the image. Default: 0.5.
  438. preserve_dtype (bool, optional): Whether to preserve the original dtype
  439. of the image.
  440. """
  441. super().__init__()
  442. self.scale = np.float32(scale)
  443. if isinstance(mean, float):
  444. mean = [mean]
  445. self.mean = np.asarray(mean).astype("float32")
  446. if isinstance(std, float):
  447. std = [std]
  448. self.std = np.asarray(std).astype("float32")
  449. self.preserve_dtype = preserve_dtype
  450. def apply(self, img):
  451. """apply"""
  452. old_type = img.dtype
  453. # XXX: If `old_type` has higher precision than float32,
  454. # we will lose some precision.
  455. img = img.astype("float32", copy=False)
  456. img *= self.scale
  457. img -= self.mean
  458. img /= self.std
  459. if self.preserve_dtype:
  460. img = img.astype(old_type, copy=False)
  461. return {"img": img}
  462. class ToCHWImage(BaseComponent):
  463. """Reorder the dimensions of the image from HWC to CHW."""
  464. INPUT_KEYS = "img"
  465. OUTPUT_KEYS = "img"
  466. DEAULT_INPUTS = {"img": "img"}
  467. DEAULT_OUTPUTS = {"img": "img"}
  468. def apply(self, img):
  469. """apply"""
  470. img = img.transpose((2, 0, 1))
  471. return {"img": img}