common.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from pathlib import Path
from copy import deepcopy

import numpy as np
import cv2

from .....utils.flags import INFER_BENCHMARK, INFER_BENCHMARK_DATA_SIZE
from .....utils.cache import CACHE_DIR, temp_file_manager
from ....utils.io import ImageReader, ImageWriter, PDFReader
from ...base import BaseComponent
from ..read_data import _BaseRead
from . import funcs as F

__all__ = [
    "ReadImage",
    "Flip",
    "Crop",
    "Resize",
    "ResizeByLong",
    "ResizeByShort",
    "Pad",
    "Normalize",
    "ToCHWImage",
    "PadStride",
]


def _check_image_size(input_):
    """check image size"""
    if not (
        isinstance(input_, (list, tuple))
        and len(input_) == 2
        and isinstance(input_[0], int)
        and isinstance(input_[1], int)
    ):
        raise TypeError(f"{input_} cannot represent a valid image size.")


class ReadImage(_BaseRead):
    """Load image from the file."""

    INPUT_KEYS = ["img"]
    OUTPUT_KEYS = ["img", "img_size", "ori_img", "ori_img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {
        "img": "img",
        "input_path": "input_path",
        "img_size": "img_size",
        "ori_img": "ori_img",
        "ori_img_size": "ori_img_size",
    }

    _FLAGS_DICT = {
        "BGR": cv2.IMREAD_COLOR,
        "RGB": cv2.IMREAD_COLOR,
        "GRAY": cv2.IMREAD_GRAYSCALE,
    }

    SUFFIX = ["jpg", "png", "jpeg", "JPEG", "JPG", "bmp", "PDF", "pdf"]

    def __init__(self, batch_size=1, format="BGR"):
        """
        Initialize the instance.

        Args:
            batch_size (int, optional): Number of samples to yield per batch.
                Default: 1.
            format (str, optional): Target color format to convert the image to.
                Choices are 'BGR', 'RGB', and 'GRAY'. Default: 'BGR'.
        """
        super().__init__(batch_size)
        self.format = format
        flags = self._FLAGS_DICT[self.format]
        self._img_reader = ImageReader(backend="opencv", flags=flags)
        self._pdf_reader = PDFReader()
        self._writer = ImageWriter(backend="opencv")

    def apply(self, img):
        """apply"""

        def process_ndarray(img):
            with temp_file_manager.temp_file_context(suffix=".png") as temp_file:
                img_path = Path(temp_file.name)
                self._writer.write(img_path, img)
                if self.format == "RGB":
                    img = img[:, :, ::-1]
                return {
                    "input_path": img_path,
                    "img": img,
                    "img_size": [img.shape[1], img.shape[0]],
                    "ori_img": deepcopy(img),
                    "ori_img_size": deepcopy([img.shape[1], img.shape[0]]),
                }

        if INFER_BENCHMARK and img is None:
            size = int(INFER_BENCHMARK_DATA_SIZE)
            yield [
                process_ndarray(
                    np.random.randint(0, 256, (size, size, 3), dtype=np.uint8)
                )
                for _ in range(self.batch_size)
            ]
        elif isinstance(img, np.ndarray):
            yield [process_ndarray(img)]
        elif isinstance(img, str):
            file_path = img
            file_path = self._download_from_url(file_path)
            file_list = self._get_files_list(file_path)
            batch = []
            for file_path in file_list:
                img = self._read(file_path)
                batch.extend(img)
                if len(batch) >= self.batch_size:
                    yield batch
                    batch = []
            if len(batch) > 0:
                yield batch
        else:
            raise TypeError(
                f"ReadImage only supports the following types:\n"
                f"1. str, indicating an image file path or a directory containing image files.\n"
                f"2. numpy.ndarray.\n"
                f"However, got type: {type(img).__name__}."
            )

    def _read(self, file_path):
        if str(file_path).lower().endswith(".pdf"):
            return self._read_pdf(file_path)
        else:
            return self._read_img(file_path)

    def _read_img(self, img_path):
        blob = self._img_reader.read(img_path)
        if blob is None:
            raise Exception("Image read Error")
        if self.format == "RGB":
            if blob.ndim != 3:
                raise RuntimeError("Array is not 3-dimensional.")
            # BGR to RGB
            blob = blob[..., ::-1]
        return [
            {
                "input_path": img_path,
                "img": blob,
                "img_size": [blob.shape[1], blob.shape[0]],
                "ori_img": deepcopy(blob),
                "ori_img_size": deepcopy([blob.shape[1], blob.shape[0]]),
            }
        ]

    def _read_pdf(self, pdf_path):
        img_list = self._pdf_reader.read(pdf_path)
        return [
            {
                "input_path": pdf_path,
                "img": img,
                "img_size": [img.shape[1], img.shape[0]],
                "ori_img": deepcopy(img),
                "ori_img_size": deepcopy([img.shape[1], img.shape[0]]),
            }
            for img in img_list
        ]
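
# Usage sketch (illustrative only, not part of the original module; assumes the
# relative imports above resolve inside the package and that the path exists):
#
#     reader = ReadImage(batch_size=2, format="RGB")
#     for batch in reader.apply("path/to/images_or_pdfs"):
#         for sample in batch:
#             print(sample["input_path"], sample["img_size"])  # [width, height]
#
# `apply` is a generator: an ndarray input yields a single-element batch, while a
# file path or directory is expanded to files and yielded in batches of `batch_size`.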


class GetImageInfo(BaseComponent):
    """Get Image Info"""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img_size"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img_size": "img_size"}

    def __init__(self):
        super().__init__()

    def apply(self, img):
        """apply"""
        return {"img_size": [img.shape[1], img.shape[0]]}


class Flip(BaseComponent):
    """Flip the image vertically or horizontally."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def __init__(self, mode="H"):
        """
        Initialize the instance.

        Args:
            mode (str, optional): 'H' for horizontal flipping and 'V' for vertical
                flipping. Default: 'H'.
        """
        super().__init__()
        if mode not in ("H", "V"):
            raise ValueError("`mode` should be 'H' or 'V'.")
        self.mode = mode

    def apply(self, img):
        """apply"""
        if self.mode == "H":
            img = F.flip_h(img)
        elif self.mode == "V":
            img = F.flip_v(img)
        return {"img": img}
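
# Usage sketch (illustrative only): flip an HWC ndarray horizontally.
#
#     flipped = Flip(mode="H").apply(img)["img"]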


class Crop(BaseComponent):
    """Crop region from the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, crop_size, mode="C"):
        """
        Initialize the instance.

        Args:
            crop_size (list|tuple|int): Width and height of the region to crop.
            mode (str, optional): 'C' for cropping the center part and 'TL' for
                cropping the top left part. Default: 'C'.
        """
        super().__init__()
        if isinstance(crop_size, int):
            crop_size = [crop_size, crop_size]
        _check_image_size(crop_size)
        self.crop_size = crop_size
        if mode not in ("C", "TL"):
            raise ValueError("`mode` should be 'C' or 'TL'.")
        self.mode = mode

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        cw, ch = self.crop_size
        if self.mode == "C":
            x1 = max(0, (w - cw) // 2)
            y1 = max(0, (h - ch) // 2)
        elif self.mode == "TL":
            x1, y1 = 0, 0
        x2 = min(w, x1 + cw)
        y2 = min(h, y1 + ch)
        coords = (x1, y1, x2, y2)
        if coords == (0, 0, w, h):
            raise ValueError(
                f"Input image ({w}, {h}) smaller than the target size ({cw}, {ch})."
            )
        img = F.slice(img, coords=coords)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
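
# Usage sketch (illustrative only): center-crop a 224x224 region; the returned
# "img_size" is reported as [width, height].
#
#     out = Crop(crop_size=224, mode="C").apply(img)
#     cropped, (cw, ch) = out["img"], out["img_size"]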


class _BaseResize(BaseComponent):
    _INTERP_DICT = {
        "NEAREST": cv2.INTER_NEAREST,
        "LINEAR": cv2.INTER_LINEAR,
        "CUBIC": cv2.INTER_CUBIC,
        "AREA": cv2.INTER_AREA,
        "LANCZOS4": cv2.INTER_LANCZOS4,
    }

    def __init__(self, size_divisor, interp):
        super().__init__()
        if size_divisor is not None:
            assert isinstance(
                size_divisor, int
            ), "`size_divisor` should be None or int."
        self.size_divisor = size_divisor
        try:
            interp = self._INTERP_DICT[interp]
        except KeyError:
            raise ValueError(
                "`interp` should be one of {}.".format(self._INTERP_DICT.keys())
            )
        self.interp = interp

    @staticmethod
    def _rescale_size(img_size, target_size):
        """rescale size"""
        scale = min(max(target_size) / max(img_size), min(target_size) / min(img_size))
        rescaled_size = [round(i * scale) for i in img_size]
        return rescaled_size, scale


class Resize(_BaseResize):
    """Resize the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size", "scale_factors"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {
        "img": "img",
        "img_size": "img_size",
        "scale_factors": "scale_factors",
    }

    def __init__(
        self, target_size, keep_ratio=False, size_divisor=None, interp="LINEAR"
    ):
        """
        Initialize the instance.

        Args:
            target_size (list|tuple|int): Target width and height.
            keep_ratio (bool, optional): Whether to keep the aspect ratio of resized
                image. Default: False.
            size_divisor (int|None, optional): Divisor of resized image size.
                Default: None.
            interp (str, optional): Interpolation method. Choices are 'NEAREST',
                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
        """
        super().__init__(size_divisor=size_divisor, interp=interp)
        if isinstance(target_size, int):
            target_size = [target_size, target_size]
        _check_image_size(target_size)
        self.target_size = target_size
        self.keep_ratio = keep_ratio

    def apply(self, img):
        """apply"""
        target_size = self.target_size
        original_size = img.shape[:2][::-1]
        if self.keep_ratio:
            h, w = img.shape[0:2]
            target_size, _ = self._rescale_size((w, h), self.target_size)
        if self.size_divisor:
            target_size = [
                math.ceil(i / self.size_divisor) * self.size_divisor
                for i in target_size
            ]
        img_scale_w, img_scale_h = [
            target_size[0] / original_size[0],
            target_size[1] / original_size[1],
        ]
        img = F.resize(img, target_size, interp=self.interp)
        return {
            "img": img,
            "img_size": [img.shape[1], img.shape[0]],
            "scale_factors": [img_scale_w, img_scale_h],
        }
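
# Usage sketch (illustrative only): resize to 640x640 while keeping the aspect
# ratio and rounding each side up to a multiple of 32; "scale_factors" holds the
# per-axis ratios (target / original) for width and height.
#
#     out = Resize(target_size=640, keep_ratio=True, size_divisor=32).apply(img)
#     resized, (sw, sh) = out["img"], out["scale_factors"]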


class ResizeByLong(_BaseResize):
    """
    Proportionally resize the image by specifying the target length of the
    longest side.
    """

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, target_long_edge, size_divisor=None, interp="LINEAR"):
        """
        Initialize the instance.

        Args:
            target_long_edge (int): Target length of the longest side of image.
            size_divisor (int|None, optional): Divisor of resized image size.
                Default: None.
            interp (str, optional): Interpolation method. Choices are 'NEAREST',
                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
        """
        super().__init__(size_divisor=size_divisor, interp=interp)
        self.target_long_edge = target_long_edge

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        scale = self.target_long_edge / max(h, w)
        h_resize = round(h * scale)
        w_resize = round(w * scale)
        if self.size_divisor is not None:
            h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
            w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
        img = F.resize(img, (w_resize, h_resize), interp=self.interp)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
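
# Usage sketch (illustrative only): a 1200x800 (WxH) image with
# target_long_edge=600 is scaled by 600/1200 = 0.5, giving 600x400; with
# size_divisor=32, both sides are then rounded up to 608x416.
#
#     out = ResizeByLong(target_long_edge=600, size_divisor=32).apply(img)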


class ResizeByShort(_BaseResize):
    """
    Proportionally resize the image by specifying the target length of the
    shortest side.
    """

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, target_short_edge, size_divisor=None, interp="LINEAR"):
        """
        Initialize the instance.

        Args:
            target_short_edge (int): Target length of the shortest side of image.
            size_divisor (int|None, optional): Divisor of resized image size.
                Default: None.
            interp (str, optional): Interpolation method. Choices are 'NEAREST',
                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
        """
        super().__init__(size_divisor=size_divisor, interp=interp)
        self.target_short_edge = target_short_edge

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        scale = self.target_short_edge / min(h, w)
        h_resize = round(h * scale)
        w_resize = round(w * scale)
        if self.size_divisor is not None:
            h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
            w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
        img = F.resize(img, (w_resize, h_resize), interp=self.interp)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
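
# Usage sketch (illustrative only): with target_short_edge=736, a 1200x800 (WxH)
# image is scaled by 736/800 = 0.92, giving 1104x736.
#
#     out = ResizeByShort(target_short_edge=736).apply(img)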


class Pad(BaseComponent):
    """Pad the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, target_size, val=127.5):
        """
        Initialize the instance.

        Args:
            target_size (list|tuple|int): Target width and height of the image after
                padding.
            val (float, optional): Value to fill the padded area. Default: 127.5.
        """
        super().__init__()
        if isinstance(target_size, int):
            target_size = [target_size, target_size]
        _check_image_size(target_size)
        self.target_size = target_size
        self.val = val

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        tw, th = self.target_size
        ph = th - h
        pw = tw - w
        if ph < 0 or pw < 0:
            raise ValueError(
                f"Input image ({w}, {h}) larger than the target size ({tw}, {th})."
            )
        else:
            img = F.pad(img, pad=(0, ph, 0, pw), val=self.val)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
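
# Usage sketch (illustrative only): pad a 600x400 (WxH) image up to 640x640,
# filling the added area with 114.
#
#     out = Pad(target_size=640, val=114).apply(img)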


class PadStride(BaseComponent):
    """Pad the image so that its height and width are multiples of `stride`.
    Replaces PadBatch(pad_to_stride, pad_gt) in the original config, for models
    with an FPN.

    Args:
        stride (int): Models with an FPN require the image shape to satisfy
            shape % stride == 0.
    """

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def __init__(self, stride=0):
        super().__init__()
        self.coarsest_stride = stride

    def apply(self, img):
        """
        Args:
            img (np.ndarray): Image in CHW layout.

        Returns:
            dict: Processed image (np.ndarray) under the "img" key.
        """
        im = img
        coarsest_stride = self.coarsest_stride
        if coarsest_stride <= 0:
            return {"img": im}
        im_c, im_h, im_w = im.shape
        pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
        pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
        padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
        padding_im[:, :im_h, :im_w] = im
        return {"img": padding_im}
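
# Usage sketch (illustrative only): with stride=32, a 3x416x600 CHW array is
# zero-padded to 3x416x608, since ceil(600 / 32) * 32 = 608 and 416 is already a
# multiple of 32.
#
#     out = PadStride(stride=32).apply(chw_img)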


class Normalize(BaseComponent):
    """Normalize the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def __init__(self, scale=1.0 / 255, mean=0.5, std=0.5, preserve_dtype=False):
        """
        Initialize the instance.

        Args:
            scale (float, optional): Scaling factor to apply to the image before
                applying normalization. Default: 1/255.
            mean (float|tuple|list, optional): Means for each channel of the image.
                Default: 0.5.
            std (float|tuple|list, optional): Standard deviations for each channel
                of the image. Default: 0.5.
            preserve_dtype (bool, optional): Whether to preserve the original dtype
                of the image. Default: False.
        """
        super().__init__()
        self.scale = np.float32(scale)
        if isinstance(mean, float):
            mean = [mean]
        self.mean = np.asarray(mean).astype("float32")
        if isinstance(std, float):
            std = [std]
        self.std = np.asarray(std).astype("float32")
        self.preserve_dtype = preserve_dtype

    def apply(self, img):
        """apply"""
        old_type = img.dtype
        # XXX: If `old_type` has higher precision than float32,
        # we will lose some precision.
        img = img.astype("float32", copy=False)
        img *= self.scale
        img -= self.mean
        img /= self.std
        if self.preserve_dtype:
            img = img.astype(old_type, copy=False)
        return {"img": img}
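
# Usage sketch (illustrative only): the transform computes
# (img * scale - mean) / std per channel. With ImageNet-style statistics on an
# HWC uint8 image:
#
#     norm = Normalize(
#         scale=1.0 / 255,
#         mean=[0.485, 0.456, 0.406],
#         std=[0.229, 0.224, 0.225],
#     )
#     out = norm.apply(img)["img"]  # float32 unless preserve_dtype=True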


class ToCHWImage(BaseComponent):
    """Reorder the dimensions of the image from HWC to CHW."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def apply(self, img):
        """apply"""
        img = img.transpose((2, 0, 1))
        return {"img": img}
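
# Usage sketch (illustrative only): chaining the components by hand on a single
# HWC ndarray, mirroring a typical detection preprocessing order; in the real
# pipelines the surrounding framework presumably wires components together via
# the INPUT_KEYS / OUTPUT_KEYS declarations rather than manual calls.
#
#     img = ResizeByShort(target_short_edge=736, size_divisor=32).apply(img)["img"]
#     img = Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]).apply(img)["img"]
#     img = ToCHWImage().apply(img)["img"]
#     img = PadStride(stride=32).apply(img)["img"]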