# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import math
from pathlib import Path
from copy import deepcopy

import numpy as np
import cv2

from .....utils.download import download
from .....utils.cache import CACHE_DIR
from ....utils.io import ImageReader, ImageWriter
from ...base import BaseComponent
from . import funcs as F

__all__ = [
    "ReadImage",
    "Flip",
    "Crop",
    "Resize",
    "ResizeByLong",
    "ResizeByShort",
    "Pad",
    "Normalize",
    "ToCHWImage",
]


def _check_image_size(input_):
    """check image size"""
    if not (
        isinstance(input_, (list, tuple))
        and len(input_) == 2
        and isinstance(input_[0], int)
        and isinstance(input_[1], int)
    ):
        raise TypeError(f"{input_} cannot represent a valid image size.")
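

# For example (illustrative, not executed here):
#   _check_image_size([224, 224])    # passes silently
#   _check_image_size(224)           # raises TypeError
#   _check_image_size((224, 224.0))  # raises TypeError (floats are rejected)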


class ReadImage(BaseComponent):
    """Load image from the file."""

    INPUT_KEYS = ["img"]
    OUTPUT_KEYS = ["img", "img_size", "ori_img", "ori_img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {
        "img": "img",
        "img_path": "img_path",
        "img_size": "img_size",
        "ori_img": "ori_img",
        "ori_img_size": "ori_img_size",
    }

    _FLAGS_DICT = {
        "BGR": cv2.IMREAD_COLOR,
        "RGB": cv2.IMREAD_COLOR,
        "GRAY": cv2.IMREAD_GRAYSCALE,
    }

    SUFFIX = ["jpg", "png", "jpeg", "JPEG", "JPG", "bmp"]

    def __init__(self, batch_size=1, format="BGR"):
        """
        Initialize the instance.

        Args:
            batch_size (int, optional): Number of images to yield per batch.
                Default: 1.
            format (str, optional): Target color format to convert the image to.
                Choices are 'BGR', 'RGB', and 'GRAY'. Default: 'BGR'.
        """
        super().__init__()
        self.batch_size = batch_size
        self.format = format
        flags = self._FLAGS_DICT[self.format]
        self._reader = ImageReader(backend="opencv", flags=flags)
        self._writer = ImageWriter(backend="opencv")

    def apply(self, img):
        """apply"""
        if not isinstance(img, str):
            img_path = (Path(CACHE_DIR) / "predict_input" / "tmp_img.jpg").as_posix()
            self._writer.write(img_path, img)
            yield [
                {
                    "img_path": img_path,
                    "img": img,
                    "img_size": [img.shape[1], img.shape[0]],
                    "ori_img": deepcopy(img),
                    "ori_img_size": deepcopy([img.shape[1], img.shape[0]]),
                }
            ]
        else:
            img_path = img
            # XXX: auto download for url
            img_path = self._download_from_url(img_path)
            image_list = self._get_image_list(img_path)
            batch = []
            for img_path in image_list:
                img = self._read_img(img_path)
                batch.append(img)
                if len(batch) >= self.batch_size:
                    yield batch
                    batch = []
            if len(batch) > 0:
                yield batch

    def _read_img(self, img_path):
        blob = self._reader.read(img_path)
        if blob is None:
            raise Exception("Image read Error")
        if self.format == "RGB":
            if blob.ndim != 3:
                raise RuntimeError("Array is not 3-dimensional.")
            # BGR to RGB
            blob = blob[..., ::-1]
        return {
            "img_path": img_path,
            "img": blob,
            "img_size": [blob.shape[1], blob.shape[0]],
            "ori_img": deepcopy(blob),
            "ori_img_size": deepcopy([blob.shape[1], blob.shape[0]]),
        }

    def _download_from_url(self, in_path):
        if in_path.startswith("http"):
            file_name = Path(in_path).name
            save_path = Path(CACHE_DIR) / "predict_input" / file_name
            download(in_path, save_path, overwrite=True)
            return save_path.as_posix()
        return in_path

    def _get_image_list(self, img_file):
        imgs_lists = []
        if img_file is None or not os.path.exists(img_file):
            raise Exception(f"Not found any img file in path: {img_file}")
        if os.path.isfile(img_file) and img_file.split(".")[-1] in self.SUFFIX:
            imgs_lists.append(img_file)
        elif os.path.isdir(img_file):
            for root, dirs, files in os.walk(img_file):
                for single_file in files:
                    if single_file.split(".")[-1] in self.SUFFIX:
                        imgs_lists.append(os.path.join(root, single_file))
        if len(imgs_lists) == 0:
            raise Exception("not found any img file in {}".format(img_file))
        imgs_lists = sorted(imgs_lists)
        return imgs_lists

    def set_batch_size(self, batch_size):
        self.batch_size = batch_size
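

# Usage sketch (illustrative, not executed here): iterating a folder of images in
# batches of two via the generator returned by `ReadImage.apply`. The path is a
# placeholder; the relative imports above assume this module is used from inside
# its package.
#
#   reader = ReadImage(batch_size=2, format="RGB")
#   for batch in reader.apply("path/to/images"):
#       for item in batch:
#           print(item["img_path"], item["img_size"])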


class GetImageInfo(BaseComponent):
    """Get Image Info"""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img_size"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img_size": "img_size"}

    def __init__(self):
        super().__init__()

    def apply(self, img):
        """apply"""
        return {"img_size": [img.shape[1], img.shape[0]]}


class Flip(BaseComponent):
    """Flip the image vertically or horizontally."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def __init__(self, mode="H"):
        """
        Initialize the instance.

        Args:
            mode (str, optional): 'H' for horizontal flipping and 'V' for vertical
                flipping. Default: 'H'.
        """
        super().__init__()
        if mode not in ("H", "V"):
            raise ValueError("`mode` should be 'H' or 'V'.")
        self.mode = mode

    def apply(self, img):
        """apply"""
        if self.mode == "H":
            img = F.flip_h(img)
        elif self.mode == "V":
            img = F.flip_v(img)
        return {"img": img}
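

# For example (illustrative, not executed here; assumes F.flip_h/F.flip_v mirror
# the image as their names suggest):
#   Flip(mode="H").apply(img)["img"]  # horizontally mirrored copy of `img`
#   Flip(mode="V").apply(img)["img"]  # vertically flipped copy of `img`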


class Crop(BaseComponent):
    """Crop region from the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, crop_size, mode="C"):
        """
        Initialize the instance.

        Args:
            crop_size (list|tuple|int): Width and height of the region to crop.
            mode (str, optional): 'C' for cropping the center part and 'TL' for
                cropping the top left part. Default: 'C'.
        """
        super().__init__()
        if isinstance(crop_size, int):
            crop_size = [crop_size, crop_size]
        _check_image_size(crop_size)
        self.crop_size = crop_size
        if mode not in ("C", "TL"):
            raise ValueError("`mode` should be 'C' or 'TL'.")
        self.mode = mode

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        cw, ch = self.crop_size
        if self.mode == "C":
            x1 = max(0, (w - cw) // 2)
            y1 = max(0, (h - ch) // 2)
        elif self.mode == "TL":
            x1, y1 = 0, 0
        x2 = min(w, x1 + cw)
        y2 = min(h, y1 + ch)
        coords = (x1, y1, x2, y2)
        if coords == (0, 0, w, h):
            raise ValueError(
                f"Input image ({w}, {h}) smaller than the target size ({cw}, {ch})."
            )
        img = F.slice(img, coords=coords)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
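

# Usage sketch (illustrative, not executed here; assumes F.slice cuts the
# (x1, y1, x2, y2) box out of the HWC array):
#   dummy = np.zeros((256, 320, 3), dtype=np.uint8)
#   out = Crop(crop_size=224, mode="C").apply(dummy)
#   # coords = (48, 16, 272, 240), so out["img_size"] == [224, 224]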


class _BaseResize(BaseComponent):
    _INTERP_DICT = {
        "NEAREST": cv2.INTER_NEAREST,
        "LINEAR": cv2.INTER_LINEAR,
        "CUBIC": cv2.INTER_CUBIC,
        "AREA": cv2.INTER_AREA,
        "LANCZOS4": cv2.INTER_LANCZOS4,
    }

    def __init__(self, size_divisor, interp):
        super().__init__()
        if size_divisor is not None:
            assert isinstance(
                size_divisor, int
            ), "`size_divisor` should be None or int."
        self.size_divisor = size_divisor
        try:
            interp = self._INTERP_DICT[interp]
        except KeyError:
            raise ValueError(
                "`interp` should be one of {}.".format(self._INTERP_DICT.keys())
            )
        self.interp = interp

    @staticmethod
    def _rescale_size(img_size, target_size):
        """rescale size"""
        scale = min(max(target_size) / max(img_size), min(target_size) / min(img_size))
        rescaled_size = [round(i * scale) for i in img_size]
        return rescaled_size, scale
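

# Worked example for _rescale_size (illustrative, not executed here): for a
# 1000x600 (WxH) image and target_size [800, 800],
#   scale = min(800 / 1000, 800 / 600) = 0.8
#   rescaled_size = [round(1000 * 0.8), round(600 * 0.8)] = [800, 480]
# i.e. the image is shrunk so its longer side fits the target while the aspect
# ratio is preserved.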


class Resize(_BaseResize):
    """Resize the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size", "scale_factors"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {
        "img": "img",
        "img_size": "img_size",
        "scale_factors": "scale_factors",
    }

    def __init__(
        self, target_size, keep_ratio=False, size_divisor=None, interp="LINEAR"
    ):
        """
        Initialize the instance.

        Args:
            target_size (list|tuple|int): Target width and height.
            keep_ratio (bool, optional): Whether to keep the aspect ratio of resized
                image. Default: False.
            size_divisor (int|None, optional): Divisor of resized image size.
                Default: None.
            interp (str, optional): Interpolation method. Choices are 'NEAREST',
                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
        """
        super().__init__(size_divisor=size_divisor, interp=interp)
        if isinstance(target_size, int):
            target_size = [target_size, target_size]
        _check_image_size(target_size)
        self.target_size = target_size
        self.keep_ratio = keep_ratio

    def apply(self, img):
        """apply"""
        target_size = self.target_size
        original_size = img.shape[:2]
        if self.keep_ratio:
            h, w = img.shape[0:2]
            target_size, _ = self._rescale_size((w, h), self.target_size)
        if self.size_divisor:
            target_size = [
                math.ceil(i / self.size_divisor) * self.size_divisor
                for i in target_size
            ]
        img_scale_w, img_scale_h = [
            target_size[1] / original_size[1],
            target_size[0] / original_size[0],
        ]
        img = F.resize(img, target_size, interp=self.interp)
        return {
            "img": img,
            "img_size": [img.shape[1], img.shape[0]],
            "scale_factors": [img_scale_w, img_scale_h],
        }
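

# Worked example (illustrative, not executed here): for a 720x1280 (HxW) image,
# Resize(target_size=800, keep_ratio=True, size_divisor=32) computes
#   _rescale_size((1280, 720), [800, 800]) -> [800, 450]
#   rounding up to multiples of 32         -> [800, 480]
# and [800, 480] is the target size handed to F.resize.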


class ResizeByLong(_BaseResize):
    """
    Proportionally resize the image by specifying the target length of the
    longest side.
    """

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, target_long_edge, size_divisor=None, interp="LINEAR"):
        """
        Initialize the instance.

        Args:
            target_long_edge (int): Target length of the longest side of image.
            size_divisor (int|None, optional): Divisor of resized image size.
                Default: None.
            interp (str, optional): Interpolation method. Choices are 'NEAREST',
                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
        """
        super().__init__(size_divisor=size_divisor, interp=interp)
        self.target_long_edge = target_long_edge

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        scale = self.target_long_edge / max(h, w)
        h_resize = round(h * scale)
        w_resize = round(w * scale)
        if self.size_divisor is not None:
            h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
            w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
        img = F.resize(img, (w_resize, h_resize), interp=self.interp)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
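

# Worked example (illustrative, not executed here): for a 600x1000 (HxW) image,
# ResizeByLong(target_long_edge=512, size_divisor=32) computes
#   scale = 512 / 1000 = 0.512
#   h_resize, w_resize = round(600 * 0.512), round(1000 * 0.512) = 307, 512
#   rounding up to multiples of 32 -> 320, 512
# so F.resize receives (512, 320) as (w, h). ResizeByShort below works the same
# way, except the scale is derived from the shortest side.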


class ResizeByShort(_BaseResize):
    """
    Proportionally resize the image by specifying the target length of the
    shortest side.
    """

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, target_short_edge, size_divisor=None, interp="LINEAR"):
        """
        Initialize the instance.

        Args:
            target_short_edge (int): Target length of the shortest side of image.
            size_divisor (int|None, optional): Divisor of resized image size.
                Default: None.
            interp (str, optional): Interpolation method. Choices are 'NEAREST',
                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
        """
        super().__init__(size_divisor=size_divisor, interp=interp)
        self.target_short_edge = target_short_edge

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        scale = self.target_short_edge / min(h, w)
        h_resize = round(h * scale)
        w_resize = round(w * scale)
        if self.size_divisor is not None:
            h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
            w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
        img = F.resize(img, (w_resize, h_resize), interp=self.interp)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}


class Pad(BaseComponent):
    """Pad the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, target_size, val=127.5):
        """
        Initialize the instance.

        Args:
            target_size (list|tuple|int): Target width and height of the image after
                padding.
            val (float, optional): Value to fill the padded area. Default: 127.5.
        """
        super().__init__()
        if isinstance(target_size, int):
            target_size = [target_size, target_size]
        _check_image_size(target_size)
        self.target_size = target_size
        self.val = val

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        tw, th = self.target_size
        ph = th - h
        pw = tw - w
        if ph < 0 or pw < 0:
            raise ValueError(
                f"Input image ({w}, {h}) larger than the target size ({tw}, {th})."
            )
        else:
            img = F.pad(img, pad=(0, ph, 0, pw), val=self.val)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
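

# Usage sketch (illustrative, not executed here; assumes F.pad interprets the pad
# tuple as amounts of bottom/right padding, as the call above suggests):
#   dummy = np.zeros((480, 640, 3), dtype=np.uint8)
#   out = Pad(target_size=640).apply(dummy)
#   # ph, pw = 160, 0, so the result should be 640x640 and out["img_size"] == [640, 640]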


class Normalize(BaseComponent):
    """Normalize the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def __init__(self, scale=1.0 / 255, mean=0.5, std=0.5, preserve_dtype=False):
        """
        Initialize the instance.

        Args:
            scale (float, optional): Scaling factor to apply to the image before
                applying normalization. Default: 1/255.
            mean (float|tuple|list, optional): Means for each channel of the image.
                Default: 0.5.
            std (float|tuple|list, optional): Standard deviations for each channel
                of the image. Default: 0.5.
            preserve_dtype (bool, optional): Whether to preserve the original dtype
                of the image. Default: False.
        """
        super().__init__()
        self.scale = np.float32(scale)
        if isinstance(mean, float):
            mean = [mean]
        self.mean = np.asarray(mean).astype("float32")
        if isinstance(std, float):
            std = [std]
        self.std = np.asarray(std).astype("float32")
        self.preserve_dtype = preserve_dtype

    def apply(self, img):
        """apply"""
        old_type = img.dtype
        # XXX: If `old_type` has higher precision than float32,
        # we will lose some precision.
        img = img.astype("float32", copy=False)
        img *= self.scale
        img -= self.mean
        img /= self.std
        if self.preserve_dtype:
            img = img.astype(old_type, copy=False)
        return {"img": img}
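

# Usage sketch (illustrative, not executed here): an ImageNet-style configuration
# that scales uint8 pixels into [0, 1] and then standardizes each channel.
#   normalize = Normalize(
#       scale=1.0 / 255,
#       mean=[0.485, 0.456, 0.406],
#       std=[0.229, 0.224, 0.225],
#   )
#   out = normalize.apply(img_hwc)  # `img_hwc` is a placeholder HWC array
#   # a pixel value of 255 in channel 0 maps to (1.0 - 0.485) / 0.229 ≈ 2.249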


class ToCHWImage(BaseComponent):
    """Reorder the dimensions of the image from HWC to CHW."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def apply(self, img):
        """apply"""
        img = img.transpose((2, 0, 1))
        return {"img": img}
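

# Pipeline sketch (illustrative, not executed here): chaining the components above
# by calling their `apply` methods directly on a single in-memory image. `img_bgr`
# is a placeholder HWC uint8 array; shapes assume the F.* helpers behave as their
# call sites above suggest.
#   ops = [ResizeByShort(256), Crop(224), Normalize(), ToCHWImage()]
#   data = {"img": img_bgr}
#   for op in ops:
#       data.update(op.apply(data["img"]))
#   # data["img"] should now have shape (3, 224, 224)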