common.py 18 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570
  1. # copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. import os
  15. import math
  16. from pathlib import Path
  17. from copy import deepcopy
  18. import numpy as np
  19. import cv2
  20. from .....utils.download import download
  21. from .....utils.cache import CACHE_DIR
  22. from ....utils.io import ImageReader, ImageWriter
  23. from ...base import BaseComponent
  24. from . import funcs as F
  25. __all__ = [
  26. "ReadImage",
  27. "Flip",
  28. "Crop",
  29. "Resize",
  30. "ResizeByLong",
  31. "ResizeByShort",
  32. "Pad",
  33. "Normalize",
  34. "ToCHWImage",
  35. "PadStride",
  36. ]
  37. def _check_image_size(input_):
  38. """check image size"""
  39. if not (
  40. isinstance(input_, (list, tuple))
  41. and len(input_) == 2
  42. and isinstance(input_[0], int)
  43. and isinstance(input_[1], int)
  44. ):
  45. raise TypeError(f"{input_} cannot represent a valid image size.")
class ReadImage(BaseComponent):
    """Load image from the file.

    Accepts either an in-memory image array or a string (local file path,
    directory, or HTTP(S) URL) and yields batches of per-image record dicts.
    """

    INPUT_KEYS = ["img"]
    # NOTE(review): "img_path" is produced in every output record and listed
    # in DEAULT_OUTPUTS, but missing here — confirm whether the omission is
    # intentional.
    OUTPUT_KEYS = ["img", "img_size", "ori_img", "ori_img_size"]
    # NOTE(review): the "DEAULT_" spelling is used consistently across this
    # file — presumably it matches what BaseComponent reads; do not rename in
    # this class alone.
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {
        "img": "img",
        "img_path": "img_path",
        "img_size": "img_size",
        "ori_img": "ori_img",
        "ori_img_size": "ori_img_size",
    }

    # Color format -> OpenCV imread flag. "RGB" is read as BGR and converted
    # afterwards in _read_img.
    _FLAGS_DICT = {
        "BGR": cv2.IMREAD_COLOR,
        "RGB": cv2.IMREAD_COLOR,
        "GRAY": cv2.IMREAD_GRAYSCALE,
    }

    # File extensions accepted when collecting images from a directory.
    SUFFIX = ["jpg", "png", "jpeg", "JPEG", "JPG", "bmp"]

    def __init__(self, batch_size=1, format="BGR"):
        """
        Initialize the instance.

        Args:
            batch_size (int, optional): Number of images per yielded batch.
                Default: 1.
            format (str, optional): Target color format to convert the image to.
                Choices are 'BGR', 'RGB', and 'GRAY'. Default: 'BGR'.
        """
        super().__init__()
        self.batch_size = batch_size
        self.format = format
        flags = self._FLAGS_DICT[self.format]
        self._reader = ImageReader(backend="opencv", flags=flags)
        self._writer = ImageWriter(backend="opencv")

    def apply(self, img):
        """Yield lists (batches) of image-record dicts built from ``img``."""
        if not isinstance(img, str):
            # In-memory array: persist a temporary copy so downstream steps
            # always have a concrete file path to reference.
            img_path = (Path(CACHE_DIR) / "predict_input" / "tmp_img.jpg").as_posix()
            self._writer.write(img_path, img)
            yield [
                {
                    "img_path": img_path,
                    "img": img,
                    # Sizes follow the file-wide [width, height] convention.
                    "img_size": [img.shape[1], img.shape[0]],
                    "ori_img": deepcopy(img),
                    "ori_img_size": deepcopy([img.shape[1], img.shape[0]]),
                }
            ]
        else:
            img_path = img
            # XXX: auto download for url
            img_path = self._download_from_url(img_path)
            image_list = self._get_image_list(img_path)
            batch = []
            for img_path in image_list:
                img = self._read_img(img_path)
                batch.append(img)
                if len(batch) >= self.batch_size:
                    yield batch
                    batch = []
            # Flush the final, possibly partial, batch.
            if len(batch) > 0:
                yield batch

    def _read_img(self, img_path):
        """Read one image from ``img_path`` and build its record dict.

        Raises:
            Exception: If the backend fails to read the image.
            RuntimeError: If RGB output is requested but the array is not
                3-dimensional.
        """
        blob = self._reader.read(img_path)
        if blob is None:
            raise Exception("Image read Error")
        if self.format == "RGB":
            if blob.ndim != 3:
                raise RuntimeError("Array is not 3-dimensional.")
            # BGR to RGB
            blob = blob[..., ::-1]
        return {
            "img_path": img_path,
            "img": blob,
            "img_size": [blob.shape[1], blob.shape[0]],
            "ori_img": deepcopy(blob),
            "ori_img_size": deepcopy([blob.shape[1], blob.shape[0]]),
        }

    def _download_from_url(self, in_path):
        """Download ``in_path`` into the cache dir if it is an HTTP(S) URL;
        otherwise return it unchanged."""
        if in_path.startswith("http"):
            file_name = Path(in_path).name
            save_path = Path(CACHE_DIR) / "predict_input" / file_name
            download(in_path, save_path, overwrite=True)
            return save_path.as_posix()
        return in_path

    def _get_image_list(self, img_file):
        """Collect a sorted list of image file paths from a file or directory.

        Raises:
            Exception: If the path does not exist, or no file with a
                recognized suffix is found.
        """
        imgs_lists = []
        if img_file is None or not os.path.exists(img_file):
            raise Exception(f"Not found any img file in path: {img_file}")
        if os.path.isfile(img_file) and img_file.split(".")[-1] in self.SUFFIX:
            imgs_lists.append(img_file)
        elif os.path.isdir(img_file):
            for root, dirs, files in os.walk(img_file):
                for single_file in files:
                    if single_file.split(".")[-1] in self.SUFFIX:
                        imgs_lists.append(os.path.join(root, single_file))
        if len(imgs_lists) == 0:
            raise Exception("not found any img file in {}".format(img_file))
        imgs_lists = sorted(imgs_lists)
        return imgs_lists

    def set_batch_size(self, batch_size):
        """Set the number of images per yielded batch."""
        self.batch_size = batch_size
  145. class GetImageInfo(BaseComponent):
  146. """Get Image Info"""
  147. INPUT_KEYS = "img"
  148. OUTPUT_KEYS = "img_size"
  149. DEAULT_INPUTS = {"img": "img"}
  150. DEAULT_OUTPUTS = {"img_size": "img_size"}
  151. def __init__(self):
  152. super().__init__()
  153. def apply(self, img):
  154. """apply"""
  155. return {"img_size": [img.shape[1], img.shape[0]]}
  156. class Flip(BaseComponent):
  157. """Flip the image vertically or horizontally."""
  158. INPUT_KEYS = "img"
  159. OUTPUT_KEYS = "img"
  160. DEAULT_INPUTS = {"img": "img"}
  161. DEAULT_OUTPUTS = {"img": "img"}
  162. def __init__(self, mode="H"):
  163. """
  164. Initialize the instance.
  165. Args:
  166. mode (str, optional): 'H' for horizontal flipping and 'V' for vertical
  167. flipping. Default: 'H'.
  168. """
  169. super().__init__()
  170. if mode not in ("H", "V"):
  171. raise ValueError("`mode` should be 'H' or 'V'.")
  172. self.mode = mode
  173. def apply(self, img):
  174. """apply"""
  175. if self.mode == "H":
  176. img = F.flip_h(img)
  177. elif self.mode == "V":
  178. img = F.flip_v(img)
  179. return {"img": img}
  180. class Crop(BaseComponent):
  181. """Crop region from the image."""
  182. INPUT_KEYS = "img"
  183. OUTPUT_KEYS = ["img", "img_size"]
  184. DEAULT_INPUTS = {"img": "img"}
  185. DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}
  186. def __init__(self, crop_size, mode="C"):
  187. """
  188. Initialize the instance.
  189. Args:
  190. crop_size (list|tuple|int): Width and height of the region to crop.
  191. mode (str, optional): 'C' for cropping the center part and 'TL' for
  192. cropping the top left part. Default: 'C'.
  193. """
  194. super().__init__()
  195. if isinstance(crop_size, int):
  196. crop_size = [crop_size, crop_size]
  197. _check_image_size(crop_size)
  198. self.crop_size = crop_size
  199. if mode not in ("C", "TL"):
  200. raise ValueError("Unsupported interpolation method")
  201. self.mode = mode
  202. def apply(self, img):
  203. """apply"""
  204. h, w = img.shape[:2]
  205. cw, ch = self.crop_size
  206. if self.mode == "C":
  207. x1 = max(0, (w - cw) // 2)
  208. y1 = max(0, (h - ch) // 2)
  209. elif self.mode == "TL":
  210. x1, y1 = 0, 0
  211. x2 = min(w, x1 + cw)
  212. y2 = min(h, y1 + ch)
  213. coords = (x1, y1, x2, y2)
  214. if coords == (0, 0, w, h):
  215. raise ValueError(
  216. f"Input image ({w}, {h}) smaller than the target size ({cw}, {ch})."
  217. )
  218. img = F.slice(img, coords=coords)
  219. return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
  220. class _BaseResize(BaseComponent):
  221. _INTERP_DICT = {
  222. "NEAREST": cv2.INTER_NEAREST,
  223. "LINEAR": cv2.INTER_LINEAR,
  224. "CUBIC": cv2.INTER_CUBIC,
  225. "AREA": cv2.INTER_AREA,
  226. "LANCZOS4": cv2.INTER_LANCZOS4,
  227. }
  228. def __init__(self, size_divisor, interp):
  229. super().__init__()
  230. if size_divisor is not None:
  231. assert isinstance(
  232. size_divisor, int
  233. ), "`size_divisor` should be None or int."
  234. self.size_divisor = size_divisor
  235. try:
  236. interp = self._INTERP_DICT[interp]
  237. except KeyError:
  238. raise ValueError(
  239. "`interp` should be one of {}.".format(self._INTERP_DICT.keys())
  240. )
  241. self.interp = interp
  242. @staticmethod
  243. def _rescale_size(img_size, target_size):
  244. """rescale size"""
  245. scale = min(max(target_size) / max(img_size), min(target_size) / min(img_size))
  246. rescaled_size = [round(i * scale) for i in img_size]
  247. return rescaled_size, scale
  248. class Resize(_BaseResize):
  249. """Resize the image."""
  250. INPUT_KEYS = "img"
  251. OUTPUT_KEYS = ["img", "img_size", "scale_factors"]
  252. DEAULT_INPUTS = {"img": "img"}
  253. DEAULT_OUTPUTS = {
  254. "img": "img",
  255. "img_size": "img_size",
  256. "scale_factors": "scale_factors",
  257. }
  258. def __init__(
  259. self, target_size, keep_ratio=False, size_divisor=None, interp="LINEAR"
  260. ):
  261. """
  262. Initialize the instance.
  263. Args:
  264. target_size (list|tuple|int): Target width and height.
  265. keep_ratio (bool, optional): Whether to keep the aspect ratio of resized
  266. image. Default: False.
  267. size_divisor (int|None, optional): Divisor of resized image size.
  268. Default: None.
  269. interp (str, optional): Interpolation method. Choices are 'NEAREST',
  270. 'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
  271. """
  272. super().__init__(size_divisor=size_divisor, interp=interp)
  273. if isinstance(target_size, int):
  274. target_size = [target_size, target_size]
  275. _check_image_size(target_size)
  276. self.target_size = target_size
  277. self.keep_ratio = keep_ratio
  278. def apply(self, img):
  279. """apply"""
  280. target_size = self.target_size
  281. original_size = img.shape[:2]
  282. if self.keep_ratio:
  283. h, w = img.shape[0:2]
  284. target_size, _ = self._rescale_size((w, h), self.target_size)
  285. if self.size_divisor:
  286. target_size = [
  287. math.ceil(i / self.size_divisor) * self.size_divisor
  288. for i in target_size
  289. ]
  290. img_scale_w, img_scale_h = [
  291. target_size[1] / original_size[1],
  292. target_size[0] / original_size[0],
  293. ]
  294. img = F.resize(img, target_size, interp=self.interp)
  295. return {
  296. "img": img,
  297. "img_size": [img.shape[1], img.shape[0]],
  298. "scale_factors": [img_scale_w, img_scale_h],
  299. }
  300. class ResizeByLong(_BaseResize):
  301. """
  302. Proportionally resize the image by specifying the target length of the
  303. longest side.
  304. """
  305. INPUT_KEYS = "img"
  306. OUTPUT_KEYS = ["img", "img_size"]
  307. DEAULT_INPUTS = {"img": "img"}
  308. DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}
  309. def __init__(self, target_long_edge, size_divisor=None, interp="LINEAR"):
  310. """
  311. Initialize the instance.
  312. Args:
  313. target_long_edge (int): Target length of the longest side of image.
  314. size_divisor (int|None, optional): Divisor of resized image size.
  315. Default: None.
  316. interp (str, optional): Interpolation method. Choices are 'NEAREST',
  317. 'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
  318. """
  319. super().__init__(size_divisor=size_divisor, interp=interp)
  320. self.target_long_edge = target_long_edge
  321. def apply(self, img):
  322. """apply"""
  323. h, w = img.shape[:2]
  324. scale = self.target_long_edge / max(h, w)
  325. h_resize = round(h * scale)
  326. w_resize = round(w * scale)
  327. if self.size_divisor is not None:
  328. h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
  329. w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
  330. img = F.resize(img, (w_resize, h_resize), interp=self.interp)
  331. return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
  332. class ResizeByShort(_BaseResize):
  333. """
  334. Proportionally resize the image by specifying the target length of the
  335. shortest side.
  336. """
  337. INPUT_KEYS = "img"
  338. OUTPUT_KEYS = ["img", "img_size"]
  339. DEAULT_INPUTS = {"img": "img"}
  340. DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}
  341. def __init__(self, target_short_edge, size_divisor=None, interp="LINEAR"):
  342. """
  343. Initialize the instance.
  344. Args:
  345. target_short_edge (int): Target length of the shortest side of image.
  346. size_divisor (int|None, optional): Divisor of resized image size.
  347. Default: None.
  348. interp (str, optional): Interpolation method. Choices are 'NEAREST',
  349. 'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
  350. """
  351. super().__init__(size_divisor=size_divisor, interp=interp)
  352. self.target_short_edge = target_short_edge
  353. def apply(self, img):
  354. """apply"""
  355. h, w = img.shape[:2]
  356. scale = self.target_short_edge / min(h, w)
  357. h_resize = round(h * scale)
  358. w_resize = round(w * scale)
  359. if self.size_divisor is not None:
  360. h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
  361. w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
  362. img = F.resize(img, (w_resize, h_resize), interp=self.interp)
  363. return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
  364. class Pad(BaseComponent):
  365. """Pad the image."""
  366. INPUT_KEYS = "img"
  367. OUTPUT_KEYS = ["img", "img_size"]
  368. DEAULT_INPUTS = {"img": "img"}
  369. DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}
  370. def __init__(self, target_size, val=127.5):
  371. """
  372. Initialize the instance.
  373. Args:
  374. target_size (list|tuple|int): Target width and height of the image after
  375. padding.
  376. val (float, optional): Value to fill the padded area. Default: 127.5.
  377. """
  378. super().__init__()
  379. if isinstance(target_size, int):
  380. target_size = [target_size, target_size]
  381. _check_image_size(target_size)
  382. self.target_size = target_size
  383. self.val = val
  384. def apply(self, img):
  385. """apply"""
  386. h, w = img.shape[:2]
  387. tw, th = self.target_size
  388. ph = th - h
  389. pw = tw - w
  390. if ph < 0 or pw < 0:
  391. raise ValueError(
  392. f"Input image ({w}, {h}) smaller than the target size ({tw}, {th})."
  393. )
  394. else:
  395. img = F.pad(img, pad=(0, ph, 0, pw), val=self.val)
  396. return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
  397. class PadStride(BaseComponent):
  398. """padding image for model with FPN , instead PadBatch(pad_to_stride, pad_gt) in original config
  399. Args:
  400. stride (bool): model with FPN need image shape % stride == 0
  401. """
  402. INPUT_KEYS = "img"
  403. OUTPUT_KEYS = "img"
  404. DEAULT_INPUTS = {"img": "img"}
  405. DEAULT_OUTPUTS = {"img": "img"}
  406. def __init__(self, stride=0):
  407. super().__init__()
  408. self.coarsest_stride = stride
  409. def apply(self, img):
  410. """
  411. Args:
  412. im (np.ndarray): image (np.ndarray)
  413. Returns:
  414. im (np.ndarray): processed image (np.ndarray)
  415. """
  416. im = img
  417. coarsest_stride = self.coarsest_stride
  418. if coarsest_stride <= 0:
  419. return {"img": im}
  420. im_c, im_h, im_w = im.shape
  421. pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
  422. pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
  423. padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
  424. padding_im[:, :im_h, :im_w] = im
  425. return {"img": padding_im}
  426. class Normalize(BaseComponent):
  427. """Normalize the image."""
  428. INPUT_KEYS = "img"
  429. OUTPUT_KEYS = "img"
  430. DEAULT_INPUTS = {"img": "img"}
  431. DEAULT_OUTPUTS = {"img": "img"}
  432. def __init__(self, scale=1.0 / 255, mean=0.5, std=0.5, preserve_dtype=False):
  433. """
  434. Initialize the instance.
  435. Args:
  436. scale (float, optional): Scaling factor to apply to the image before
  437. applying normalization. Default: 1/255.
  438. mean (float|tuple|list, optional): Means for each channel of the image.
  439. Default: 0.5.
  440. std (float|tuple|list, optional): Standard deviations for each channel
  441. of the image. Default: 0.5.
  442. preserve_dtype (bool, optional): Whether to preserve the original dtype
  443. of the image.
  444. """
  445. super().__init__()
  446. self.scale = np.float32(scale)
  447. if isinstance(mean, float):
  448. mean = [mean]
  449. self.mean = np.asarray(mean).astype("float32")
  450. if isinstance(std, float):
  451. std = [std]
  452. self.std = np.asarray(std).astype("float32")
  453. self.preserve_dtype = preserve_dtype
  454. def apply(self, img):
  455. """apply"""
  456. old_type = img.dtype
  457. # XXX: If `old_type` has higher precision than float32,
  458. # we will lose some precision.
  459. img = img.astype("float32", copy=False)
  460. img *= self.scale
  461. img -= self.mean
  462. img /= self.std
  463. if self.preserve_dtype:
  464. img = img.astype(old_type, copy=False)
  465. return {"img": img}
  466. class ToCHWImage(BaseComponent):
  467. """Reorder the dimensions of the image from HWC to CHW."""
  468. INPUT_KEYS = "img"
  469. OUTPUT_KEYS = "img"
  470. DEAULT_INPUTS = {"img": "img"}
  471. DEAULT_OUTPUTS = {"img": "img"}
  472. def apply(self, img):
  473. """apply"""
  474. img = img.transpose((2, 0, 1))
  475. return {"img": img}