common.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from pathlib import Path
from copy import deepcopy

import numpy as np
import cv2
from skimage import measure, morphology

from .....utils.cache import CACHE_DIR
from ....utils.io import ImageReader, ImageWriter
from ...utils.mixin import BatchSizeMixin
from ...base import BaseComponent
from ..read_data import _BaseRead
from . import funcs as F

__all__ = [
    "ReadImage",
    "Flip",
    "Crop",
    "Resize",
    "ResizeByLong",
    "ResizeByShort",
    "Pad",
    "Normalize",
    "ToCHWImage",
    "PadStride",
]


def _check_image_size(input_):
    """check image size"""
    if not (
        isinstance(input_, (list, tuple))
        and len(input_) == 2
        and isinstance(input_[0], int)
        and isinstance(input_[1], int)
    ):
        raise TypeError(f"{input_} cannot represent a valid image size.")


class ReadImage(_BaseRead):
    """Load image from the file."""

    INPUT_KEYS = ["img"]
    OUTPUT_KEYS = ["img", "img_size", "ori_img", "ori_img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {
        "img": "img",
        "img_path": "img_path",
        "img_size": "img_size",
        "ori_img": "ori_img",
        "ori_img_size": "ori_img_size",
    }

    _FLAGS_DICT = {
        "BGR": cv2.IMREAD_COLOR,
        "RGB": cv2.IMREAD_COLOR,
        "GRAY": cv2.IMREAD_GRAYSCALE,
    }

    SUFFIX = ["jpg", "png", "jpeg", "JPEG", "JPG", "bmp"]

    def __init__(self, batch_size=1, format="BGR"):
        """
        Initialize the instance.

        Args:
            batch_size (int, optional): Number of images in each batch yielded by
                `apply`. Default: 1.
            format (str, optional): Target color format to convert the image to.
                Choices are 'BGR', 'RGB', and 'GRAY'. Default: 'BGR'.
        """
        super().__init__(batch_size)
        self.format = format
        flags = self._FLAGS_DICT[self.format]
        self._reader = ImageReader(backend="opencv", flags=flags)
        self._writer = ImageWriter(backend="opencv")

    def apply(self, img):
        """apply"""
        if not isinstance(img, str):
            img_path = (Path(CACHE_DIR) / "predict_input" / "tmp_img.jpg").as_posix()
            self._writer.write(img_path, img)
            yield [
                {
                    "img_path": img_path,
                    "img": img,
                    "img_size": [img.shape[1], img.shape[0]],
                    "ori_img": deepcopy(img),
                    "ori_img_size": deepcopy([img.shape[1], img.shape[0]]),
                }
            ]
        else:
            img_path = img
            img_path = self._download_from_url(img_path)
            file_list = self._get_files_list(img_path)
            batch = []
            for img_path in file_list:
                img = self._read_img(img_path)
                batch.append(img)
                if len(batch) >= self.batch_size:
                    yield batch
                    batch = []
            if len(batch) > 0:
                yield batch

    def _read_img(self, img_path):
        blob = self._reader.read(img_path)
        if blob is None:
            raise Exception(f"Failed to read the image file: {img_path}")
        if self.format == "RGB":
            if blob.ndim != 3:
                raise RuntimeError("Array is not 3-dimensional.")
            # BGR to RGB
            blob = blob[..., ::-1]
        return {
            "img_path": img_path,
            "img": blob,
            "img_size": [blob.shape[1], blob.shape[0]],
            "ori_img": deepcopy(blob),
            "ori_img_size": deepcopy([blob.shape[1], blob.shape[0]]),
        }
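
# Illustrative use of ReadImage.apply() called directly (the file path below is
# hypothetical). apply() is a generator that yields lists (batches) of dicts with
# the keys listed in DEAULT_OUTPUTS:
#
#     reader = ReadImage(batch_size=1, format="RGB")
#     for batch in reader.apply("path/to/image.jpg"):
#         record = batch[0]
#         print(record["img_path"], record["img_size"])  # img_size is [w, h]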


class GetImageInfo(BaseComponent):
    """Get Image Info"""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img_size"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img_size": "img_size"}

    def __init__(self):
        super().__init__()

    def apply(self, img):
        """apply"""
        return {"img_size": [img.shape[1], img.shape[0]]}


class Flip(BaseComponent):
    """Flip the image vertically or horizontally."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def __init__(self, mode="H"):
        """
        Initialize the instance.

        Args:
            mode (str, optional): 'H' for horizontal flipping and 'V' for vertical
                flipping. Default: 'H'.
        """
        super().__init__()
        if mode not in ("H", "V"):
            raise ValueError("`mode` should be 'H' or 'V'.")
        self.mode = mode

    def apply(self, img):
        """apply"""
        if self.mode == "H":
            img = F.flip_h(img)
        elif self.mode == "V":
            img = F.flip_v(img)
        return {"img": img}


class Crop(BaseComponent):
    """Crop region from the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, crop_size, mode="C"):
        """
        Initialize the instance.

        Args:
            crop_size (list|tuple|int): Width and height of the region to crop.
            mode (str, optional): 'C' for cropping the center part and 'TL' for
                cropping the top left part. Default: 'C'.
        """
        super().__init__()
        if isinstance(crop_size, int):
            crop_size = [crop_size, crop_size]
        _check_image_size(crop_size)
        self.crop_size = crop_size
        if mode not in ("C", "TL"):
            raise ValueError("`mode` should be 'C' or 'TL'.")
        self.mode = mode

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        cw, ch = self.crop_size
        if self.mode == "C":
            x1 = max(0, (w - cw) // 2)
            y1 = max(0, (h - ch) // 2)
        elif self.mode == "TL":
            x1, y1 = 0, 0
        x2 = min(w, x1 + cw)
        y2 = min(h, y1 + ch)
        coords = (x1, y1, x2, y2)
        if coords == (0, 0, w, h):
            raise ValueError(
                f"Input image ({w}, {h}) smaller than the target size ({cw}, {ch})."
            )
        img = F.slice(img, coords=coords)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
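
# Worked example of the center-crop arithmetic above: for a 256x256 input and
# Crop(crop_size=224, mode="C"), x1 = y1 = (256 - 224) // 2 = 16 and the slice
# coords are (16, 16, 240, 240), i.e. a 224x224 region.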


class _BaseResize(BaseComponent):
    _INTERP_DICT = {
        "NEAREST": cv2.INTER_NEAREST,
        "LINEAR": cv2.INTER_LINEAR,
        "CUBIC": cv2.INTER_CUBIC,
        "AREA": cv2.INTER_AREA,
        "LANCZOS4": cv2.INTER_LANCZOS4,
    }

    def __init__(self, size_divisor, interp):
        super().__init__()
        if size_divisor is not None:
            assert isinstance(
                size_divisor, int
            ), "`size_divisor` should be None or int."
        self.size_divisor = size_divisor
        try:
            interp = self._INTERP_DICT[interp]
        except KeyError:
            raise ValueError(
                "`interp` should be one of {}.".format(self._INTERP_DICT.keys())
            )
        self.interp = interp

    @staticmethod
    def _rescale_size(img_size, target_size):
        """rescale size"""
        scale = min(max(target_size) / max(img_size), min(target_size) / min(img_size))
        rescaled_size = [round(i * scale) for i in img_size]
        return rescaled_size, scale
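
# Worked example of _rescale_size: for img_size (w, h) = (1280, 720) and
# target_size (1024, 1024), scale = min(1024 / 1280, 1024 / 720) = 0.8, so the
# rescaled size is [1024, 576] and the aspect ratio is preserved.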


class Resize(_BaseResize):
    """Resize the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size", "scale_factors"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {
        "img": "img",
        "img_size": "img_size",
        "scale_factors": "scale_factors",
    }

    def __init__(
        self, target_size, keep_ratio=False, size_divisor=None, interp="LINEAR"
    ):
        """
        Initialize the instance.

        Args:
            target_size (list|tuple|int): Target width and height.
            keep_ratio (bool, optional): Whether to keep the aspect ratio of resized
                image. Default: False.
            size_divisor (int|None, optional): Divisor of resized image size.
                Default: None.
            interp (str, optional): Interpolation method. Choices are 'NEAREST',
                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
        """
        super().__init__(size_divisor=size_divisor, interp=interp)
        if isinstance(target_size, int):
            target_size = [target_size, target_size]
        _check_image_size(target_size)
        self.target_size = target_size
        self.keep_ratio = keep_ratio

    def apply(self, img):
        """apply"""
        target_size = self.target_size
        original_size = img.shape[:2]
        if self.keep_ratio:
            h, w = img.shape[0:2]
            target_size, _ = self._rescale_size((w, h), self.target_size)
        if self.size_divisor:
            target_size = [
                math.ceil(i / self.size_divisor) * self.size_divisor
                for i in target_size
            ]
        # `target_size` is (w, h) while `original_size` is (h, w).
        img_scale_w, img_scale_h = [
            target_size[0] / original_size[1],
            target_size[1] / original_size[0],
        ]
        img = F.resize(img, target_size, interp=self.interp)
        return {
            "img": img,
            "img_size": [img.shape[1], img.shape[0]],
            "scale_factors": [img_scale_w, img_scale_h],
        }
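
# Illustrative use of Resize with keep_ratio and size_divisor (values chosen for
# illustration only; `img` is assumed to be an HWC ndarray):
#
#     resize = Resize(target_size=[1024, 1024], keep_ratio=True, size_divisor=32)
#     out = resize.apply(img)
#     # For a 1280x720 input, the rescaled size [1024, 576] is already divisible
#     # by 32, so out["img_size"] == [1024, 576] and
#     # out["scale_factors"] == [0.8, 0.8].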


class ResizeByLong(_BaseResize):
    """
    Proportionally resize the image by specifying the target length of the
    longest side.
    """

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, target_long_edge, size_divisor=None, interp="LINEAR"):
        """
        Initialize the instance.

        Args:
            target_long_edge (int): Target length of the longest side of image.
            size_divisor (int|None, optional): Divisor of resized image size.
                Default: None.
            interp (str, optional): Interpolation method. Choices are 'NEAREST',
                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
        """
        super().__init__(size_divisor=size_divisor, interp=interp)
        self.target_long_edge = target_long_edge

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        scale = self.target_long_edge / max(h, w)
        h_resize = round(h * scale)
        w_resize = round(w * scale)
        if self.size_divisor is not None:
            h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
            w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
        img = F.resize(img, (w_resize, h_resize), interp=self.interp)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}


class ResizeByShort(_BaseResize):
    """
    Proportionally resize the image by specifying the target length of the
    shortest side.
    """

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, target_short_edge, size_divisor=None, interp="LINEAR"):
        """
        Initialize the instance.

        Args:
            target_short_edge (int): Target length of the shortest side of image.
            size_divisor (int|None, optional): Divisor of resized image size.
                Default: None.
            interp (str, optional): Interpolation method. Choices are 'NEAREST',
                'LINEAR', 'CUBIC', 'AREA', and 'LANCZOS4'. Default: 'LINEAR'.
        """
        super().__init__(size_divisor=size_divisor, interp=interp)
        self.target_short_edge = target_short_edge

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        scale = self.target_short_edge / min(h, w)
        h_resize = round(h * scale)
        w_resize = round(w * scale)
        if self.size_divisor is not None:
            h_resize = math.ceil(h_resize / self.size_divisor) * self.size_divisor
            w_resize = math.ceil(w_resize / self.size_divisor) * self.size_divisor
        img = F.resize(img, (w_resize, h_resize), interp=self.interp)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
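
# Worked example of ResizeByShort: for a 720x1280 (h x w) input,
# ResizeByShort(target_short_edge=736, size_divisor=32) gives scale = 736 / 720,
# h_resize = 736 and w_resize = round(1280 * 736 / 720) = 1308; rounding up to
# the divisor yields (736, 1312). ResizeByLong follows the same pattern but
# scales relative to the longest side.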


class Pad(BaseComponent):
    """Pad the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = ["img", "img_size"]
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img", "img_size": "img_size"}

    def __init__(self, target_size, val=127.5):
        """
        Initialize the instance.

        Args:
            target_size (list|tuple|int): Target width and height of the image after
                padding.
            val (float, optional): Value to fill the padded area. Default: 127.5.
        """
        super().__init__()
        if isinstance(target_size, int):
            target_size = [target_size, target_size]
        _check_image_size(target_size)
        self.target_size = target_size
        self.val = val

    def apply(self, img):
        """apply"""
        h, w = img.shape[:2]
        tw, th = self.target_size
        ph = th - h
        pw = tw - w
        if ph < 0 or pw < 0:
            raise ValueError(
                f"Input image ({w}, {h}) smaller than the target size ({tw}, {th})."
            )
        img = F.pad(img, pad=(0, ph, 0, pw), val=self.val)
        return {"img": img, "img_size": [img.shape[1], img.shape[0]]}
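
# Worked example of Pad: for a 480x512 (h x w) input and Pad(target_size=[640, 640]),
# ph = 640 - 480 = 160 and pw = 640 - 512 = 128, so the reported img_size becomes
# [640, 640]; how the padding is laid out is delegated to F.pad.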


class PadStride(BaseComponent):
    """
    Pad the image so that its height and width are multiples of `stride`, as
    required by models with FPN. Replaces PadBatch(pad_to_stride, pad_gt) in the
    original config.

    Args:
        stride (int, optional): Stride that the padded image shape must be a
            multiple of. Padding is skipped if `stride` <= 0. Default: 0.
    """

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def __init__(self, stride=0):
        super().__init__()
        self.coarsest_stride = stride

    def apply(self, img):
        """
        Args:
            img (np.ndarray): Image in CHW layout.

        Returns:
            dict: Dict with the zero-padded image under the key 'img'.
        """
        im = img
        coarsest_stride = self.coarsest_stride
        if coarsest_stride <= 0:
            return {"img": im}
        im_c, im_h, im_w = im.shape
        pad_h = int(np.ceil(float(im_h) / coarsest_stride) * coarsest_stride)
        pad_w = int(np.ceil(float(im_w) / coarsest_stride) * coarsest_stride)
        padding_im = np.zeros((im_c, pad_h, pad_w), dtype=np.float32)
        padding_im[:, :im_h, :im_w] = im
        return {"img": padding_im}
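
# Worked example of PadStride: for a CHW input of shape (3, 720, 1280) and
# PadStride(stride=32), pad_h = ceil(720 / 32) * 32 = 736 and
# pad_w = ceil(1280 / 32) * 32 = 1280, so the image is zero-padded to (3, 736, 1280).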


class Normalize(BaseComponent):
    """Normalize the image."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def __init__(self, scale=1.0 / 255, mean=0.5, std=0.5, preserve_dtype=False):
        """
        Initialize the instance.

        Args:
            scale (float, optional): Scaling factor to apply to the image before
                applying normalization. Default: 1/255.
            mean (float|tuple|list, optional): Means for each channel of the image.
                Default: 0.5.
            std (float|tuple|list, optional): Standard deviations for each channel
                of the image. Default: 0.5.
            preserve_dtype (bool, optional): Whether to preserve the original dtype
                of the image. Default: False.
        """
        super().__init__()
        self.scale = np.float32(scale)
        if isinstance(mean, float):
            mean = [mean]
        self.mean = np.asarray(mean).astype("float32")
        if isinstance(std, float):
            std = [std]
        self.std = np.asarray(std).astype("float32")
        self.preserve_dtype = preserve_dtype

    def apply(self, img):
        """apply"""
        old_type = img.dtype
        # XXX: If `old_type` has higher precision than float32,
        # we will lose some precision.
        img = img.astype("float32", copy=False)
        img *= self.scale
        img -= self.mean
        img /= self.std
        if self.preserve_dtype:
            img = img.astype(old_type, copy=False)
        return {"img": img}
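
# Worked example of Normalize with the defaults (scale=1/255, mean=0.5, std=0.5):
# a uint8 pixel of 255 maps to (255 / 255 - 0.5) / 0.5 = 1.0 and a pixel of 0 maps
# to -1.0, so the image is rescaled to roughly the range [-1, 1].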


class ToCHWImage(BaseComponent):
    """Reorder the dimensions of the image from HWC to CHW."""

    INPUT_KEYS = "img"
    OUTPUT_KEYS = "img"
    DEAULT_INPUTS = {"img": "img"}
    DEAULT_OUTPUTS = {"img": "img"}

    def apply(self, img):
        """apply"""
        img = img.transpose((2, 0, 1))
        return {"img": img}


class Map_to_mask(BaseComponent):
    """Map a score map to a binary mask."""

    INPUT_KEYS = "pred"
    OUTPUT_KEYS = "pred"
    DEAULT_INPUTS = {"pred": "pred"}
    DEAULT_OUTPUTS = {"pred": "pred"}

    def apply(self, pred):
        """apply"""
        score_map = pred[0]
        thresh = 0.01
        mask = score_map[0]
        # Binarize the score map, then denoise it with a morphological opening.
        mask[mask > thresh] = 255
        mask[mask <= thresh] = 0
        kernel = morphology.disk(4)
        mask = morphology.opening(mask, kernel)
        mask = mask.astype(np.uint8)
        return {"pred": mask[None, :, :]}