# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import os.path as osp
from typing import List, Sequence, Union, Optional, Tuple
import re
import numpy as np
import cv2
import math
import json
import tempfile
import lazy_paddle

from ...utils.benchmark import benchmark


class Scale:
    """Scale images."""

    def __init__(
        self,
        short_size: int,
        fixed_ratio: bool = True,
        keep_ratio: Union[bool, None] = None,
        do_round: bool = False,
    ) -> None:
        """
        Initializes the Scale class.

        Args:
            short_size (int): The target size for the shorter side of the image.
            fixed_ratio (bool): Whether to maintain a fixed aspect ratio of 4:3.
            keep_ratio (Union[bool, None]): Whether to keep the aspect ratio. Cannot be True if fixed_ratio is True.
            do_round (bool): Whether to round the scaling factor.
        """
        super().__init__()
        self.short_size = short_size
        assert (fixed_ratio and not keep_ratio) or (
            not fixed_ratio
        ), "fixed_ratio and keep_ratio cannot be True at the same time"
        self.fixed_ratio = fixed_ratio
        self.keep_ratio = keep_ratio
        self.do_round = do_round

    def scale(self, video: List[np.ndarray]) -> List[np.ndarray]:
        """
        Performs resize operations on a sequence of images.

        Args:
            video (List[np.ndarray]): List where each item is an image, as a numpy array.
                For example, [np.ndarray0, np.ndarray1, np.ndarray2, ...]

        Returns:
            List[np.ndarray]: List where each item is a np.ndarray after scaling.
        """
        imgs = video
        resized_imgs = []
        for i in range(len(imgs)):
            img = imgs[i]
            if isinstance(img, np.ndarray):
                h, w, _ = img.shape
            else:
                raise NotImplementedError
            if (w <= h and w == self.short_size) or (h <= w and h == self.short_size):
                resized_imgs.append(img)
                continue
            if w <= h:
                ow = self.short_size
                if self.fixed_ratio:
                    oh = int(self.short_size * 4.0 / 3.0)
                elif self.keep_ratio is False:
                    oh = self.short_size
                else:
                    scale_factor = self.short_size / w
                    oh = (
                        int(h * float(scale_factor) + 0.5)
                        if self.do_round
                        else int(h * self.short_size / w)
                    )
                    ow = (
                        int(w * float(scale_factor) + 0.5)
                        if self.do_round
                        else self.short_size
                    )
            else:
                oh = self.short_size
                if self.fixed_ratio:
                    ow = int(self.short_size * 4.0 / 3.0)
                elif self.keep_ratio is False:
                    ow = self.short_size
                else:
                    scale_factor = self.short_size / h
                    oh = (
                        int(h * float(scale_factor) + 0.5)
                        if self.do_round
                        else self.short_size
                    )
                    ow = (
                        int(w * float(scale_factor) + 0.5)
                        if self.do_round
                        else int(w * self.short_size / h)
                    )
            resized_imgs.append(
                cv2.resize(img, (ow, oh), interpolation=cv2.INTER_LINEAR)
            )
        imgs = resized_imgs
        return imgs

    @benchmark.timeit
    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
        """
        Apply the scaling operation to a list of videos.

        Args:
            videos (List[np.ndarray]): A list of videos, where each video is a sequence
                of images.

        Returns:
            List[np.ndarray]: A list of videos after scaling, where each video is a list of images.
        """
        return [self.scale(video) for video in videos]
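

# Illustrative sketch: how a single dummy video flows through Scale. The helper
# name `_example_scale`, the 240x320 frame size, and short_size=224 are example
# values, not part of the module's API or of any fixed configuration. With
# fixed_ratio=True the longer side is forced to int(short_size * 4 / 3); with
# fixed_ratio=False and keep_ratio left as None it is scaled proportionally.
def _example_scale() -> None:
    frame = np.zeros((240, 320, 3), dtype=np.uint8)  # (h, w, c) dummy frame
    video = [frame, frame]

    fixed = Scale(short_size=224, fixed_ratio=True)([video])[0]
    proportional = Scale(short_size=224, fixed_ratio=False)([video])[0]

    # fixed_ratio=True: h == 224 (short side), w == int(224 * 4 / 3) == 298
    assert fixed[0].shape == (224, 298, 3)
    # fixed_ratio=False, keep_ratio=None: h == 224, w == int(320 * 224 / 240)
    assert proportional[0].shape == (224, int(320 * 224 / 240), 3)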


class CenterCrop:
    """Center crop images."""

    def __init__(self, target_size: int, do_round: bool = True) -> None:
        """
        Initializes the CenterCrop class.

        Args:
            target_size (int): The size of the cropped area.
            do_round (bool): Whether to round the crop coordinates.
        """
        super().__init__()
        self.target_size = target_size
        self.do_round = do_round

    def center_crop(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
        """
        Performs center crop operations on images.

        Args:
            imgs (List[np.ndarray]): A sequence of images (each a numpy array).

        Returns:
            List[np.ndarray]: A list of images after center cropping, or a cropped
                tensor when the input is a paddle Tensor.
        """
        crop_imgs = []
        th, tw = self.target_size, self.target_size
        if isinstance(imgs, lazy_paddle.Tensor):
            h, w = imgs.shape[-2:]
            x1 = int(round((w - tw) / 2.0)) if self.do_round else (w - tw) // 2
            y1 = int(round((h - th) / 2.0)) if self.do_round else (h - th) // 2
            crop_imgs = imgs[:, :, y1 : y1 + th, x1 : x1 + tw]
        else:
            for img in imgs:
                h, w, _ = img.shape
                assert (w >= self.target_size) and (
                    h >= self.target_size
                ), "image width({}) and height({}) should be larger than crop size({})".format(
                    w, h, self.target_size
                )
                x1 = int(round((w - tw) / 2.0)) if self.do_round else (w - tw) // 2
                y1 = int(round((h - th) / 2.0)) if self.do_round else (h - th) // 2
                crop_imgs.append(img[y1 : y1 + th, x1 : x1 + tw])
        return crop_imgs

    @benchmark.timeit
    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
        """
        Apply the center crop operation to a list of videos.

        Args:
            videos (List[np.ndarray]): A list of videos, where each video is a sequence of images.

        Returns:
            List[np.ndarray]: A list of videos after center cropping.
        """
        return [self.center_crop(video) for video in videos]
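

# Illustrative sketch: CenterCrop keeps the centered target_size x target_size
# window of each frame. The helper name `_example_center_crop` and the 256x340
# frame size are example values only; a paddle Tensor input would instead be
# cropped along its last two dimensions.
def _example_center_crop() -> None:
    frame = np.zeros((256, 340, 3), dtype=np.uint8)
    cropped = CenterCrop(target_size=224)([[frame]])[0]
    # crop origin: x1 = round((340 - 224) / 2) = 58, y1 = round((256 - 224) / 2) = 16
    assert cropped[0].shape == (224, 224, 3)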


class Image2Array:
    """Convert a sequence of images to a numpy array with optional transposition."""

    def __init__(self, transpose: bool = True, data_format: str = "tchw") -> None:
        """
        Initializes the Image2Array class.

        Args:
            transpose (bool): Whether to transpose the resulting numpy array.
            data_format (str): The format to transpose to, either 'tchw' or 'cthw'.

        Raises:
            AssertionError: If data_format is not one of the allowed values.
        """
        super().__init__()
        assert data_format in [
            "tchw",
            "cthw",
        ], f"Target format must be in ['tchw', 'cthw'], but got {data_format}"
        self.transpose = transpose
        self.data_format = data_format

    def img2array(self, imgs: List[np.ndarray]) -> np.ndarray:
        """
        Converts a sequence of images to a numpy array and optionally transposes it.

        Args:
            imgs (List[np.ndarray]): A list of images to be converted to a numpy array.

        Returns:
            np.ndarray: A numpy array representation of the images.
        """
        t_imgs = np.stack(imgs).astype("float32")
        if self.transpose:
            if self.data_format == "tchw":
                t_imgs = t_imgs.transpose([0, 3, 1, 2])  # tchw
            else:
                t_imgs = t_imgs.transpose([3, 0, 1, 2])  # cthw
        return t_imgs

    @benchmark.timeit
    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
        """
        Apply the image-to-array conversion to a list of videos.

        Args:
            videos (List[Sequence[np.ndarray]]): A list of videos, where each video is a sequence of images.

        Returns:
            List[np.ndarray]: A list of numpy arrays, one for each video.
        """
        return [self.img2array(video) for video in videos]
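

# Illustrative sketch: Image2Array stacks T frames of shape (h, w, c) into one
# float32 array and transposes it into the requested layout. The helper name
# `_example_image2array` and the 8-frame 224x224 video are example values only.
def _example_image2array() -> None:
    video = [np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(8)]
    tchw = Image2Array(transpose=True, data_format="tchw")([video])[0]
    cthw = Image2Array(transpose=True, data_format="cthw")([video])[0]
    assert tchw.shape == (8, 3, 224, 224)  # (t, c, h, w)
    assert cthw.shape == (3, 8, 224, 224)  # (c, t, h, w)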


class NormalizeVideo:
    """
    Normalize video frames by subtracting the mean and dividing by the standard deviation.
    """

    def __init__(
        self,
        mean: Sequence[float],
        std: Sequence[float],
        tensor_shape: Sequence[int] = [3, 1, 1],
        inplace: bool = False,
    ) -> None:
        """
        Initializes the NormalizeVideo class.

        Args:
            mean (Sequence[float]): The mean values for each channel.
            std (Sequence[float]): The standard deviation values for each channel.
            tensor_shape (Sequence[int]): The shape of the mean and std tensors.
            inplace (bool): Whether to perform normalization in place.
        """
        super().__init__()
        self.inplace = inplace
        if not inplace:
            self.mean = np.array(mean).reshape(tensor_shape).astype(np.float32)
            self.std = np.array(std).reshape(tensor_shape).astype(np.float32)
        else:
            self.mean = np.array(mean, dtype=np.float32)
            self.std = np.array(std, dtype=np.float32)

    def normalize_video(self, imgs: np.ndarray) -> np.ndarray:
        """
        Normalizes a sequence of images.

        Args:
            imgs (np.ndarray): A numpy array of images to be normalized.

        Returns:
            np.ndarray: The normalized images as a numpy array.
        """
        if self.inplace:
            n = len(imgs)
            h, w, c = imgs[0].shape
            norm_imgs = np.empty((n, h, w, c), dtype=np.float32)
            for i, img in enumerate(imgs):
                norm_imgs[i] = img
            for img in norm_imgs:  # [n, h, w, c]
                mean = np.float64(self.mean.reshape(1, -1))  # [1, 3]
                stdinv = 1 / np.float64(self.std.reshape(1, -1))  # [1, 3]
                cv2.subtract(img, mean, img)
                cv2.multiply(img, stdinv, img)
        else:
            norm_imgs = imgs / 255.0
            norm_imgs -= self.mean
            norm_imgs /= self.std
        # Return the normalized frames from either branch, with a leading batch axis.
        imgs = np.expand_dims(norm_imgs, axis=0).copy()
        return imgs

    @benchmark.timeit
    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
        """
        Apply normalization to a list of videos.

        Args:
            videos (List[np.ndarray]): A list of videos, where each video is a numpy array of images.

        Returns:
            List[np.ndarray]: A list of normalized videos as numpy arrays.
        """
        return [self.normalize_video(video) for video in videos]
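

# Illustrative sketch of the non-inplace path: frames are scaled to [0, 1] by
# dividing by 255, shifted by `mean` and divided by `std` per channel, and a
# leading batch axis is added. The helper name `_example_normalize_video` and
# the ImageNet-style mean/std values are example choices, not the module's
# required configuration.
def _example_normalize_video() -> None:
    norm = NormalizeVideo(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225],
        tensor_shape=[3, 1, 1],
    )
    video = np.full((8, 3, 224, 224), 255, dtype=np.float32)  # (t, c, h, w)
    out = norm([video])[0]
    assert out.shape == (1, 8, 3, 224, 224)  # batch axis prepended
    # every pixel was 255 -> 1.0 after / 255, so channel 0 becomes (1 - 0.485) / 0.229
    assert np.allclose(out[0, :, 0], (1.0 - 0.485) / 0.229)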


class VideoClasTopk:
    """Applies a top-k transformation on video classification predictions."""

    def __init__(self, class_ids: Optional[Sequence[Union[str, int]]] = None) -> None:
        """
        Initializes the VideoClasTopk class.

        Args:
            class_ids (Optional[Sequence[Union[str, int]]]): A list of class labels corresponding to class indices.
        """
        super().__init__()
        self.class_id_map = self._parse_class_id_map(class_ids)

    def softmax(self, data: np.ndarray) -> np.ndarray:
        """
        Applies the softmax function to an array of data.

        Args:
            data (np.ndarray): An array of data for which to compute softmax.

        Returns:
            np.ndarray: The softmax-transformed data.
        """
        x_max = np.max(data, axis=-1, keepdims=True)
        e_x = np.exp(data - x_max)
        return e_x / np.sum(e_x, axis=-1, keepdims=True)

    def _parse_class_id_map(
        self, class_ids: Optional[Sequence[Union[str, int]]]
    ) -> Optional[dict]:
        """
        Parses a list of class IDs into a mapping from class index to class label.

        Args:
            class_ids (Optional[Sequence[Union[str, int]]]): A list of class labels.

        Returns:
            Optional[dict]: A dictionary mapping class indices to labels, or None if no class_ids are provided.
        """
        if class_ids is None:
            return None
        class_id_map = {id: str(lb) for id, lb in enumerate(class_ids)}
        return class_id_map

    @benchmark.timeit
    def __call__(
        self, preds: np.ndarray, topk: int = 5
    ) -> Tuple[np.ndarray, List[np.ndarray], List[List[str]]]:
        """
        Selects the top-k predictions from the classification output.

        Args:
            preds (np.ndarray): A 2D array of prediction scores.
            topk (int): The number of top predictions to return.

        Returns:
            Tuple[np.ndarray, List[np.ndarray], List[List[str]]]: A tuple containing:
                - An array of indices of the top-k predictions.
                - A list of arrays of scores for the top-k predictions.
                - A list of lists of label names for the top-k predictions.
        """
        preds[0] = self.softmax(preds[0])
        indexes = preds[0].argsort(axis=1)[:, -topk:][:, ::-1].astype("int32")
        scores = [
            list(np.around(pred[index], decimals=5))
            for pred, index in zip(preds[0], indexes)
        ]
        # Fall back to the raw class index when no class_ids list was provided.
        label_names = [
            [
                self.class_id_map[i] if self.class_id_map is not None else str(i)
                for i in index
            ]
            for index in indexes
        ]
        return indexes, scores, label_names
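

# Illustrative sketch: VideoClasTopk softmaxes the raw scores and returns the
# top-k indices, rounded probabilities, and label names. The helper name
# `_example_topk`, the three-class label list, and the logits are made-up
# example values.
def _example_topk() -> None:
    topk_op = VideoClasTopk(class_ids=["archery", "bowling", "cycling"])
    logits = np.array([[2.0, 0.5, 0.1]], dtype=np.float32)  # one video, 3 classes
    indexes, scores, label_names = topk_op([logits], topk=2)
    assert indexes[0].tolist() == [0, 1]  # highest score first
    assert label_names == [["archery", "bowling"]]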


class ToBatch:
    """A class for batching videos."""

    @benchmark.timeit
    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
        """Stack videos into a batch.

        Args:
            videos (list of np.ndarray): List of videos to process.

        Returns:
            list of np.ndarray: A single-element list containing the stacked batch array.
        """
        return [np.concatenate(videos, axis=0).astype(dtype=np.float32, copy=False)]
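

# Illustrative end-to-end sketch, assuming the processors are chained the way a
# typical video-classification preprocessing pipeline would chain them
# (Scale -> CenterCrop -> Image2Array -> NormalizeVideo -> ToBatch), that
# `benchmark.timeit` works outside the full PaddleX pipeline, and that the
# module is run within its package so the relative `benchmark` import resolves.
# Frame count, sizes, and normalization statistics are example values only; the
# real configuration comes from the model's inference config.
if __name__ == "__main__":
    dummy_video = [
        np.random.randint(0, 256, (240, 320, 3), dtype=np.uint8) for _ in range(8)
    ]
    videos = [dummy_video]  # a batch of one video

    videos = Scale(short_size=224, fixed_ratio=True)(videos)
    videos = CenterCrop(target_size=224)(videos)
    videos = Image2Array(transpose=True, data_format="tchw")(videos)
    videos = NormalizeVideo(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])(
        videos
    )
    batch = ToBatch()(videos)

    # One stacked array of shape (batch, frames, channels, height, width).
    print(batch[0].shape)  # (1, 8, 3, 224, 224)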