# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import os.path as osp
from typing import List, Sequence, Union, Optional, Tuple
import re
import numpy as np
import cv2
import math
import json
import tempfile
import lazy_paddle


class Scale:
    """Scale images."""

    def __init__(
        self,
        short_size: int,
        fixed_ratio: bool = True,
        keep_ratio: Union[bool, None] = None,
        do_round: bool = False,
    ) -> None:
        """
        Initializes the Scale class.
        Args:
            short_size (int): The target size for the shorter side of the image.
            fixed_ratio (bool): Whether to maintain a fixed aspect ratio of 4:3.
            keep_ratio (Union[bool, None]): Whether to keep the aspect ratio. Cannot be True if fixed_ratio is True.
            do_round (bool): Whether to round the scaling factor.
        """
        super().__init__()
        self.short_size = short_size
        assert (fixed_ratio and not keep_ratio) or (
            not fixed_ratio
        ), "fixed_ratio and keep_ratio cannot be true at the same time"
        self.fixed_ratio = fixed_ratio
        self.keep_ratio = keep_ratio
        self.do_round = do_round

    def scale(self, video: List[np.ndarray]) -> List[np.ndarray]:
        """
        Performs resize operations on a sequence of images.
        Args:
            video (List[np.ndarray]): List where each item is an image, as a numpy array.
                For example, [np.ndarray0, np.ndarray1, np.ndarray2, ...]
        Returns:
            List[np.ndarray]: List where each item is a np.ndarray after scaling.
        """
        imgs = video
        resized_imgs = []
        for i in range(len(imgs)):
            img = imgs[i]
            if isinstance(img, np.ndarray):
                h, w, _ = img.shape
            else:
                raise NotImplementedError
            if (w <= h and w == self.short_size) or (h <= w and h == self.short_size):
                resized_imgs.append(img)
                continue
            if w <= h:
                ow = self.short_size
                if self.fixed_ratio:
                    oh = int(self.short_size * 4.0 / 3.0)
                elif self.keep_ratio is False:
                    oh = self.short_size
                else:
                    scale_factor = self.short_size / w
                    oh = (
                        int(h * float(scale_factor) + 0.5)
                        if self.do_round
                        else int(h * self.short_size / w)
                    )
                    ow = (
                        int(w * float(scale_factor) + 0.5)
                        if self.do_round
                        else self.short_size
                    )
            else:
                oh = self.short_size
                if self.fixed_ratio:
                    ow = int(self.short_size * 4.0 / 3.0)
                elif self.keep_ratio is False:
                    ow = self.short_size
                else:
                    scale_factor = self.short_size / h
                    oh = (
                        int(h * float(scale_factor) + 0.5)
                        if self.do_round
                        else self.short_size
                    )
                    ow = (
                        int(w * float(scale_factor) + 0.5)
                        if self.do_round
                        else int(w * self.short_size / h)
                    )
            resized_imgs.append(
                cv2.resize(img, (ow, oh), interpolation=cv2.INTER_LINEAR)
            )
        imgs = resized_imgs
        return imgs

    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
        """
        Apply the scaling operation to a list of videos.
        Args:
            videos (List[np.ndarray]): A list of videos, where each video is a sequence
                of images.
        Returns:
            List[np.ndarray]: A list of videos after scaling, where each video is a list of images.
        """
        return [self.scale(video) for video in videos]
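
# Illustrative usage sketch (not part of the original module); the frame count,
# frame size, and short_size below are assumed for demonstration only:
#
#     frames = [np.zeros((240, 320, 3), dtype=np.uint8) for _ in range(8)]
#     scaled = Scale(short_size=224, fixed_ratio=True)([frames])
#     # With fixed_ratio=True the longer side is forced to 4/3 of short_size,
#     # so each resized frame has shape (224, 298, 3).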


class CenterCrop:
    """Center crop images."""

    def __init__(self, target_size: int, do_round: bool = True) -> None:
        """
        Initializes the CenterCrop class.
        Args:
            target_size (int): The size of the cropped area.
            do_round (bool): Whether to round the crop coordinates.
        """
        super().__init__()
        self.target_size = target_size
        self.do_round = do_round

    def center_crop(self, imgs: List[np.ndarray]) -> List[np.ndarray]:
        """
        Performs center crop operations on images.
        Args:
            imgs (List[np.ndarray]): A sequence of images (a numpy array).
        Returns:
            List[np.ndarray]: A list of images after center cropping or a cropped numpy array.
        """
        crop_imgs = []
        th, tw = self.target_size, self.target_size
        if isinstance(imgs, lazy_paddle.Tensor):
            h, w = imgs.shape[-2:]
            x1 = int(round((w - tw) / 2.0)) if self.do_round else (w - tw) // 2
            y1 = int(round((h - th) / 2.0)) if self.do_round else (h - th) // 2
            crop_imgs = imgs[:, :, y1 : y1 + th, x1 : x1 + tw]
        else:
            for img in imgs:
                h, w, _ = img.shape
                assert (w >= self.target_size) and (
                    h >= self.target_size
                ), "image width({}) and height({}) should be larger than crop size({})".format(
                    w, h, self.target_size
                )
                x1 = int(round((w - tw) / 2.0)) if self.do_round else (w - tw) // 2
                y1 = int(round((h - th) / 2.0)) if self.do_round else (h - th) // 2
                crop_imgs.append(img[y1 : y1 + th, x1 : x1 + tw])
        return crop_imgs

    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
        """
        Apply the center crop operation to a list of videos.
        Args:
            videos (List[np.ndarray]): A list of videos, where each video is a sequence of images.
        Returns:
            List[np.ndarray]: A list of videos after center cropping.
        """
        return [self.center_crop(video) for video in videos]
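
# Illustrative usage sketch (not part of the original module); the input frame
# shape is assumed, e.g. the output of Scale above:
#
#     frames = [np.zeros((224, 298, 3), dtype=np.uint8) for _ in range(8)]
#     cropped = CenterCrop(target_size=224)([frames])
#     # Each frame becomes the central 224x224 window: shape (224, 224, 3).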


class Image2Array:
    """Convert a sequence of images to a numpy array with optional transposition."""

    def __init__(self, transpose: bool = True, data_format: str = "tchw") -> None:
        """
        Initializes the Image2Array class.
        Args:
            transpose (bool): Whether to transpose the resulting numpy array.
            data_format (str): The format to transpose to, either 'tchw' or 'cthw'.
        Raises:
            AssertionError: If data_format is not one of the allowed values.
        """
        super().__init__()
        assert data_format in [
            "tchw",
            "cthw",
        ], f"Target format must be in ['tchw', 'cthw'], but got {data_format}"
        self.transpose = transpose
        self.data_format = data_format

    def img2array(self, imgs: List[np.ndarray]) -> np.ndarray:
        """
        Converts a sequence of images to a numpy array and optionally transposes it.
        Args:
            imgs (List[np.ndarray]): A list of images to be converted to a numpy array.
        Returns:
            np.ndarray: A numpy array representation of the images.
        """
        t_imgs = np.stack(imgs).astype("float32")
        if self.transpose:
            if self.data_format == "tchw":
                t_imgs = t_imgs.transpose([0, 3, 1, 2])  # tchw
            else:
                t_imgs = t_imgs.transpose([3, 0, 1, 2])  # cthw
        return t_imgs

    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
        """
        Apply the image to array conversion to a list of videos.
        Args:
            videos (List[Sequence[np.ndarray]]): A list of videos, where each video is a sequence of images.
        Returns:
            List[np.ndarray]: A list of numpy arrays, one for each video.
        """
        return [self.img2array(video) for video in videos]
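
# Illustrative usage sketch (not part of the original module); shapes are assumed:
#
#     frames = [np.zeros((224, 224, 3), dtype=np.uint8) for _ in range(8)]
#     arrays = Image2Array(transpose=True, data_format="tchw")([frames])
#     # arrays[0].shape == (8, 3, 224, 224): frames are stacked, then HWC -> CHW.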


class NormalizeVideo:
    """
    Normalize video frames by subtracting the mean and dividing by the standard deviation.
    """

    def __init__(
        self,
        mean: Sequence[float],
        std: Sequence[float],
        tensor_shape: Sequence[int] = [3, 1, 1],
        inplace: bool = False,
    ) -> None:
        """
        Initializes the NormalizeVideo class.
        Args:
            mean (Sequence[float]): The mean values for each channel.
            std (Sequence[float]): The standard deviation values for each channel.
            tensor_shape (Sequence[int]): The shape of the mean and std tensors.
            inplace (bool): Whether to perform normalization in place.
        """
        super().__init__()
        self.inplace = inplace
        if not inplace:
            self.mean = np.array(mean).reshape(tensor_shape).astype(np.float32)
            self.std = np.array(std).reshape(tensor_shape).astype(np.float32)
        else:
            self.mean = np.array(mean, dtype=np.float32)
            self.std = np.array(std, dtype=np.float32)

    def normalize_video(self, imgs: np.ndarray) -> np.ndarray:
        """
        Normalizes a sequence of images.
        Args:
            imgs (np.ndarray): A numpy array of images to be normalized.
        Returns:
            np.ndarray: The normalized images as a numpy array.
        """
        if self.inplace:
            n = len(imgs)
            h, w, c = imgs[0].shape
            norm_imgs = np.empty((n, h, w, c), dtype=np.float32)
            for i, img in enumerate(imgs):
                norm_imgs[i] = img
            for img in norm_imgs:  # [n, h, w, c]
                mean = np.float64(self.mean.reshape(1, -1))  # [1, 3]
                stdinv = 1 / np.float64(self.std.reshape(1, -1))  # [1, 3]
                cv2.subtract(img, mean, img)
                cv2.multiply(img, stdinv, img)
        else:
            norm_imgs = imgs / 255.0
            norm_imgs -= self.mean
            norm_imgs /= self.std
        imgs = np.expand_dims(norm_imgs, axis=0).copy()
        return imgs

    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
        """
        Apply normalization to a list of videos.
        Args:
            videos (List[np.ndarray]): A list of videos, where each video is a numpy array of images.
        Returns:
            List[np.ndarray]: A list of normalized videos as numpy arrays.
        """
        return [self.normalize_video(video) for video in videos]
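
# Illustrative usage sketch (not part of the original module). With the default
# tensor_shape of [3, 1, 1], the non-inplace branch expects channel-second
# ("tchw") input such as the output of Image2Array; the mean/std values below
# are assumed for demonstration:
#
#     video = np.zeros((8, 3, 224, 224), dtype=np.float32)
#     normalize = NormalizeVideo(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#     out = normalize([video])
#     # out[0].shape == (1, 8, 3, 224, 224): frames are scaled to [0, 1],
#     # normalized per channel, and given a leading batch axis.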


class VideoClasTopk:
    """Applies a top-k transformation on video classification predictions."""

    def __init__(self, class_ids: Optional[Sequence[Union[str, int]]] = None) -> None:
        """
        Initializes the VideoClasTopk class.
        Args:
            class_ids (Optional[Sequence[Union[str, int]]]): A list of class labels corresponding to class indices.
        """
        super().__init__()
        self.class_id_map = self._parse_class_id_map(class_ids)

    def softmax(self, data: np.ndarray) -> np.ndarray:
        """
        Applies the softmax function to an array of data.
        Args:
            data (np.ndarray): An array of data for which to compute softmax.
        Returns:
            np.ndarray: The softmax-transformed data.
        """
        x_max = np.max(data, axis=-1, keepdims=True)
        e_x = np.exp(data - x_max)
        return e_x / np.sum(e_x, axis=-1, keepdims=True)

    def _parse_class_id_map(
        self, class_ids: Optional[Sequence[Union[str, int]]]
    ) -> Optional[dict]:
        """
        Parses a list of class IDs into a mapping from class index to class label.
        Args:
            class_ids (Optional[Sequence[Union[str, int]]]): A list of class labels.
        Returns:
            Optional[dict]: A dictionary mapping class indices to labels, or None if no class_ids are provided.
        """
        if class_ids is None:
            return None
        class_id_map = {id: str(lb) for id, lb in enumerate(class_ids)}
        return class_id_map

    def __call__(
        self, preds: np.ndarray, topk: int = 5
    ) -> Tuple[np.ndarray, List[np.ndarray], List[List[str]]]:
        """
        Selects the top-k predictions from the classification output.
        Args:
            preds (np.ndarray): A 2D array of prediction scores.
            topk (int): The number of top predictions to return.
        Returns:
            Tuple[np.ndarray, List[np.ndarray], List[List[str]]]: A tuple containing:
                - An array of indices of the top-k predictions.
                - A list of arrays of scores for the top-k predictions.
                - A list of lists of label names for the top-k predictions.
        """
        preds[0] = self.softmax(preds[0])
        indexes = preds[0].argsort(axis=1)[:, -topk:][:, ::-1].astype("int32")
        scores = [
            np.around(pred[index], decimals=5) for pred, index in zip(preds[0], indexes)
        ]
        label_names = [[self.class_id_map[i] for i in index] for index in indexes]
        return indexes, scores, label_names
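
# Illustrative usage sketch (not part of the original module); the class names
# and scores below are made up for demonstration:
#
#     postprocess = VideoClasTopk(class_ids=["archery", "bowling", "juggling"])
#     preds = [np.array([[2.0, 0.5, 0.1]], dtype=np.float32)]
#     indexes, scores, label_names = postprocess(preds, topk=2)
#     # indexes -> [[0, 1]], label_names -> [["archery", "bowling"]]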


class ToBatch:
    """A class for batching videos."""

    def __call__(self, videos: List[np.ndarray]) -> List[np.ndarray]:
        """Call method to stack videos into a batch.
        Args:
            videos (list of np.ndarrays): List of videos to process.
        Returns:
            list of np.ndarrays: A single-element list holding the videos concatenated along the batch axis as float32.
        """
        return [np.concatenate(videos, axis=0).astype(dtype=np.float32, copy=False)]
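
# Illustrative end-to-end sketch (not part of the original module) chaining the
# processors in their usual order; the dummy videos, sizes, and normalization
# statistics are assumed for demonstration only:
#
#     videos = [
#         [np.zeros((240, 320, 3), dtype=np.uint8) for _ in range(8)],
#         [np.zeros((360, 480, 3), dtype=np.uint8) for _ in range(8)],
#     ]
#     for op in (
#         Scale(short_size=224, fixed_ratio=True),
#         CenterCrop(target_size=224),
#         Image2Array(data_format="tchw"),
#         NormalizeVideo(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#     ):
#         videos = op(videos)
#     batch = ToBatch()(videos)
#     # batch[0].shape == (2, 8, 3, 224, 224)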