det_transforms.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    from collections.abc import Sequence
except Exception:
    from collections import Sequence
import random
import os.path as osp

import numpy as np
import cv2
from PIL import Image, ImageEnhance

from .imgaug_support import execute_imgaug
from .ops import *
from .box_utils import *
from .template import TemplateTransforms


class DetTransform:
    """Base class for detection data transforms."""

    def __init__(self):
        pass


class Compose(DetTransform):
    """Apply a list of preprocessing/augmentation operators to the input data.

    Every operator takes an image of shape [H, W, C], where H is the image
    height, W the image width and C the number of channels.

    Args:
        transforms (list): list of preprocessing/augmentation operators.

    Raises:
        TypeError: invalid argument type.
        ValueError: mismatched data length.
    """

    def __init__(self, transforms):
        if not isinstance(transforms, list):
            raise TypeError('The transforms must be a list!')
        if len(transforms) < 1:
            raise ValueError('The length of transforms ' + \
                             'must be equal or larger than 1!')
        self.transforms = transforms
        self.use_mixup = False
        for t in self.transforms:
            if type(t).__name__ == 'MixupImage':
                self.use_mixup = True
        # Check the operators in transforms; only operators defined by PaddleX
        # or imgaug augmenters are supported.
        for op in self.transforms:
            if not isinstance(op, DetTransform):
                import imgaug.augmenters as iaa
                if not isinstance(op, iaa.Augmenter):
                    raise Exception(
                        "Elements in transforms should be defined in 'paddlex.det.transforms' or class of imgaug.augmenters.Augmenter, see docs here: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/"
                    )

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (str/np.ndarray): image path or image as np.ndarray.
            im_info (dict): image-related information with the fields:
                - im_id (np.ndarray): image id, shape (1,).
                - image_shape (np.ndarray): original image size, shape (2,);
                  image_shape[0] is the height, image_shape[1] the width.
                - mixup (list): [im, im_info, label_info] of the image to be
                  mixed up with the current one; absent when the current epoch
                  does not use mixup.
            label_info (dict): annotation-related information with the fields:
                - gt_bbox (np.ndarray): ground-truth boxes [x1, y1, x2, y2],
                  shape (n, 4), where n is the number of ground-truth boxes.
                - gt_class (np.ndarray): class index of each box, shape (n, 1).
                - gt_score (np.ndarray): mixup score of each box, shape (n, 1).
                - gt_poly (list): polygon segmentation of each box, given as
                  x/y coordinates; the list length is n.
                - is_crowd (np.ndarray): whether each box marks a crowd of
                  objects, shape (n, 1).
                - difficult (np.ndarray): whether each box marks a difficult
                  object, shape (n, 1).

        Returns:
            tuple: the fields required by the network; which fields are
                returned is decided by the last operator in transforms.
        """

        def decode_image(im_file, im_info, label_info):
            if im_info is None:
                im_info = dict()
            if isinstance(im_file, np.ndarray):
                if len(im_file.shape) != 3:
                    raise Exception(
                        "im should be 3-dimensions, but now is {}-dimensions".
                        format(len(im_file.shape)))
                im = im_file
            else:
                try:
                    im = cv2.imread(im_file).astype('float32')
                except Exception:
                    raise TypeError('Cannot read the image file {}!'.format(
                        im_file))
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            # make default im_info with [h, w, 1]
            im_info['im_resize_info'] = np.array(
                [im.shape[0], im.shape[1], 1.], dtype=np.float32)
            im_info['image_shape'] = np.array([im.shape[0],
                                               im.shape[1]]).astype('int32')
            if not self.use_mixup:
                if 'mixup' in im_info:
                    del im_info['mixup']
            # decode the mixup image
            if 'mixup' in im_info:
                im_info['mixup'] = \
                    decode_image(im_info['mixup'][0],
                                 im_info['mixup'][1],
                                 im_info['mixup'][2])
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)

        outputs = decode_image(im, im_info, label_info)
        im = outputs[0]
        im_info = outputs[1]
        if len(outputs) == 3:
            label_info = outputs[2]
        for op in self.transforms:
            if im is None:
                return None
            if isinstance(op, DetTransform):
                outputs = op(im, im_info, label_info)
                im = outputs[0]
            else:
                im = execute_imgaug(op, im)
                if label_info is not None:
                    outputs = (im, im_info, label_info)
                else:
                    outputs = (im, im_info)
        return outputs
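

# A minimal usage sketch for Compose (not part of the original docs; it assumes
# the module is importable as paddlex.det.transforms and 'demo.jpg' is a
# hypothetical local image):
#
#   >>> import paddlex.det.transforms as T
#   >>> eval_transforms = T.Compose([
#   ...     T.Normalize(),
#   ...     T.ResizeByShort(short_size=800, max_size=1333),
#   ...     T.Padding(coarsest_stride=32)])
#   >>> im, im_info = eval_transforms('demo.jpg')
#   >>> im_info['im_resize_info']  # [resized_h, resized_w, scale]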


class ResizeByShort(DetTransform):
    """Resize the image according to its short side.

    1. Get the long side and the short side of the image.
    2. Compute the scale as short_size / short side; the same scale is applied
       to both height and width.
    3. If max_size > 0 and the scaled long side would exceed max_size, reset
       the scale to max_size / long side.
    4. Resize the image with the resulting scale.

    Args:
        short_size (int): target length of the short side. Defaults to 800.
        max_size (int): upper limit of the long side. Defaults to 1333.

    Raises:
        TypeError: invalid argument type.
    """

    def __init__(self, short_size=800, max_size=1333):
        self.max_size = int(max_size)
        if not isinstance(short_size, int):
            raise TypeError(
                "Type of short_size is invalid. Must be Integer, now is {}".
                format(type(short_size)))
        self.short_size = short_size
        if not (isinstance(self.max_size, int)):
            raise TypeError("max_size: input type is invalid.")

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: (im, im_info) when label_info is None, otherwise
                (im, im_info, label_info). im_info gets the updated field:
                - im_resize_info (np.ndarray): resized height, resized width
                  and the scale relative to the original image, shape (3,).

        Raises:
            TypeError: invalid argument type.
            ValueError: mismatched data length.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("ResizeByShort: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('ResizeByShort: image is not 3-dimensional.')
        im_short_size = min(im.shape[0], im.shape[1])
        im_long_size = max(im.shape[0], im.shape[1])
        scale = float(self.short_size) / im_short_size
        if self.max_size > 0 and np.round(scale *
                                          im_long_size) > self.max_size:
            scale = float(self.max_size) / float(im_long_size)
        resized_width = int(round(im.shape[1] * scale))
        resized_height = int(round(im.shape[0] * scale))
        im_resize_info = [resized_height, resized_width, scale]
        im = cv2.resize(
            im, (resized_width, resized_height),
            interpolation=cv2.INTER_LINEAR)
        im_info['im_resize_info'] = np.array(im_resize_info).astype(np.float32)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
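

# Worked example of the scaling rule above (a sketch with made-up sizes): for a
# 480x640 image with short_size=800 and max_size=1333, scale = 800 / 480 is
# about 1.667 and the scaled long side 640 * 1.667 is about 1067 <= 1333, so no
# extra clamping is needed:
#
#   >>> import numpy as np
#   >>> import paddlex.det.transforms as T
#   >>> op = T.ResizeByShort(short_size=800, max_size=1333)
#   >>> im, im_info = op(np.zeros((480, 640, 3), dtype='float32'))
#   >>> im.shape
#   (800, 1067, 3)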


class Padding(DetTransform):
    """Pad the image.

    1. Pad the height and width of the image up to multiples of
       coarsest_stride. For an input of [300, 640] with `coarsest_stride` = 32,
       300 is not a multiple of 32, so the image is zero-padded at the right
       and bottom, giving an output of [320, 640].
    2. Alternatively, pad the image to the shape given by target_size. For an
       input of [300, 640]:
       a. `target_size` = 960: zero-pad at the right and bottom to [960, 960].
       b. `target_size` = [640, 960]: zero-pad at the right and bottom to
          [640, 960].

    Steps:
    1. If coarsest_stride is 1 and target_size is None, return directly.
    2. Get the height H and width W of the image.
    3. Compute the padded height H_new and width W_new.
    4. Build a zero-valued np.ndarray of shape (H_new, W_new, 3) and paste the
       original image at its top-left corner.

    Args:
        coarsest_stride (int): the padded height and width are multiples of
            this value. Defaults to 1.
        target_size (int|list|tuple): padded height and width. Defaults to
            None. When both are set, target_size takes precedence.

    Raises:
        TypeError: invalid type of `target_size`.
        ValueError: `target_size` is a list or tuple whose length is not 2.
    """

    def __init__(self, coarsest_stride=1, target_size=None):
        self.coarsest_stride = coarsest_stride
        if target_size is not None:
            if not isinstance(target_size, int):
                if not isinstance(target_size, tuple) and not isinstance(
                        target_size, list):
                    raise TypeError(
                        "Padding: Type of target_size must in (int|list|tuple)."
                    )
                elif len(target_size) != 2:
                    raise ValueError(
                        "Padding: Length of target_size must equal 2.")
        self.target_size = target_size

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: (padding_im, im_info) when label_info is None, otherwise
                (padding_im, im_info, label_info).

        Raises:
            TypeError: invalid argument type.
            ValueError: mismatched data length.
            ValueError: neither coarsest_stride nor target_size is set
                properly.
            ValueError: target_size is smaller than the original image size.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("Padding: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Padding: image is not 3-dimensional.')
        im_h, im_w, im_c = im.shape[:]
        if isinstance(self.target_size, int):
            padding_im_h = self.target_size
            padding_im_w = self.target_size
        elif isinstance(self.target_size, list) or isinstance(self.target_size,
                                                              tuple):
            padding_im_w = self.target_size[0]
            padding_im_h = self.target_size[1]
        elif self.coarsest_stride > 0:
            padding_im_h = int(
                np.ceil(im_h / self.coarsest_stride) * self.coarsest_stride)
            padding_im_w = int(
                np.ceil(im_w / self.coarsest_stride) * self.coarsest_stride)
        else:
            raise ValueError(
                "coarsest_stride (>1) or target_size (list|int) need setting in Padding transform"
            )
        pad_height = padding_im_h - im_h
        pad_width = padding_im_w - im_w
        if pad_height < 0 or pad_width < 0:
            raise ValueError(
                'the size of image should be less than target_size, but the size of image ({}, {}) is larger than target_size ({}, {})'
                .format(im_w, im_h, padding_im_w, padding_im_h))
        padding_im = np.zeros(
            (padding_im_h, padding_im_w, im_c), dtype=np.float32)
        padding_im[:im_h, :im_w, :] = im
        if label_info is None:
            return (padding_im, im_info)
        else:
            return (padding_im, im_info, label_info)
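

# Worked example of the coarsest_stride rule above (a sketch): a 300x640 image
# padded with coarsest_stride=32 becomes ceil(300/32)*32 = 320 high and
# ceil(640/32)*32 = 640 wide, i.e. [320, 640]:
#
#   >>> import numpy as np
#   >>> import paddlex.det.transforms as T
#   >>> op = T.Padding(coarsest_stride=32)
#   >>> im, im_info = op(np.zeros((300, 640, 3), dtype='float32'))
#   >>> im.shape
#   (320, 640, 3)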


class Resize(DetTransform):
    """Resize the image.

    - When target_size is an int, resize the image to
      [target_size, target_size] with the given interpolation method.
    - When target_size is a list or tuple, resize the image to target_size
      with the given interpolation method.
    Note: when the interpolation method is 'RANDOM', a method is picked at
    random for each call.

    Args:
        target_size (int/list/tuple): target size of the resized image.
            Defaults to 608.
        interp (str): interpolation method, matching the OpenCV methods; one of
            ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'].
            Defaults to "LINEAR".

    Raises:
        TypeError: invalid argument type.
        ValueError: interp is not one of ['NEAREST', 'LINEAR', 'CUBIC',
            'AREA', 'LANCZOS4', 'RANDOM'].
    """
    # The interpolation mode
    interp_dict = {
        'NEAREST': cv2.INTER_NEAREST,
        'LINEAR': cv2.INTER_LINEAR,
        'CUBIC': cv2.INTER_CUBIC,
        'AREA': cv2.INTER_AREA,
        'LANCZOS4': cv2.INTER_LANCZOS4
    }

    def __init__(self, target_size=608, interp='LINEAR'):
        self.interp = interp
        if not (interp == "RANDOM" or interp in self.interp_dict):
            raise ValueError("interp should be one of {}".format(
                self.interp_dict.keys()))
        if isinstance(target_size, list) or isinstance(target_size, tuple):
            if len(target_size) != 2:
                raise TypeError(
                    'when target is list or tuple, it should include 2 elements, but it is {}'
                    .format(target_size))
        elif not isinstance(target_size, int):
            raise TypeError(
                "Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
                .format(type(target_size)))
        self.target_size = target_size

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: (im, im_info) when label_info is None, otherwise
                (im, im_info, label_info).

        Raises:
            TypeError: invalid argument type.
            ValueError: mismatched data length.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("Resize: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Resize: image is not 3-dimensional.')
        if self.interp == "RANDOM":
            interp = random.choice(list(self.interp_dict.keys()))
        else:
            interp = self.interp
        im = resize(im, self.target_size, self.interp_dict[interp])
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
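

# Usage sketch (an assumption, not from the original docs): per the docstring
# above, an int target_size resizes to a square output; interp='RANDOM' picks an
# interpolation method per call.
#
#   >>> import numpy as np
#   >>> import paddlex.det.transforms as T
#   >>> op = T.Resize(target_size=608, interp='RANDOM')
#   >>> im, im_info = op(np.zeros((480, 640, 3), dtype='float32'))
#   >>> im.shape
#   (608, 608, 3)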


class RandomHorizontalFlip(DetTransform):
    """Randomly flip the image, boxes and segmentation masks; a data
    augmentation operator used during training.

    1. Draw a random number in [0, 1]; if it is smaller than the flip
       probability, run steps 2-4, otherwise return directly.
    2. Flip the image horizontally.
    3. Recompute the ground-truth box coordinates after the flip and update
       gt_bbox in label_info.
    4. Recompute the segmentation polygons after the flip and update gt_poly
       in label_info.

    Args:
        prob (float): probability of a horizontal flip. Defaults to 0.5.

    Raises:
        TypeError: invalid argument type.
    """

    def __init__(self, prob=0.5):
        self.prob = prob
        if not isinstance(self.prob, float):
            raise TypeError("RandomHorizontalFlip: input type is invalid.")

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: (im, im_info) when label_info is None, otherwise
                (im, im_info, label_info). label_info gets the updated fields:
                - gt_bbox (np.ndarray): flipped box coordinates
                  [x1, y1, x2, y2], shape (n, 4), where n is the number of
                  ground-truth boxes.
                - gt_poly (list): flipped polygon x/y coordinates, length n.

        Raises:
            TypeError: invalid argument type.
            ValueError: mismatched data length.
        """
        if not isinstance(im, np.ndarray):
            raise TypeError(
                "RandomHorizontalFlip: image is not a numpy array.")
        if len(im.shape) != 3:
            raise ValueError(
                "RandomHorizontalFlip: image is not 3-dimensional.")
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomHorizontalFlip! ' +
                'Because the im_info and label_info can not be None!')
        if 'gt_bbox' not in label_info:
            raise TypeError('Cannot do RandomHorizontalFlip! ' + \
                            'Because gt_bbox is not in label_info!')
        image_shape = im_info['image_shape']
        gt_bbox = label_info['gt_bbox']
        height = image_shape[0]
        width = image_shape[1]
        if np.random.uniform(0, 1) < self.prob:
            im = horizontal_flip(im)
            if gt_bbox.shape[0] == 0:
                if label_info is None:
                    return (im, im_info)
                else:
                    return (im, im_info, label_info)
            label_info['gt_bbox'] = box_horizontal_flip(gt_bbox, width)
            if 'gt_poly' in label_info and \
                    len(label_info['gt_poly']) != 0:
                label_info['gt_poly'] = segms_horizontal_flip(
                    label_info['gt_poly'], height, width)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)


class Normalize(DetTransform):
    """Normalize the image.

    1. Scale pixel values to the range [0.0, 1.0].
    2. Subtract the mean and divide by the standard deviation.

    Args:
        mean (list): mean of the dataset. Defaults to [0.485, 0.456, 0.406].
        std (list): standard deviation of the dataset. Defaults to
            [0.229, 0.224, 0.225].

    Raises:
        TypeError: invalid argument type.
    """

    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        self.mean = mean
        self.std = std
        if not (isinstance(self.mean, list) and isinstance(self.std, list)):
            raise TypeError("NormalizeImage: input type is invalid.")
        from functools import reduce
        if reduce(lambda x, y: x * y, self.std) == 0:
            raise TypeError('NormalizeImage: std is invalid!')

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: (im, im_info) when label_info is None, otherwise
                (im, im_info, label_info).
        """
        mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
        std = np.array(self.std)[np.newaxis, np.newaxis, :]
        im = normalize(im, mean, std)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
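

# Usage sketch (an assumption): per the docstring above, pixel values in
# [0, 255] are first scaled to [0.0, 1.0] and then standardized, so a mid-gray
# pixel of 127.5 ends up near zero; the exact values assume the normalize()
# helper in .ops behaves as described.
#
#   >>> import numpy as np
#   >>> import paddlex.det.transforms as T
#   >>> op = T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#   >>> im, im_info = op(np.full((2, 2, 3), 127.5, dtype='float32'))
#   >>> im[0, 0]  # roughly [0.07, 0.20, 0.42]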


class RandomDistort(DetTransform):
    """Randomly distort the pixel content of the image with a given
    probability; a data augmentation operator used during training.

    1. Shuffle the order of the distortion operators.
    2. Following that order, apply each operator with its own probability,
       sampling the factor within [-range, range].

    Args:
        brightness_range (float): range of the brightness factor. Defaults to 0.5.
        brightness_prob (float): probability of adjusting brightness. Defaults to 0.5.
        contrast_range (float): range of the contrast factor. Defaults to 0.5.
        contrast_prob (float): probability of adjusting contrast. Defaults to 0.5.
        saturation_range (float): range of the saturation factor. Defaults to 0.5.
        saturation_prob (float): probability of adjusting saturation. Defaults to 0.5.
        hue_range (int): range of the hue factor. Defaults to 18.
        hue_prob (float): probability of adjusting hue. Defaults to 0.5.
    """

    def __init__(self,
                 brightness_range=0.5,
                 brightness_prob=0.5,
                 contrast_range=0.5,
                 contrast_prob=0.5,
                 saturation_range=0.5,
                 saturation_prob=0.5,
                 hue_range=18,
                 hue_prob=0.5):
        self.brightness_range = brightness_range
        self.brightness_prob = brightness_prob
        self.contrast_range = contrast_range
        self.contrast_prob = contrast_prob
        self.saturation_range = saturation_range
        self.saturation_prob = saturation_prob
        self.hue_range = hue_range
        self.hue_prob = hue_prob

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: (im, im_info) when label_info is None, otherwise
                (im, im_info, label_info).
        """
        brightness_lower = 1 - self.brightness_range
        brightness_upper = 1 + self.brightness_range
        contrast_lower = 1 - self.contrast_range
        contrast_upper = 1 + self.contrast_range
        saturation_lower = 1 - self.saturation_range
        saturation_upper = 1 + self.saturation_range
        hue_lower = -self.hue_range
        hue_upper = self.hue_range
        ops = [brightness, contrast, saturation, hue]
        random.shuffle(ops)
        params_dict = {
            'brightness': {
                'brightness_lower': brightness_lower,
                'brightness_upper': brightness_upper
            },
            'contrast': {
                'contrast_lower': contrast_lower,
                'contrast_upper': contrast_upper
            },
            'saturation': {
                'saturation_lower': saturation_lower,
                'saturation_upper': saturation_upper
            },
            'hue': {
                'hue_lower': hue_lower,
                'hue_upper': hue_upper
            }
        }
        prob_dict = {
            'brightness': self.brightness_prob,
            'contrast': self.contrast_prob,
            'saturation': self.saturation_prob,
            'hue': self.hue_prob
        }
        for id in range(4):
            params = params_dict[ops[id].__name__]
            prob = prob_dict[ops[id].__name__]
            params['im'] = im
            if np.random.uniform(0, 1) < prob:
                im = ops[id](**params)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
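

# Usage sketch (an assumption, not from the original docs): the four photometric
# ops fire independently with their own probabilities, in a shuffled order; here
# only brightness can fire. It assumes the brightness/contrast/saturation/hue
# helpers in .ops accept the image as passed by this operator.
#
#   >>> import numpy as np
#   >>> import paddlex.det.transforms as T
#   >>> op = T.RandomDistort(brightness_prob=1.0, contrast_prob=0.0,
#   ...                      saturation_prob=0.0, hue_prob=0.0)
#   >>> im, im_info = op(np.random.rand(32, 32, 3).astype('float32') * 255)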


class MixupImage(DetTransform):
    """Apply mixup to the image; a data augmentation operator used during
    training. Currently only YOLOv3 supports this transform.

    If there is no 'mixup' field in im_info, return directly; otherwise:
    1. Draw a random factor from a beta distribution.
    2.
       - If factor >= 1.0, remove the 'mixup' field from im_info and return.
       - If factor <= 0.0, return the 'mixup' field and remove it from im_info.
       - Otherwise:
         (1) Multiply the original image by factor, the mixup image by
             (1 - factor), and add the two.
         (2) Concatenate the boxes of the two images.
         (3) Concatenate the box classes of the two images.
         (4) Multiply the original box scores by factor, the mixup box scores
             by (1 - factor), and concatenate the two.
    3. Update image_shape in im_info.

    Args:
        alpha (float): first shape parameter of the beta distribution.
            Defaults to 1.5.
        beta (float): second shape parameter of the beta distribution.
            Defaults to 1.5.
        mixup_epoch (int): use mixup in the first mixup_epoch epochs; the
            strategy is disabled when this is -1. Defaults to -1.

    Raises:
        ValueError: mismatched data length.
    """

    def __init__(self, alpha=1.5, beta=1.5, mixup_epoch=-1):
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha should be positive in MixupImage")
        if self.beta <= 0.0:
            raise ValueError("beta should be positive in MixupImage")
        self.mixup_epoch = mixup_epoch

    def _mixup_img(self, img1, img2, factor):
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        img = np.zeros((h, w, img1.shape[2]), 'float32')
        img[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32') * factor
        img[:img2.shape[0], :img2.shape[1], :] += \
            img2.astype('float32') * (1.0 - factor)
        return img.astype('float32')

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: (im, im_info) when label_info is None, otherwise
                (im, im_info, label_info). im_info gets the updated field:
                - image_shape (np.ndarray): height and width of the mixed
                  image, shape (2,).
                The 'mixup' field is removed from im_info.
                label_info gets the updated fields:
                - gt_bbox (np.ndarray): boxes after mixup, shape (n, 4).
                - gt_class (np.ndarray): class index of each box, shape (n, 1).
                - gt_score (np.ndarray): mixup score of each box, shape (n, 1).

        Raises:
            TypeError: invalid argument type.
        """
        if im_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Because the im_info can not be None!')
        if 'mixup' not in im_info:
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        if im_info['epoch'] > self.mixup_epoch \
                or factor >= 1.0:
            im_info.pop('mixup')
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        if factor <= 0.0:
            return im_info.pop('mixup')
        im = self._mixup_img(im, im_info['mixup'][0], factor)
        if label_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Because the label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info or \
                'gt_score' not in label_info:
            raise TypeError('Cannot do MixupImage! ' + \
                            'Because gt_bbox/gt_class/gt_score is not in label_info!')
        gt_bbox1 = label_info['gt_bbox']
        gt_bbox2 = im_info['mixup'][2]['gt_bbox']
        gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
        gt_class1 = label_info['gt_class']
        gt_class2 = im_info['mixup'][2]['gt_class']
        gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
        gt_score1 = label_info['gt_score']
        gt_score2 = im_info['mixup'][2]['gt_score']
        gt_score = np.concatenate(
            (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
        if 'gt_poly' in label_info:
            gt_poly1 = label_info['gt_poly']
            gt_poly2 = im_info['mixup'][2]['gt_poly']
            label_info['gt_poly'] = gt_poly1 + gt_poly2
        is_crowd1 = label_info['is_crowd']
        is_crowd2 = im_info['mixup'][2]['is_crowd']
        is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
        label_info['gt_bbox'] = gt_bbox
        label_info['gt_score'] = gt_score
        label_info['gt_class'] = gt_class
        label_info['is_crowd'] = is_crowd
        im_info['image_shape'] = np.array([im.shape[0],
                                           im.shape[1]]).astype('int32')
        im_info.pop('mixup')
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
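

# Sketch of what _mixup_img computes (an illustration with made-up shapes): the
# two images are pasted onto a zero canvas of the element-wise maximum size and
# blended as img1 * factor + img2 * (1 - factor).
#
#   >>> import numpy as np
#   >>> a = np.ones((2, 2, 3), dtype='float32') * 100
#   >>> b = np.ones((4, 2, 3), dtype='float32') * 200
#   >>> mixed = MixupImage(alpha=1.5, beta=1.5)._mixup_img(a, b, factor=0.25)
#   >>> mixed.shape
#   (4, 2, 3)
#   >>> mixed[0, 0, 0]  # 100 * 0.25 + 200 * 0.75
#   175.0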


class RandomExpand(DetTransform):
    """Randomly expand the image; a data augmentation operator used during
    training.

    1. Randomly pick an expansion ratio (expansion only happens when the ratio
       is greater than 1).
    2. Compute the size of the expanded image.
    3. Build an image filled with fill_value and paste the original image onto
       it at a random position.
    4. Shift the ground-truth box coordinates according to the paste position.
    5. Shift the segmentation polygon coordinates according to the paste
       position.

    Args:
        ratio (float): maximum expansion ratio. Defaults to 4.0.
        prob (float): probability of expansion. Defaults to 0.5.
        fill_value (list): initial fill value (0-255) of the expanded image.
            Defaults to [123.675, 116.28, 103.53].
    """

    def __init__(self,
                 ratio=4.,
                 prob=0.5,
                 fill_value=[123.675, 116.28, 103.53]):
        super(RandomExpand, self).__init__()
        assert ratio > 1.01, "expand ratio must be larger than 1.01"
        self.ratio = ratio
        self.prob = prob
        assert isinstance(fill_value, Sequence), \
            "fill value must be sequence"
        if not isinstance(fill_value, tuple):
            fill_value = tuple(fill_value)
        self.fill_value = fill_value

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: (im, im_info, label_info), where im_info gets the updated
                field:
                - image_shape (np.ndarray): height and width of the expanded
                  image, shape (2,).
                label_info gets the updated fields:
                - gt_bbox (np.ndarray): boxes after expansion, shape (n, 4).
                - gt_class (np.ndarray): class index of each box, shape (n, 1).

        Raises:
            TypeError: invalid argument type.
        """
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomExpand! ' +
                'Because the im_info and label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info:
            raise TypeError('Cannot do RandomExpand! ' + \
                            'Because gt_bbox/gt_class is not in label_info!')
        if np.random.uniform(0., 1.) < self.prob:
            return (im, im_info, label_info)
        image_shape = im_info['image_shape']
        height = int(image_shape[0])
        width = int(image_shape[1])
        expand_ratio = np.random.uniform(1., self.ratio)
        h = int(height * expand_ratio)
        w = int(width * expand_ratio)
        if not h > height or not w > width:
            return (im, im_info, label_info)
        y = np.random.randint(0, h - height)
        x = np.random.randint(0, w - width)
        canvas = np.ones((h, w, 3), dtype=np.float32)
        canvas *= np.array(self.fill_value, dtype=np.float32)
        canvas[y:y + height, x:x + width, :] = im
        im_info['image_shape'] = np.array([h, w]).astype('int32')
        if 'gt_bbox' in label_info and len(label_info['gt_bbox']) > 0:
            label_info['gt_bbox'] += np.array([x, y] * 2, dtype=np.float32)
        if 'gt_poly' in label_info and len(label_info['gt_poly']) > 0:
            label_info['gt_poly'] = expand_segms(label_info['gt_poly'], x, y,
                                                 height, width, expand_ratio)
        return (canvas, im_info, label_info)
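

# Illustration of the box shift above (a sketch with made-up numbers): if the
# original image is pasted at offset (x, y) = (50, 30) inside the expanded
# canvas, every box is shifted by [x, y, x, y]:
#
#   >>> import numpy as np
#   >>> gt_bbox = np.array([[10., 20., 60., 80.]], dtype=np.float32)
#   >>> gt_bbox + np.array([50, 30] * 2, dtype=np.float32)
#   array([[ 60.,  50., 110., 110.]], dtype=float32)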


class RandomCrop(DetTransform):
    """Randomly crop the image.

    1. If allow_no_crop is True, append 'no_crop' to thresholds.
    2. Randomly shuffle thresholds.
    3. For each element of thresholds:
       (1) If the current thresh is 'no_crop', return the original image and
           annotations.
       (2) Randomly sample aspect_ratio and scaling values and compute the
           height, width and origin of a candidate crop region from them.
       (3) Compute the IoU between the ground-truth boxes and the candidate
           crop region; if every IoU is below thresh, continue with step 3.
       (4) If cover_all_box is True and any ground-truth box has an IoU below
           thresh, continue with step 3.
       (5) Keep the ground-truth boxes located inside the candidate crop
           region; if no box is valid, continue with step 3, otherwise go to
           step 4.
    4. Convert the valid ground-truth box coordinates to the crop-region
       coordinate system.
    5. Convert the valid segmentation polygons to the crop-region coordinate
       system.

    Args:
        aspect_ratio (list): range of the crop aspect ratio, given as
            [min, max]. Defaults to [.5, 2.].
        thresholds (list): IoU thresholds used to decide whether a candidate
            crop region is valid. Defaults to [.0, .1, .3, .5, .7, .9].
        scaling (list): range of the crop area relative to the original area,
            given as [min, max]. Defaults to [.3, 1.].
        num_attempts (int): number of attempts before giving up the search for
            a valid crop region. Defaults to 50.
        allow_no_crop (bool): whether skipping the crop entirely is allowed.
            Defaults to True.
        cover_all_box (bool): whether all ground-truth boxes must be inside
            the crop region. Defaults to False.
    """

    def __init__(self,
                 aspect_ratio=[.5, 2.],
                 thresholds=[.0, .1, .3, .5, .7, .9],
                 scaling=[.3, 1.],
                 num_attempts=50,
                 allow_no_crop=True,
                 cover_all_box=False):
        self.aspect_ratio = aspect_ratio
        self.thresholds = thresholds
        self.scaling = scaling
        self.num_attempts = num_attempts
        self.allow_no_crop = allow_no_crop
        self.cover_all_box = cover_all_box

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: (im, im_info, label_info), where im_info gets the updated
                field:
                - image_shape (np.ndarray): height and width of the cropped
                  image, shape (2,).
                label_info gets the updated fields:
                - gt_bbox (np.ndarray): boxes after cropping, shape (n, 4).
                - gt_class (np.ndarray): class index of each box, shape (n, 1).
                - gt_score (np.ndarray): mixup score of each box, shape (n, 1).

        Raises:
            TypeError: invalid argument type.
        """
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomCrop! ' +
                'Because the im_info and label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info:
            raise TypeError('Cannot do RandomCrop! ' + \
                            'Because gt_bbox/gt_class is not in label_info!')
        if len(label_info['gt_bbox']) == 0:
            return (im, im_info, label_info)
        image_shape = im_info['image_shape']
        w = image_shape[1]
        h = image_shape[0]
        gt_bbox = label_info['gt_bbox']
        thresholds = list(self.thresholds)
        if self.allow_no_crop:
            thresholds.append('no_crop')
        np.random.shuffle(thresholds)
        for thresh in thresholds:
            if thresh == 'no_crop':
                return (im, im_info, label_info)
            found = False
            for i in range(self.num_attempts):
                scale = np.random.uniform(*self.scaling)
                min_ar, max_ar = self.aspect_ratio
                aspect_ratio = np.random.uniform(
                    max(min_ar, scale**2), min(max_ar, scale**-2))
                crop_h = int(h * scale / np.sqrt(aspect_ratio))
                crop_w = int(w * scale * np.sqrt(aspect_ratio))
                crop_y = np.random.randint(0, h - crop_h)
                crop_x = np.random.randint(0, w - crop_w)
                crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
                iou = iou_matrix(
                    gt_bbox, np.array(
                        [crop_box], dtype=np.float32))
                if iou.max() < thresh:
                    continue
                if self.cover_all_box and iou.min() < thresh:
                    continue
                cropped_box, valid_ids = crop_box_with_center_constraint(
                    gt_bbox, np.array(
                        crop_box, dtype=np.float32))
                if valid_ids.size > 0:
                    found = True
                    break
            if found:
                if 'gt_poly' in label_info and len(label_info['gt_poly']) > 0:
                    crop_polys = crop_segms(
                        label_info['gt_poly'],
                        valid_ids,
                        np.array(
                            crop_box, dtype=np.int64),
                        h,
                        w)
                    if [] in crop_polys:
                        delete_id = list()
                        valid_polys = list()
                        for id, crop_poly in enumerate(crop_polys):
                            if crop_poly == []:
                                delete_id.append(id)
                            else:
                                valid_polys.append(crop_poly)
                        valid_ids = np.delete(valid_ids, delete_id)
                        if len(valid_polys) == 0:
                            return (im, im_info, label_info)
                        label_info['gt_poly'] = valid_polys
                    else:
                        label_info['gt_poly'] = crop_polys
                im = crop_image(im, crop_box)
                label_info['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
                label_info['gt_class'] = np.take(
                    label_info['gt_class'], valid_ids, axis=0)
                im_info['image_shape'] = np.array(
                    [crop_box[3] - crop_box[1],
                     crop_box[2] - crop_box[0]]).astype('int32')
                if 'gt_score' in label_info:
                    label_info['gt_score'] = np.take(
                        label_info['gt_score'], valid_ids, axis=0)
                if 'is_crowd' in label_info:
                    label_info['is_crowd'] = np.take(
                        label_info['is_crowd'], valid_ids, axis=0)
                return (im, im_info, label_info)
        return (im, im_info, label_info)
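

# Worked example of the candidate crop size above (a sketch with made-up
# numbers): for an image with w=800, h=600, a sampled scale of 0.5 and a
# sampled aspect_ratio of 1.0,
#   crop_h = int(600 * 0.5 / sqrt(1.0)) = 300
#   crop_w = int(800 * 0.5 * sqrt(1.0)) = 400
# so the candidate crop is a 400x300 window placed at a random (crop_x, crop_y)
# inside the image, and it is kept only if the IoU checks above pass.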


class ArrangeFasterRCNN(DetTransform):
    """Gather the fields required by FasterRCNN for training/evaluation/prediction.

    Args:
        mode (str): what the data is used for; one of
            ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: when mode is 'train', returns
                (im, im_resize_info, gt_bbox, gt_class, is_crowd): the image,
                its resize info relative to the original image, the
                ground-truth boxes, their classes and their is_crowd flags.
                When mode is 'eval', returns
                (im, im_resize_info, im_id, im_shape, gt_bbox, gt_class,
                is_difficult): the image, its resize info, the image id, the
                image size, the ground-truth boxes, their classes and their
                difficult flags. When mode is 'test' or 'quant', returns
                (im, im_resize_info, im_shape): the image, its resize info and
                the image size.

        Raises:
            TypeError: invalid argument type.
            ValueError: mismatched data length.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeFasterRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_resize_info = im_info['im_resize_info']
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_crowd = label_info['is_crowd']
            outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd)
        elif self.mode == 'eval':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeFasterRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_id = im_info['im_id']
            im_shape = np.array(
                (im_info['image_shape'][0], im_info['image_shape'][1], 1),
                dtype=np.float32)
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_difficult = label_info['difficult']
            outputs = (im, im_resize_info, im_id, im_shape, gt_bbox, gt_class,
                       is_difficult)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeFasterRCNN! ' +
                                'Because the im_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_shape = np.array(
                (im_info['image_shape'][0], im_info['image_shape'][1], 1),
                dtype=np.float32)
            outputs = (im, im_resize_info, im_shape)
        return outputs


class ArrangeMaskRCNN(DetTransform):
    """Gather the fields required by MaskRCNN for training/evaluation/prediction.

    Args:
        mode (str): what the data is used for; one of
            ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: when mode is 'train', returns
                (im, im_resize_info, gt_bbox, gt_class, is_crowd, gt_masks):
                the image, its resize info relative to the original image, the
                ground-truth boxes, their classes, their is_crowd flags and
                the ground-truth masks. When mode is 'eval', returns
                (im, im_resize_info, im_id, im_shape): the image, its resize
                info, the image id and the image size. When mode is 'test' or
                'quant', returns (im, im_resize_info, im_shape): the image,
                its resize info and the image size.

        Raises:
            TypeError: invalid argument type.
            ValueError: mismatched data length.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeTrainMaskRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_resize_info = im_info['im_resize_info']
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_crowd = label_info['is_crowd']
            assert 'gt_poly' in label_info
            segms = label_info['gt_poly']
            if len(segms) != 0:
                assert len(segms) == is_crowd.shape[0]
            gt_masks = []
            valid = True
            for i in range(len(segms)):
                segm = segms[i]
                gt_segm = []
                if is_crowd[i]:
                    gt_segm.append([[0, 0]])
                else:
                    for poly in segm:
                        if len(poly) == 0:
                            valid = False
                            break
                        gt_segm.append(np.array(poly).reshape(-1, 2))
                if (not valid) or len(gt_segm) == 0:
                    break
                gt_masks.append(gt_segm)
            outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd,
                       gt_masks)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeMaskRCNN! ' +
                                'Because the im_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_shape = np.array(
                (im_info['image_shape'][0], im_info['image_shape'][1], 1),
                dtype=np.float32)
            if self.mode == 'eval':
                im_id = im_info['im_id']
                outputs = (im, im_resize_info, im_id, im_shape)
            else:
                outputs = (im, im_resize_info, im_shape)
        return outputs


class ArrangeYOLOv3(DetTransform):
    """Gather the fields required by YOLOv3 for training/evaluation/prediction.

    Args:
        mode (str): what the data is used for; one of
            ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): image as np.ndarray.
            im_info (dict, optional): image-related information.
            label_info (dict, optional): annotation-related information.

        Returns:
            tuple: when mode is 'train', returns
                (im, gt_bbox, gt_class, gt_score, im_shape): the image, the
                ground-truth boxes, their classes, their mixup scores and the
                image size. When mode is 'eval', returns
                (im, im_shape, im_id, gt_bbox, gt_class, difficult): the
                image, the image size, the image id, the ground-truth boxes,
                their classes and their difficult flags. When mode is 'test'
                or 'quant', returns (im, im_shape): the image and the image
                size.

        Raises:
            TypeError: invalid argument type.
            ValueError: mismatched data length.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeYolov3! ' +
                    'Because the im_info and label_info can not be None!')
            im_shape = im_info['image_shape']
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            if len(label_info['gt_bbox']) != len(label_info['gt_score']):
                raise ValueError("gt num mismatch: bbox and score.")
            gt_bbox = np.zeros((50, 4), dtype=im.dtype)
            gt_class = np.zeros((50, ), dtype=np.int32)
            gt_score = np.zeros((50, ), dtype=im.dtype)
            gt_num = min(50, len(label_info['gt_bbox']))
            if gt_num > 0:
                label_info['gt_class'][:gt_num, 0] = label_info[
                    'gt_class'][:gt_num, 0] - 1
                gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
                gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
                gt_score[:gt_num] = label_info['gt_score'][:gt_num, 0]
            # parse [x1, y1, x2, y2] to [x, y, w, h]
            gt_bbox[:, 2:4] = gt_bbox[:, 2:4] - gt_bbox[:, :2]
            gt_bbox[:, :2] = gt_bbox[:, :2] + gt_bbox[:, 2:4] / 2.
            outputs = (im, gt_bbox, gt_class, gt_score, im_shape)
        elif self.mode == 'eval':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeYolov3! ' +
                    'Because the im_info and label_info can not be None!')
            im_shape = im_info['image_shape']
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_id = im_info['im_id']
            gt_bbox = np.zeros((50, 4), dtype=im.dtype)
            gt_class = np.zeros((50, ), dtype=np.int32)
            difficult = np.zeros((50, ), dtype=np.int32)
            gt_num = min(50, len(label_info['gt_bbox']))
            if gt_num > 0:
                label_info['gt_class'][:gt_num, 0] = label_info[
                    'gt_class'][:gt_num, 0] - 1
                gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
                gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
                difficult[:gt_num] = label_info['difficult'][:gt_num, 0]
            outputs = (im, im_shape, im_id, gt_bbox, gt_class, difficult)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeYolov3! ' +
                                'Because the im_info can not be None!')
            im_shape = im_info['image_shape']
            outputs = (im, im_shape)
        return outputs


class BasicRCNNTransforms(TemplateTransforms):
    """Image processing pipeline for RCNN models (faster-rcnn/mask-rcnn).

    Training:
    1. Randomly flip the image horizontally with probability 0.5.
    2. Normalize the image.
    3. Resize the image by scale, where the scale is computed as
       scale = min_max_size[0] / short_size_of_image
       if max_size_of_image * scale > min_max_size[1]:
           scale = min_max_size[1] / max_size_of_image
    4. Pad the height and width from step 3 up to multiples of 32.

    Evaluation/prediction:
    1. Normalize the image.
    2. Resize the image by scale, computed as in the training stage.
    3. Pad the height and width from step 2 up to multiples of 32.

    Args:
        mode (str): stage of the pipeline; 'train', 'eval' or 'test' for
            training, evaluation or prediction.
        min_max_size (list): constraints on the shortest and longest side when
            scaling the image.
        mean (list): image mean.
        std (list): image standard deviation.
    """

    def __init__(self,
                 mode,
                 min_max_size=[800, 1333],
                 mean=[0.485, 0.456, 0.406],
                 std=[0.229, 0.224, 0.225]):
        super(BasicRCNNTransforms, self).__init__(mode=mode)
        if self.mode == 'train':
            # transforms used for training, including data augmentation
            self.transforms = Compose([
                RandomHorizontalFlip(prob=0.5),
                Normalize(mean=mean, std=std),
                ResizeByShort(
                    short_size=min_max_size[0], max_size=min_max_size[1]),
                Padding(coarsest_stride=32)
            ])
        else:
            # transforms used for evaluation/prediction
            self.transforms = Compose([
                Normalize(mean=mean, std=std),
                ResizeByShort(
                    short_size=min_max_size[0], max_size=min_max_size[1]),
                Padding(coarsest_stride=32)
            ])


class BasicYOLOTransforms(TemplateTransforms):
    """Image preprocessing pipeline for YOLOv3 models.

    Training:
    1. In the first mixup_epoch epochs, apply the MixupImage strategy, see
       https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/det_transforms.html#mixupimage
    2. Randomly distort the image: brightness, contrast, saturation and hue.
    3. Randomly expand the image, see
       https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/det_transforms.html#randomexpand
    4. Randomly crop the image.
    5. Resize the output of step 4 to the size given by shape.
    6. Randomly flip the image horizontally with probability 0.5.
    7. Normalize the image.

    Evaluation/prediction:
    1. Resize the image to the size given by shape.
    2. Normalize the image.

    Args:
        mode (str): stage of the pipeline; 'train', 'eval' or 'test' for
            training, evaluation or prediction.
        shape (list): input size of the model; images are resized to this size.
        mixup_epoch (int): mixup is used in the first mixup_epoch epochs of
            training.
        mean (list): image mean.
        std (list): image standard deviation.
    """

    def __init__(self,
                 mode,
                 shape=[608, 608],
                 mixup_epoch=250,
                 mean=[0.485, 0.456, 0.406],
                 std=[0.229, 0.224, 0.225]):
        super(BasicYOLOTransforms, self).__init__(mode=mode)
        width = shape
        if isinstance(shape, list):
            if shape[0] != shape[1]:
                raise Exception(
                    "In YOLOv3 model, width and height should be equal")
            width = shape[0]
        if width % 32 != 0:
            raise Exception(
                "In YOLOv3 model, width and height should be multiple of 32, e.g. 224, 256, 320...."
            )
        if self.mode == 'train':
            # transforms used for training, including data augmentation
            self.transforms = Compose([
                MixupImage(mixup_epoch=mixup_epoch),
                RandomDistort(),
                RandomExpand(),
                RandomCrop(),
                Resize(target_size=width, interp='RANDOM'),
                RandomHorizontalFlip(),
                Normalize(mean=mean, std=std)
            ])
        else:
            # transforms used for evaluation/prediction
            self.transforms = Compose([
                Resize(target_size=width, interp='CUBIC'),
                Normalize(mean=mean, std=std)
            ])