det_transforms.py 62 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
7127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358
  1. # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. try:
  15. from collections.abc import Sequence
  16. except Exception:
  17. from collections import Sequence
  18. import random
  19. import os
  20. import os.path as osp
  21. import numpy as np
  22. import cv2
  23. from PIL import Image, ImageEnhance
  24. from .imgaug_support import execute_imgaug
  25. from .ops import *
  26. from .box_utils import *
  27. import paddlex.utils.logging as logging
  28. class DetTransform:
  29. """检测数据处理基类
  30. """
  31. def __init__(self):
  32. pass
class Compose(DetTransform):
    """Apply a list of preprocessing/augmentation operators to one sample.

    Every operator consumes and produces images shaped [H, W, C], where H
    is the height, W the width and C the channel count.

    Args:
        transforms (list): list of preprocessing/augmentation operators.

    Raises:
        TypeError: `transforms` is not a list.
        ValueError: `transforms` has fewer than one element.
    """

    def __init__(self, transforms):
        if not isinstance(transforms, list):
            raise TypeError('The transforms must be a list!')
        if len(transforms) < 1:
            raise ValueError('The length of transforms ' + \
                             'must be equal or larger than 1!')
        self.transforms = transforms
        # Remember whether a MixupImage op is present so decode_image can
        # drop the 'mixup' field when mixup is unused.
        self.use_mixup = False
        for t in self.transforms:
            if type(t).__name__ == 'MixupImage':
                self.use_mixup = True
        # Validate the ops: only PaddleX DetTransform subclasses or
        # imgaug Augmenter instances are supported.
        for op in self.transforms:
            if not isinstance(op, DetTransform):
                import imgaug.augmenters as iaa
                if not isinstance(op, iaa.Augmenter):
                    raise Exception(
                        "Elements in transforms should be defined in 'paddlex.det.transforms' or class of imgaug.augmenters.Augmenter, see docs here: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/"
                    )

    def __call__(self, im, im_info=None, label_info=None, images_writer=None, step=0):
        """Run the whole pipeline on one sample.

        Args:
            im (str/np.ndarray): image path or image np.ndarray data.
            im_info (dict): image meta info with fields:
                - im_id (np.ndarray): image id, shape (1,).
                - image_shape (np.ndarray): original [h, w], shape (2,).
                - mixup (list): [im, im_info, label_info] of the image to
                  mix with; absent when no mixup happens this epoch.
            label_info (dict): annotation info with fields:
                - gt_bbox (np.ndarray): ground-truth boxes
                  [x1, y1, x2, y2], shape (n, 4), n = number of boxes.
                - gt_class (np.ndarray): class index per box, shape (n, 1).
                - gt_score (np.ndarray): mixup score per box, shape (n, 1).
                - gt_poly (list): segmentation polygons (x/y coordinates)
                  per box, length n.
                - is_crowd (np.ndarray): crowd flag per box, shape (n, 1).
                - difficult (np.ndarray): difficult flag per box,
                  shape (n, 1).
            images_writer: optional visual-logging writer exposing
                `add_image(tag=..., img=..., step=...)`; when given, the
                image is logged before the pipeline and after each op.
            step (int): logging step passed to `images_writer`.

        Returns:
            tuple: fields required by the network, determined by the last
            operator in `transforms`; None if any op returns a None image.
        """

        def decode_image(im_file, im_info, label_info):
            # Load the image (from path or passthrough ndarray) and fill
            # the default meta fields; recurses once for the mixup image.
            if im_info is None:
                im_info = dict()
            if isinstance(im_file, np.ndarray):
                if len(im_file.shape) != 3:
                    raise Exception(
                        "im should be 3-dimensions, but now is {}-dimensions".
                        format(len(im_file.shape)))
                im = im_file
            else:
                try:
                    im = cv2.imread(im_file).astype('float32')
                except:
                    raise TypeError('Can\'t read The image file {}!'.format(
                        im_file))
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            # make default im_info with [h, w, 1]
            im_info['im_resize_info'] = np.array(
                [im.shape[0], im.shape[1], 1.], dtype=np.float32)
            im_info['image_shape'] = np.array([im.shape[0],
                                               im.shape[1]]).astype('int32')
            if not self.use_mixup:
                if 'mixup' in im_info:
                    del im_info['mixup']
            # decode mixup image
            if 'mixup' in im_info:
                im_info['mixup'] = \
                    decode_image(im_info['mixup'][0],
                                 im_info['mixup'][1],
                                 im_info['mixup'][2])
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)

        outputs = decode_image(im, im_info, label_info)
        im = outputs[0]
        im_info = outputs[1]
        if len(outputs) == 3:
            label_info = outputs[2]
        if images_writer is not None:
            images_writer.add_image(tag='0. origin image',
                                    img=im,
                                    step=step)
        op_id = 1
        for op in self.transforms:
            if im is None:
                return None
            if isinstance(op, DetTransform):
                outputs = op(im, im_info, label_info)
                im = outputs[0]
            else:
                # imgaug ops only transform the image, not the labels.
                im = execute_imgaug(op, im)
                if label_info is not None:
                    outputs = (im, im_info, label_info)
                else:
                    outputs = (im, im_info)
            if images_writer is not None:
                tag = str(op_id) + '. ' + op.__class__.__name__
                images_writer.add_image(tag=tag,
                                        img=im,
                                        step=step)
            op_id += 1
        return outputs

    def add_augmenters(self, augmenters):
        # Prepend extra augmenters, skipping none; duplicates (by class
        # name) are only reported via logging.error, not removed.
        if not isinstance(augmenters, list):
            raise Exception(
                "augmenters should be list type in func add_augmenters()")
        transform_names = [type(x).__name__ for x in self.transforms]
        for aug in augmenters:
            if type(aug).__name__ in transform_names:
                logging.error("{} is already in ComposedTransforms, need to remove it from add_augmenters().".format(type(aug).__name__))
        self.transforms = augmenters + self.transforms
  161. class ResizeByShort(DetTransform):
  162. """根据图像的短边调整图像大小(resize)。
  163. 1. 获取图像的长边和短边长度。
  164. 2. 根据短边与short_size的比例,计算长边的目标长度,
  165. 此时高、宽的resize比例为short_size/原图短边长度。
  166. 3. 如果max_size>0,调整resize比例:
  167. 如果长边的目标长度>max_size,则高、宽的resize比例为max_size/原图长边长度。
  168. 4. 根据调整大小的比例对图像进行resize。
  169. Args:
  170. target_size (int): 短边目标长度。默认为800。
  171. max_size (int): 长边目标长度的最大限制。默认为1333。
  172. Raises:
  173. TypeError: 形参数据类型不满足需求。
  174. """
  175. def __init__(self, short_size=800, max_size=1333):
  176. self.max_size = int(max_size)
  177. if not isinstance(short_size, int):
  178. raise TypeError(
  179. "Type of short_size is invalid. Must be Integer, now is {}".
  180. format(type(short_size)))
  181. self.short_size = short_size
  182. if not (isinstance(self.max_size, int)):
  183. raise TypeError("max_size: input type is invalid.")
  184. def __call__(self, im, im_info=None, label_info=None):
  185. """
  186. Args:
  187. im (numnp.ndarraypy): 图像np.ndarray数据。
  188. im_info (dict, 可选): 存储与图像相关的信息。
  189. label_info (dict, 可选): 存储与标注框相关的信息。
  190. Returns:
  191. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  192. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  193. 存储与标注框相关信息的字典。
  194. 其中,im_info更新字段为:
  195. - im_resize_info (np.ndarray): resize后的图像高、resize后的图像宽、resize后的图像相对原始图的缩放比例
  196. 三者组成的np.ndarray,形状为(3,)。
  197. Raises:
  198. TypeError: 形参数据类型不满足需求。
  199. ValueError: 数据长度不匹配。
  200. """
  201. if im_info is None:
  202. im_info = dict()
  203. if not isinstance(im, np.ndarray):
  204. raise TypeError("ResizeByShort: image type is not numpy.")
  205. if len(im.shape) != 3:
  206. raise ValueError('ResizeByShort: image is not 3-dimensional.')
  207. im_short_size = min(im.shape[0], im.shape[1])
  208. im_long_size = max(im.shape[0], im.shape[1])
  209. scale = float(self.short_size) / im_short_size
  210. if self.max_size > 0 and np.round(scale *
  211. im_long_size) > self.max_size:
  212. scale = float(self.max_size) / float(im_long_size)
  213. resized_width = int(round(im.shape[1] * scale))
  214. resized_height = int(round(im.shape[0] * scale))
  215. im_resize_info = [resized_height, resized_width, scale]
  216. im = cv2.resize(
  217. im, (resized_width, resized_height),
  218. interpolation=cv2.INTER_LINEAR)
  219. im_info['im_resize_info'] = np.array(im_resize_info).astype(np.float32)
  220. if label_info is None:
  221. return (im, im_info)
  222. else:
  223. return (im, im_info, label_info)
class Padding(DetTransform):
    """Pad an image with zeros at its right and bottom edges.

    Either pad height and width up to the nearest multiple of
    `coarsest_stride`, or pad up to an explicit `target_size`. E.g. a
    [300, 640] input with coarsest_stride=32 becomes [320, 640] (300 is
    not a multiple of 32); with target_size=960 it becomes [960, 960];
    with target_size=[640, 960] it becomes [640, 960].

    Steps:
    1. With coarsest_stride=1 and target_size=None the image comes back
       unchanged (padded to its own size).
    2. Read the image height H and width W.
    3. Compute the padded H_new, W_new.
    4. Allocate a zero-filled (H_new, W_new, 3) array and paste the
       original image at the top-left corner.

    Args:
        coarsest_stride (int): padded sides become multiples of this
            value. Default 1.
        target_size (int|list|tuple): explicit padded size. Default None.
            NOTE(review): when both are configured, target_size wins (it
            is checked first in __call__), although the original
            docstring claimed coarsest_stride has priority — confirm the
            intended precedence.

    Raises:
        TypeError: `target_size` has an unsupported type.
        ValueError: `target_size` is a list/tuple whose length is not 2.
    """

    def __init__(self, coarsest_stride=1, target_size=None):
        self.coarsest_stride = coarsest_stride
        if target_size is not None:
            if not isinstance(target_size, int):
                if not isinstance(target_size, tuple) and not isinstance(
                        target_size, list):
                    raise TypeError(
                        "Padding: Type of target_size must in (int|list|tuple)."
                    )
                elif len(target_size) != 2:
                    raise ValueError(
                        "Padding: Length of target_size must equal 2.")
        self.target_size = target_size

    def __call__(self, im, im_info=None, label_info=None):
        """Pad one image.

        Args:
            im (np.ndarray): HWC image data.
            im_info (dict, optional): image meta info (not modified here).
            label_info (dict, optional): annotation info, passed through.

        Returns:
            tuple: (padding_im, im_info) when `label_info` is None,
            otherwise (padding_im, im_info, label_info).

        Raises:
            TypeError: `im` is not a numpy array.
            ValueError: `im` is not 3-dimensional, neither padding mode is
                configured, or the image is larger than the padded size.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("Padding: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Padding: image is not 3-dimensional.')
        im_h, im_w, im_c = im.shape[:]
        if isinstance(self.target_size, int):
            padding_im_h = self.target_size
            padding_im_w = self.target_size
        elif isinstance(self.target_size, list) or isinstance(self.target_size,
                                                              tuple):
            # NOTE(review): a list/tuple target_size is read as [w, h]
            # here — confirm against callers.
            padding_im_w = self.target_size[0]
            padding_im_h = self.target_size[1]
        elif self.coarsest_stride > 0:
            # Round each side up to the next multiple of coarsest_stride.
            padding_im_h = int(
                np.ceil(im_h / self.coarsest_stride) * self.coarsest_stride)
            padding_im_w = int(
                np.ceil(im_w / self.coarsest_stride) * self.coarsest_stride)
        else:
            raise ValueError(
                "coarsest_stridei(>1) or target_size(list|int) need setting in Padding transform"
            )
        pad_height = padding_im_h - im_h
        pad_width = padding_im_w - im_w
        if pad_height < 0 or pad_width < 0:
            raise ValueError(
                'the size of image should be less than target_size, but the size of image ({}, {}), is larger than target_size ({}, {})'
                .format(im_w, im_h, padding_im_w, padding_im_h))
        padding_im = np.zeros(
            (padding_im_h, padding_im_w, im_c), dtype=np.float32)
        padding_im[:im_h, :im_w, :] = im
        if label_info is None:
            return (padding_im, im_info)
        else:
            return (padding_im, im_info, label_info)
class Resize(DetTransform):
    """Resize an image to a fixed size.

    - If `target_size` is an int, resize to [target_size, target_size].
    - If `target_size` is a list/tuple, resize to that shape.

    When `interp` is "RANDOM", a random interpolation mode is drawn on
    every call.

    Args:
        target_size (int/list/tuple): target size. Default 608.
        interp (str): interpolation mode, matching OpenCV's modes; one of
            ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM'].
            Default "LINEAR".

    Raises:
        TypeError: `target_size` has an unsupported type or length.
        ValueError: `interp` is not one of ['NEAREST', 'LINEAR', 'CUBIC',
            'AREA', 'LANCZOS4', 'RANDOM'].
    """
    # Map from mode name to the corresponding OpenCV interpolation flag.
    interp_dict = {
        'NEAREST': cv2.INTER_NEAREST,
        'LINEAR': cv2.INTER_LINEAR,
        'CUBIC': cv2.INTER_CUBIC,
        'AREA': cv2.INTER_AREA,
        'LANCZOS4': cv2.INTER_LANCZOS4
    }

    def __init__(self, target_size=608, interp='LINEAR'):
        self.interp = interp
        if not (interp == "RANDOM" or interp in self.interp_dict):
            raise ValueError("interp should be one of {}".format(
                self.interp_dict.keys()))
        if isinstance(target_size, list) or isinstance(target_size, tuple):
            if len(target_size) != 2:
                raise TypeError(
                    'when target is list or tuple, it should include 2 elements, but it is {}'
                    .format(target_size))
        elif not isinstance(target_size, int):
            raise TypeError(
                "Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
                .format(type(target_size)))
        self.target_size = target_size

    def __call__(self, im, im_info=None, label_info=None):
        """Resize one image.

        Args:
            im (np.ndarray): HWC image data.
            im_info (dict, optional): image meta info (not modified here).
            label_info (dict, optional): annotation info, passed through.

        Returns:
            tuple: (im, im_info) when `label_info` is None, otherwise
            (im, im_info, label_info).

        Raises:
            TypeError: `im` is not a numpy array.
            ValueError: `im` is not 3-dimensional.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("Resize: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Resize: image is not 3-dimensional.')
        if self.interp == "RANDOM":
            interp = random.choice(list(self.interp_dict.keys()))
        else:
            interp = self.interp
        # `resize` comes from the .ops star-import.
        im = resize(im, self.target_size, self.interp_dict[interp])
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
class RandomHorizontalFlip(DetTransform):
    """Randomly flip image, boxes and segmentation polygons horizontally
    (train-time augmentation).

    1. Draw a uniform sample in [0, 1); when it is below `prob`, perform
       steps 2-4, otherwise return the input unchanged.
    2. Flip the image horizontally.
    3. Recompute the box coordinates and update label_info['gt_bbox'].
    4. Recompute the polygon coordinates and update
       label_info['gt_poly'].

    Args:
        prob (float): probability of flipping. Default 0.5.

    Raises:
        TypeError: `prob` is not a float.
    """

    def __init__(self, prob=0.5):
        self.prob = prob
        if not isinstance(self.prob, float):
            raise TypeError("RandomHorizontalFlip: input type is invalid.")

    def __call__(self, im, im_info=None, label_info=None):
        """Flip one sample with probability `prob`.

        Args:
            im (np.ndarray): HWC image data.
            im_info (dict): image meta info; must contain 'image_shape'.
            label_info (dict): annotation info; must contain 'gt_bbox'.
                Updated fields when a flip happens:
                - gt_bbox (np.ndarray): flipped boxes [x1, y1, x2, y2],
                  shape (n, 4).
                - gt_poly (list): flipped polygon x/y coordinates,
                  length n.

        Returns:
            tuple: (im, im_info) when `label_info` is None, otherwise
            (im, im_info, label_info).

        Raises:
            TypeError: `im_info`/`label_info` is None, or 'gt_bbox' is
                missing from `label_info`.
            ValueError: `im` is not 3-dimensional.
        """
        if not isinstance(im, np.ndarray):
            raise TypeError(
                "RandomHorizontalFlip: image is not a numpy array.")
        if len(im.shape) != 3:
            raise ValueError(
                "RandomHorizontalFlip: image is not 3-dimensional.")
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomHorizontalFlip! ' +
                'Becasuse the im_info and label_info can not be None!')
        if 'gt_bbox' not in label_info:
            raise TypeError('Cannot do RandomHorizontalFlip! ' + \
                            'Becasuse gt_bbox is not in label_info!')
        image_shape = im_info['image_shape']
        gt_bbox = label_info['gt_bbox']
        height = image_shape[0]
        width = image_shape[1]
        if np.random.uniform(0, 1) < self.prob:
            im = horizontal_flip(im)
            if gt_bbox.shape[0] == 0:
                # No boxes: nothing to mirror besides the image itself.
                # (label_info was already checked non-None above, so the
                # else branch is the one taken here.)
                if label_info is None:
                    return (im, im_info)
                else:
                    return (im, im_info, label_info)
            label_info['gt_bbox'] = box_horizontal_flip(gt_bbox, width)
            if 'gt_poly' in label_info and \
                    len(label_info['gt_poly']) != 0:
                label_info['gt_poly'] = segms_horizontal_flip(
                    label_info['gt_poly'], height, width)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
  446. class Normalize(DetTransform):
  447. """对图像进行标准化。
  448. 1. 归一化图像到到区间[0.0, 1.0]。
  449. 2. 对图像进行减均值除以标准差操作。
  450. Args:
  451. mean (list): 图像数据集的均值。默认为[0.485, 0.456, 0.406]。
  452. std (list): 图像数据集的标准差。默认为[0.229, 0.224, 0.225]。
  453. Raises:
  454. TypeError: 形参数据类型不满足需求。
  455. """
  456. def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
  457. self.mean = mean
  458. self.std = std
  459. if not (isinstance(self.mean, list) and isinstance(self.std, list)):
  460. raise TypeError("NormalizeImage: input type is invalid.")
  461. from functools import reduce
  462. if reduce(lambda x, y: x * y, self.std) == 0:
  463. raise TypeError('NormalizeImage: std is invalid!')
  464. def __call__(self, im, im_info=None, label_info=None):
  465. """
  466. Args:
  467. im (numnp.ndarraypy): 图像np.ndarray数据。
  468. im_info (dict, 可选): 存储与图像相关的信息。
  469. label_info (dict, 可选): 存储与标注框相关的信息。
  470. Returns:
  471. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  472. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  473. 存储与标注框相关信息的字典。
  474. """
  475. mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
  476. std = np.array(self.std)[np.newaxis, np.newaxis, :]
  477. im = normalize(im, mean, std)
  478. if label_info is None:
  479. return (im, im_info)
  480. else:
  481. return (im, im_info, label_info)
  482. class RandomDistort(DetTransform):
  483. """以一定的概率对图像进行随机像素内容变换,模型训练时的数据增强操作
  484. 1. 对变换的操作顺序进行随机化操作。
  485. 2. 按照1中的顺序以一定的概率在范围[-range, range]对图像进行随机像素内容变换。
  486. Args:
  487. brightness_range (float): 明亮度因子的范围。默认为0.5。
  488. brightness_prob (float): 随机调整明亮度的概率。默认为0.5。
  489. contrast_range (float): 对比度因子的范围。默认为0.5。
  490. contrast_prob (float): 随机调整对比度的概率。默认为0.5。
  491. saturation_range (float): 饱和度因子的范围。默认为0.5。
  492. saturation_prob (float): 随机调整饱和度的概率。默认为0.5。
  493. hue_range (int): 色调因子的范围。默认为18。
  494. hue_prob (float): 随机调整色调的概率。默认为0.5。
  495. """
  496. def __init__(self,
  497. brightness_range=0.5,
  498. brightness_prob=0.5,
  499. contrast_range=0.5,
  500. contrast_prob=0.5,
  501. saturation_range=0.5,
  502. saturation_prob=0.5,
  503. hue_range=18,
  504. hue_prob=0.5):
  505. self.brightness_range = brightness_range
  506. self.brightness_prob = brightness_prob
  507. self.contrast_range = contrast_range
  508. self.contrast_prob = contrast_prob
  509. self.saturation_range = saturation_range
  510. self.saturation_prob = saturation_prob
  511. self.hue_range = hue_range
  512. self.hue_prob = hue_prob
  513. def __call__(self, im, im_info=None, label_info=None):
  514. """
  515. Args:
  516. im (np.ndarray): 图像np.ndarray数据。
  517. im_info (dict, 可选): 存储与图像相关的信息。
  518. label_info (dict, 可选): 存储与标注框相关的信息。
  519. Returns:
  520. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  521. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  522. 存储与标注框相关信息的字典。
  523. """
  524. brightness_lower = 1 - self.brightness_range
  525. brightness_upper = 1 + self.brightness_range
  526. contrast_lower = 1 - self.contrast_range
  527. contrast_upper = 1 + self.contrast_range
  528. saturation_lower = 1 - self.saturation_range
  529. saturation_upper = 1 + self.saturation_range
  530. hue_lower = -self.hue_range
  531. hue_upper = self.hue_range
  532. ops = [brightness, contrast, saturation, hue]
  533. random.shuffle(ops)
  534. params_dict = {
  535. 'brightness': {
  536. 'brightness_lower': brightness_lower,
  537. 'brightness_upper': brightness_upper
  538. },
  539. 'contrast': {
  540. 'contrast_lower': contrast_lower,
  541. 'contrast_upper': contrast_upper
  542. },
  543. 'saturation': {
  544. 'saturation_lower': saturation_lower,
  545. 'saturation_upper': saturation_upper
  546. },
  547. 'hue': {
  548. 'hue_lower': hue_lower,
  549. 'hue_upper': hue_upper
  550. }
  551. }
  552. prob_dict = {
  553. 'brightness': self.brightness_prob,
  554. 'contrast': self.contrast_prob,
  555. 'saturation': self.saturation_prob,
  556. 'hue': self.hue_prob
  557. }
  558. for id in range(4):
  559. params = params_dict[ops[id].__name__]
  560. prob = prob_dict[ops[id].__name__]
  561. params['im'] = im
  562. if np.random.uniform(0, 1) < prob:
  563. im = ops[id](**params)
  564. im = im.astype('float32')
  565. if label_info is None:
  566. return (im, im_info)
  567. else:
  568. return (im, im_info, label_info)
class MixupImage(DetTransform):
    """Mix the current sample with a second one (train-time augmentation;
    currently only the YOLOv3 models support this transform).

    When im_info has no 'mixup' field the sample passes through
    unchanged. Otherwise:
    1. Draw a mixing factor from a Beta(alpha, beta) distribution
       (clamped to [0, 1]).
    2.
       - factor >= 1.0 (or the mixup epoch window has passed): drop the
         'mixup' field and return the original sample.
       - factor <= 0.0: return the mixup sample instead, dropping the
         field.
       - otherwise:
         (1) blend: im * factor + mixup_im * (1 - factor) on a canvas
             sized to the larger of the two images;
         (2) concatenate the two samples' gt_bbox;
         (3) concatenate the two samples' gt_class;
         (4) combine gt_score as score1 * factor and
             score2 * (1 - factor).
    3. Update im_info['image_shape'] to the blended image's size.

    Args:
        alpha (float): first Beta-distribution parameter. Default 1.5.
        beta (float): second Beta-distribution parameter. Default 1.5.
        mixup_epoch (int): apply mixup only during the first
            `mixup_epoch` epochs; -1 disables the strategy. Default -1.

    Raises:
        ValueError: `alpha` or `beta` is not positive.
    """

    def __init__(self, alpha=1.5, beta=1.5, mixup_epoch=-1):
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha shold be positive in MixupImage")
        if self.beta <= 0.0:
            raise ValueError("beta shold be positive in MixupImage")
        self.mixup_epoch = mixup_epoch

    def _mixup_img(self, img1, img2, factor):
        # Blend the two images on a zero canvas large enough for both;
        # img1 is weighted by `factor`, img2 by (1 - factor).
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        img = np.zeros((h, w, img1.shape[2]), 'float32')
        img[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32') * factor
        img[:img2.shape[0], :img2.shape[1], :] += \
            img2.astype('float32') * (1.0 - factor)
        return img.astype('float32')

    def __call__(self, im, im_info=None, label_info=None):
        """Mix one sample.

        Args:
            im (np.ndarray): HWC image data.
            im_info (dict): image meta info; must contain 'epoch', and
                'mixup' when mixing should happen. Updated field:
                - image_shape (np.ndarray): blended [h, w], shape (2,).
                Removed field:
                - mixup (list): the sample that was mixed in.
            label_info (dict): annotation info. Updated fields:
                - gt_bbox (np.ndarray): mixed boxes, shape (n, 4).
                - gt_class (np.ndarray): mixed class ids, shape (n, 1).
                - gt_score (np.ndarray): mixed scores, shape (n, 1).

        Returns:
            tuple: (im, im_info) when `label_info` is None, otherwise
            (im, im_info, label_info).

        Raises:
            TypeError: `im_info` is None; or mixing is required but
                `label_info` is None or lacks gt_bbox/gt_class/gt_score.
        """
        if im_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Becasuse the im_info can not be None!')
        if 'mixup' not in im_info:
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        # Past the mixup window, or factor rounds to "all original":
        # keep the original sample only.
        if im_info['epoch'] > self.mixup_epoch \
                or factor >= 1.0:
            im_info.pop('mixup')
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        # factor rounds to "all mixup": return the other sample as-is.
        if factor <= 0.0:
            return im_info.pop('mixup')
        im = self._mixup_img(im, im_info['mixup'][0], factor)
        if label_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Becasuse the label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info or \
                'gt_score' not in label_info:
            raise TypeError('Cannot do MixupImage! ' + \
                            'Becasuse gt_bbox/gt_class/gt_score is not in label_info!')
        gt_bbox1 = label_info['gt_bbox']
        gt_bbox2 = im_info['mixup'][2]['gt_bbox']
        gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
        gt_class1 = label_info['gt_class']
        gt_class2 = im_info['mixup'][2]['gt_class']
        gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
        gt_score1 = label_info['gt_score']
        gt_score2 = im_info['mixup'][2]['gt_score']
        gt_score = np.concatenate(
            (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
        if 'gt_poly' in label_info:
            gt_poly1 = label_info['gt_poly']
            gt_poly2 = im_info['mixup'][2]['gt_poly']
            label_info['gt_poly'] = gt_poly1 + gt_poly2
        is_crowd1 = label_info['is_crowd']
        is_crowd2 = im_info['mixup'][2]['is_crowd']
        is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
        label_info['gt_bbox'] = gt_bbox
        label_info['gt_score'] = gt_score
        label_info['gt_class'] = gt_class
        label_info['is_crowd'] = is_crowd
        im_info['image_shape'] = np.array([im.shape[0],
                                           im.shape[1]]).astype('int32')
        im_info.pop('mixup')
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
class RandomExpand(DetTransform):
    """Randomly expand the image onto a larger filled canvas
    (train-time augmentation).

    1. Draw an expansion ratio (expansion only happens when it is > 1).
    2. Compute the expanded canvas size.
    3. Fill a canvas with `fill_value` and paste the original image at a
       random position on it.
    4. Shift gt_bbox coordinates to the paste position.
    5. Shift gt_poly coordinates to the paste position.

    Args:
        ratio (float): maximum expansion ratio. Default 4.0.
        prob (float): documented upstream as the probability of
            expanding. Default 0.5.
            NOTE(review): the code below returns early when the uniform
            draw is < prob, i.e. it actually expands with probability
            1 - prob — confirm the intended semantics before changing
            either the doc or the condition.
        fill_value (list): canvas fill value (0-255).
            Default [123.675, 116.28, 103.53].
    """

    def __init__(self,
                 ratio=4.,
                 prob=0.5,
                 fill_value=[123.675, 116.28, 103.53]):
        super(RandomExpand, self).__init__()
        # NOTE: assert statements are stripped under `python -O`;
        # kept as-is for behavioral parity.
        assert ratio > 1.01, "expand ratio must be larger than 1.01"
        self.ratio = ratio
        self.prob = prob
        assert isinstance(fill_value, Sequence), \
            "fill value must be sequence"
        if not isinstance(fill_value, tuple):
            fill_value = tuple(fill_value)
        self.fill_value = fill_value

    def __call__(self, im, im_info=None, label_info=None):
        """Expand one sample.

        Args:
            im (np.ndarray): HWC image data.
            im_info (dict): image meta info; must contain 'image_shape'.
                Updated field:
                - image_shape (np.ndarray): expanded [h, w], shape (2,).
            label_info (dict): annotation info; must contain 'gt_bbox'
                and 'gt_class'. Updated fields:
                - gt_bbox (np.ndarray): shifted boxes, shape (n, 4).
                - gt_poly (list): shifted polygons (when present).

        Returns:
            tuple: (im, im_info, label_info) — unchanged input when no
            expansion happens, otherwise the expanded canvas and shifted
            annotations.

        Raises:
            TypeError: `im_info`/`label_info` is None, or gt_bbox/gt_class
                is missing from `label_info`.
        """
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomExpand! ' +
                'Becasuse the im_info and label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info:
            raise TypeError('Cannot do RandomExpand! ' + \
                            'Becasuse gt_bbox/gt_class is not in label_info!')
        # NOTE(review): expansion is SKIPPED with probability `prob`
        # (see class docstring).
        if np.random.uniform(0., 1.) < self.prob:
            return (im, im_info, label_info)
        image_shape = im_info['image_shape']
        height = int(image_shape[0])
        width = int(image_shape[1])
        expand_ratio = np.random.uniform(1., self.ratio)
        h = int(height * expand_ratio)
        w = int(width * expand_ratio)
        # Rounding can collapse the expansion to a no-op; bail out then.
        if not h > height or not w > width:
            return (im, im_info, label_info)
        y = np.random.randint(0, h - height)
        x = np.random.randint(0, w - width)
        canvas = np.ones((h, w, 3), dtype=np.float32)
        canvas *= np.array(self.fill_value, dtype=np.float32)
        canvas[y:y + height, x:x + width, :] = im
        im_info['image_shape'] = np.array([h, w]).astype('int32')
        if 'gt_bbox' in label_info and len(label_info['gt_bbox']) > 0:
            # Boxes translate by (x, y) for both corners.
            label_info['gt_bbox'] += np.array([x, y] * 2, dtype=np.float32)
        if 'gt_poly' in label_info and len(label_info['gt_poly']) > 0:
            label_info['gt_poly'] = expand_segms(label_info['gt_poly'], x, y,
                                                 height, width, expand_ratio)
        return (canvas, im_info, label_info)
  762. class RandomCrop(DetTransform):
  763. """随机裁剪图像。
  764. 1. 若allow_no_crop为True,则在thresholds加入’no_crop’。
  765. 2. 随机打乱thresholds。
  766. 3. 遍历thresholds中各元素:
  767. (1) 如果当前thresh为’no_crop’,则返回原始图像和标注信息。
  768. (2) 随机取出aspect_ratio和scaling中的值并由此计算出候选裁剪区域的高、宽、起始点。
  769. (3) 计算真实标注框与候选裁剪区域IoU,若全部真实标注框的IoU都小于thresh,则继续第3步。
  770. (4) 如果cover_all_box为True且存在真实标注框的IoU小于thresh,则继续第3步。
  771. (5) 筛选出位于候选裁剪区域内的真实标注框,若有效框的个数为0,则继续第3步,否则进行第4步。
  772. 4. 换算有效真值标注框相对候选裁剪区域的位置坐标。
  773. 5. 换算有效分割区域相对候选裁剪区域的位置坐标。
  774. Args:
  775. aspect_ratio (list): 裁剪后短边缩放比例的取值范围,以[min, max]形式表示。默认值为[.5, 2.]。
  776. thresholds (list): 判断裁剪候选区域是否有效所需的IoU阈值取值列表。默认值为[.0, .1, .3, .5, .7, .9]。
  777. scaling (list): 裁剪面积相对原面积的取值范围,以[min, max]形式表示。默认值为[.3, 1.]。
  778. num_attempts (int): 在放弃寻找有效裁剪区域前尝试的次数。默认值为50。
  779. allow_no_crop (bool): 是否允许未进行裁剪。默认值为True。
  780. cover_all_box (bool): 是否要求所有的真实标注框都必须在裁剪区域内。默认值为False。
  781. """
  782. def __init__(self,
  783. aspect_ratio=[.5, 2.],
  784. thresholds=[.0, .1, .3, .5, .7, .9],
  785. scaling=[.3, 1.],
  786. num_attempts=50,
  787. allow_no_crop=True,
  788. cover_all_box=False):
  789. self.aspect_ratio = aspect_ratio
  790. self.thresholds = thresholds
  791. self.scaling = scaling
  792. self.num_attempts = num_attempts
  793. self.allow_no_crop = allow_no_crop
  794. self.cover_all_box = cover_all_box
  795. def __call__(self, im, im_info=None, label_info=None):
  796. """
  797. Args:
  798. im (np.ndarray): 图像np.ndarray数据。
  799. im_info (dict, 可选): 存储与图像相关的信息。
  800. label_info (dict, 可选): 存储与标注框相关的信息。
  801. Returns:
  802. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  803. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  804. 存储与标注框相关信息的字典。
  805. 其中,im_info更新字段为:
  806. - image_shape (np.ndarray): 扩裁剪的图像高、宽二者组成的np.ndarray,形状为(2,)。
  807. label_info更新字段为:
  808. - gt_bbox (np.ndarray): 随机裁剪后真实标注框坐标,形状为(n, 4),
  809. 其中n代表真实标注框的个数。
  810. - gt_class (np.ndarray): 随机裁剪后每个真实标注框对应的类别序号,形状为(n, 1),
  811. 其中n代表真实标注框的个数。
  812. - gt_score (np.ndarray): 随机裁剪后每个真实标注框对应的混合得分,形状为(n, 1),
  813. 其中n代表真实标注框的个数。
  814. Raises:
  815. TypeError: 形参数据类型不满足需求。
  816. """
  817. if im_info is None or label_info is None:
  818. raise TypeError(
  819. 'Cannot do RandomCrop! ' +
  820. 'Becasuse the im_info and label_info can not be None!')
  821. if 'gt_bbox' not in label_info or \
  822. 'gt_class' not in label_info:
  823. raise TypeError('Cannot do RandomCrop! ' + \
  824. 'Becasuse gt_bbox/gt_class is not in label_info!')
  825. if len(label_info['gt_bbox']) == 0:
  826. return (im, im_info, label_info)
  827. image_shape = im_info['image_shape']
  828. w = image_shape[1]
  829. h = image_shape[0]
  830. gt_bbox = label_info['gt_bbox']
  831. thresholds = list(self.thresholds)
  832. if self.allow_no_crop:
  833. thresholds.append('no_crop')
  834. np.random.shuffle(thresholds)
  835. for thresh in thresholds:
  836. if thresh == 'no_crop':
  837. return (im, im_info, label_info)
  838. found = False
  839. for i in range(self.num_attempts):
  840. scale = np.random.uniform(*self.scaling)
  841. min_ar, max_ar = self.aspect_ratio
  842. aspect_ratio = np.random.uniform(
  843. max(min_ar, scale**2), min(max_ar, scale**-2))
  844. crop_h = int(h * scale / np.sqrt(aspect_ratio))
  845. crop_w = int(w * scale * np.sqrt(aspect_ratio))
  846. crop_y = np.random.randint(0, h - crop_h)
  847. crop_x = np.random.randint(0, w - crop_w)
  848. crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
  849. iou = iou_matrix(
  850. gt_bbox, np.array(
  851. [crop_box], dtype=np.float32))
  852. if iou.max() < thresh:
  853. continue
  854. if self.cover_all_box and iou.min() < thresh:
  855. continue
  856. cropped_box, valid_ids = crop_box_with_center_constraint(
  857. gt_bbox, np.array(
  858. crop_box, dtype=np.float32))
  859. if valid_ids.size > 0:
  860. found = True
  861. break
  862. if found:
  863. if 'gt_poly' in label_info and len(label_info['gt_poly']) > 0:
  864. crop_polys = crop_segms(
  865. label_info['gt_poly'],
  866. valid_ids,
  867. np.array(
  868. crop_box, dtype=np.int64),
  869. h,
  870. w)
  871. if [] in crop_polys:
  872. delete_id = list()
  873. valid_polys = list()
  874. for id, crop_poly in enumerate(crop_polys):
  875. if crop_poly == []:
  876. delete_id.append(id)
  877. else:
  878. valid_polys.append(crop_poly)
  879. valid_ids = np.delete(valid_ids, delete_id)
  880. if len(valid_polys) == 0:
  881. return (im, im_info, label_info)
  882. label_info['gt_poly'] = valid_polys
  883. else:
  884. label_info['gt_poly'] = crop_polys
  885. im = crop_image(im, crop_box)
  886. label_info['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
  887. label_info['gt_class'] = np.take(
  888. label_info['gt_class'], valid_ids, axis=0)
  889. im_info['image_shape'] = np.array(
  890. [crop_box[3] - crop_box[1],
  891. crop_box[2] - crop_box[0]]).astype('int32')
  892. if 'gt_score' in label_info:
  893. label_info['gt_score'] = np.take(
  894. label_info['gt_score'], valid_ids, axis=0)
  895. if 'is_crowd' in label_info:
  896. label_info['is_crowd'] = np.take(
  897. label_info['is_crowd'], valid_ids, axis=0)
  898. return (im, im_info, label_info)
  899. return (im, im_info, label_info)
  900. class ArrangeFasterRCNN(DetTransform):
  901. """获取FasterRCNN模型训练/验证/预测所需信息。
  902. Args:
  903. mode (str): 指定数据用于何种用途,取值范围为['train', 'eval', 'test', 'quant']。
  904. Raises:
  905. ValueError: mode的取值不在['train', 'eval', 'test', 'quant']之内。
  906. """
  907. def __init__(self, mode=None):
  908. if mode not in ['train', 'eval', 'test', 'quant']:
  909. raise ValueError(
  910. "mode must be in ['train', 'eval', 'test', 'quant']!")
  911. self.mode = mode
  912. def __call__(self, im, im_info=None, label_info=None):
  913. """
  914. Args:
  915. im (np.ndarray): 图像np.ndarray数据。
  916. im_info (dict, 可选): 存储与图像相关的信息。
  917. label_info (dict, 可选): 存储与标注框相关的信息。
  918. Returns:
  919. tuple: 当mode为'train'时,返回(im, im_resize_info, gt_bbox, gt_class, is_crowd),分别对应
  920. 图像np.ndarray数据、图像相当对于原图的resize信息、真实标注框、真实标注框对应的类别、真实标注框内是否是一组对象;
  921. 当mode为'eval'时,返回(im, im_resize_info, im_id, im_shape, gt_bbox, gt_class, is_difficult),
  922. 分别对应图像np.ndarray数据、图像相当对于原图的resize信息、图像id、图像大小信息、真实标注框、真实标注框对应的类别、
  923. 真实标注框是否为难识别对象;当mode为'test'或'quant'时,返回(im, im_resize_info, im_shape),分别对应图像np.ndarray数据、
  924. 图像相当对于原图的resize信息、图像大小信息。
  925. Raises:
  926. TypeError: 形参数据类型不满足需求。
  927. ValueError: 数据长度不匹配。
  928. """
  929. im = permute(im, False)
  930. if self.mode == 'train':
  931. if im_info is None or label_info is None:
  932. raise TypeError(
  933. 'Cannot do ArrangeFasterRCNN! ' +
  934. 'Becasuse the im_info and label_info can not be None!')
  935. if len(label_info['gt_bbox']) != len(label_info['gt_class']):
  936. raise ValueError("gt num mismatch: bbox and class.")
  937. im_resize_info = im_info['im_resize_info']
  938. gt_bbox = label_info['gt_bbox']
  939. gt_class = label_info['gt_class']
  940. is_crowd = label_info['is_crowd']
  941. outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd)
  942. elif self.mode == 'eval':
  943. if im_info is None or label_info is None:
  944. raise TypeError(
  945. 'Cannot do ArrangeFasterRCNN! ' +
  946. 'Becasuse the im_info and label_info can not be None!')
  947. im_resize_info = im_info['im_resize_info']
  948. im_id = im_info['im_id']
  949. im_shape = np.array(
  950. (im_info['image_shape'][0], im_info['image_shape'][1], 1),
  951. dtype=np.float32)
  952. gt_bbox = label_info['gt_bbox']
  953. gt_class = label_info['gt_class']
  954. is_difficult = label_info['difficult']
  955. outputs = (im, im_resize_info, im_id, im_shape, gt_bbox, gt_class,
  956. is_difficult)
  957. else:
  958. if im_info is None:
  959. raise TypeError('Cannot do ArrangeFasterRCNN! ' +
  960. 'Becasuse the im_info can not be None!')
  961. im_resize_info = im_info['im_resize_info']
  962. im_shape = np.array(
  963. (im_info['image_shape'][0], im_info['image_shape'][1], 1),
  964. dtype=np.float32)
  965. outputs = (im, im_resize_info, im_shape)
  966. return outputs
  967. class ArrangeMaskRCNN(DetTransform):
  968. """获取MaskRCNN模型训练/验证/预测所需信息。
  969. Args:
  970. mode (str): 指定数据用于何种用途,取值范围为['train', 'eval', 'test', 'quant']。
  971. Raises:
  972. ValueError: mode的取值不在['train', 'eval', 'test', 'quant']之内。
  973. """
  974. def __init__(self, mode=None):
  975. if mode not in ['train', 'eval', 'test', 'quant']:
  976. raise ValueError(
  977. "mode must be in ['train', 'eval', 'test', 'quant']!")
  978. self.mode = mode
  979. def __call__(self, im, im_info=None, label_info=None):
  980. """
  981. Args:
  982. im (np.ndarray): 图像np.ndarray数据。
  983. im_info (dict, 可选): 存储与图像相关的信息。
  984. label_info (dict, 可选): 存储与标注框相关的信息。
  985. Returns:
  986. tuple: 当mode为'train'时,返回(im, im_resize_info, gt_bbox, gt_class, is_crowd, gt_masks),分别对应
  987. 图像np.ndarray数据、图像相当对于原图的resize信息、真实标注框、真实标注框对应的类别、真实标注框内是否是一组对象、
  988. 真实分割区域;当mode为'eval'时,返回(im, im_resize_info, im_id, im_shape),分别对应图像np.ndarray数据、
  989. 图像相当对于原图的resize信息、图像id、图像大小信息;当mode为'test'或'quant'时,返回(im, im_resize_info, im_shape),
  990. 分别对应图像np.ndarray数据、图像相当对于原图的resize信息、图像大小信息。
  991. Raises:
  992. TypeError: 形参数据类型不满足需求。
  993. ValueError: 数据长度不匹配。
  994. """
  995. im = permute(im, False)
  996. if self.mode == 'train':
  997. if im_info is None or label_info is None:
  998. raise TypeError(
  999. 'Cannot do ArrangeTrainMaskRCNN! ' +
  1000. 'Becasuse the im_info and label_info can not be None!')
  1001. if len(label_info['gt_bbox']) != len(label_info['gt_class']):
  1002. raise ValueError("gt num mismatch: bbox and class.")
  1003. im_resize_info = im_info['im_resize_info']
  1004. gt_bbox = label_info['gt_bbox']
  1005. gt_class = label_info['gt_class']
  1006. is_crowd = label_info['is_crowd']
  1007. assert 'gt_poly' in label_info
  1008. segms = label_info['gt_poly']
  1009. if len(segms) != 0:
  1010. assert len(segms) == is_crowd.shape[0]
  1011. gt_masks = []
  1012. valid = True
  1013. for i in range(len(segms)):
  1014. segm = segms[i]
  1015. gt_segm = []
  1016. if is_crowd[i]:
  1017. gt_segm.append([[0, 0]])
  1018. else:
  1019. for poly in segm:
  1020. if len(poly) == 0:
  1021. valid = False
  1022. break
  1023. gt_segm.append(np.array(poly).reshape(-1, 2))
  1024. if (not valid) or len(gt_segm) == 0:
  1025. break
  1026. gt_masks.append(gt_segm)
  1027. outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd,
  1028. gt_masks)
  1029. else:
  1030. if im_info is None:
  1031. raise TypeError('Cannot do ArrangeMaskRCNN! ' +
  1032. 'Becasuse the im_info can not be None!')
  1033. im_resize_info = im_info['im_resize_info']
  1034. im_shape = np.array(
  1035. (im_info['image_shape'][0], im_info['image_shape'][1], 1),
  1036. dtype=np.float32)
  1037. if self.mode == 'eval':
  1038. im_id = im_info['im_id']
  1039. outputs = (im, im_resize_info, im_id, im_shape)
  1040. else:
  1041. outputs = (im, im_resize_info, im_shape)
  1042. return outputs
  1043. class ArrangeYOLOv3(DetTransform):
  1044. """获取YOLOv3模型训练/验证/预测所需信息。
  1045. Args:
  1046. mode (str): 指定数据用于何种用途,取值范围为['train', 'eval', 'test', 'quant']。
  1047. Raises:
  1048. ValueError: mode的取值不在['train', 'eval', 'test', 'quant']之内。
  1049. """
  1050. def __init__(self, mode=None):
  1051. if mode not in ['train', 'eval', 'test', 'quant']:
  1052. raise ValueError(
  1053. "mode must be in ['train', 'eval', 'test', 'quant']!")
  1054. self.mode = mode
  1055. def __call__(self, im, im_info=None, label_info=None):
  1056. """
  1057. Args:
  1058. im (np.ndarray): 图像np.ndarray数据。
  1059. im_info (dict, 可选): 存储与图像相关的信息。
  1060. label_info (dict, 可选): 存储与标注框相关的信息。
  1061. Returns:
  1062. tuple: 当mode为'train'时,返回(im, gt_bbox, gt_class, gt_score, im_shape),分别对应
  1063. 图像np.ndarray数据、真实标注框、真实标注框对应的类别、真实标注框混合得分、图像大小信息;
  1064. 当mode为'eval'时,返回(im, im_shape, im_id, gt_bbox, gt_class, difficult),
  1065. 分别对应图像np.ndarray数据、图像大小信息、图像id、真实标注框、真实标注框对应的类别、
  1066. 真实标注框是否为难识别对象;当mode为'test'或'quant'时,返回(im, im_shape),
  1067. 分别对应图像np.ndarray数据、图像大小信息。
  1068. Raises:
  1069. TypeError: 形参数据类型不满足需求。
  1070. ValueError: 数据长度不匹配。
  1071. """
  1072. im = permute(im, False)
  1073. if self.mode == 'train':
  1074. if im_info is None or label_info is None:
  1075. raise TypeError(
  1076. 'Cannot do ArrangeYolov3! ' +
  1077. 'Becasuse the im_info and label_info can not be None!')
  1078. im_shape = im_info['image_shape']
  1079. if len(label_info['gt_bbox']) != len(label_info['gt_class']):
  1080. raise ValueError("gt num mismatch: bbox and class.")
  1081. if len(label_info['gt_bbox']) != len(label_info['gt_score']):
  1082. raise ValueError("gt num mismatch: bbox and score.")
  1083. gt_bbox = np.zeros((50, 4), dtype=im.dtype)
  1084. gt_class = np.zeros((50, ), dtype=np.int32)
  1085. gt_score = np.zeros((50, ), dtype=im.dtype)
  1086. gt_num = min(50, len(label_info['gt_bbox']))
  1087. if gt_num > 0:
  1088. label_info['gt_class'][:gt_num, 0] = label_info[
  1089. 'gt_class'][:gt_num, 0] - 1
  1090. gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
  1091. gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
  1092. gt_score[:gt_num] = label_info['gt_score'][:gt_num, 0]
  1093. # parse [x1, y1, x2, y2] to [x, y, w, h]
  1094. gt_bbox[:, 2:4] = gt_bbox[:, 2:4] - gt_bbox[:, :2]
  1095. gt_bbox[:, :2] = gt_bbox[:, :2] + gt_bbox[:, 2:4] / 2.
  1096. outputs = (im, gt_bbox, gt_class, gt_score, im_shape)
  1097. elif self.mode == 'eval':
  1098. if im_info is None or label_info is None:
  1099. raise TypeError(
  1100. 'Cannot do ArrangeYolov3! ' +
  1101. 'Becasuse the im_info and label_info can not be None!')
  1102. im_shape = im_info['image_shape']
  1103. if len(label_info['gt_bbox']) != len(label_info['gt_class']):
  1104. raise ValueError("gt num mismatch: bbox and class.")
  1105. im_id = im_info['im_id']
  1106. gt_bbox = np.zeros((50, 4), dtype=im.dtype)
  1107. gt_class = np.zeros((50, ), dtype=np.int32)
  1108. difficult = np.zeros((50, ), dtype=np.int32)
  1109. gt_num = min(50, len(label_info['gt_bbox']))
  1110. if gt_num > 0:
  1111. label_info['gt_class'][:gt_num, 0] = label_info[
  1112. 'gt_class'][:gt_num, 0] - 1
  1113. gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
  1114. gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
  1115. difficult[:gt_num] = label_info['difficult'][:gt_num, 0]
  1116. outputs = (im, im_shape, im_id, gt_bbox, gt_class, difficult)
  1117. else:
  1118. if im_info is None:
  1119. raise TypeError('Cannot do ArrangeYolov3! ' +
  1120. 'Becasuse the im_info can not be None!')
  1121. im_shape = im_info['image_shape']
  1122. outputs = (im, im_shape)
  1123. return outputs
  1124. class ComposedRCNNTransforms(Compose):
  1125. """ RCNN模型(faster-rcnn/mask-rcnn)图像处理流程,具体如下,
  1126. 训练阶段:
  1127. 1. 随机以0.5的概率将图像水平翻转
  1128. 2. 图像归一化
  1129. 3. 图像按比例Resize,scale计算方式如下
  1130. scale = min_max_size[0] / short_size_of_image
  1131. if max_size_of_image * scale > min_max_size[1]:
  1132. scale = min_max_size[1] / max_size_of_image
  1133. 4. 将3步骤的长宽进行padding,使得长宽为32的倍数
  1134. 验证阶段:
  1135. 1. 图像归一化
  1136. 2. 图像按比例Resize,scale计算方式同上训练阶段
  1137. 3. 将2步骤的长宽进行padding,使得长宽为32的倍数
  1138. Args:
  1139. mode(str): 图像处理流程所处阶段,训练/验证/预测,分别对应'train', 'eval', 'test'
  1140. min_max_size(list): 图像在缩放时,最小边和最大边的约束条件
  1141. mean(list): 图像均值
  1142. std(list): 图像方差
  1143. """
  1144. def __init__(self,
  1145. mode,
  1146. min_max_size=[800, 1333],
  1147. mean=[0.485, 0.456, 0.406],
  1148. std=[0.229, 0.224, 0.225]):
  1149. if mode == 'train':
  1150. # 训练时的transforms,包含数据增强
  1151. transforms = [
  1152. RandomHorizontalFlip(prob=0.5), Normalize(
  1153. mean=mean, std=std), ResizeByShort(
  1154. short_size=min_max_size[0], max_size=min_max_size[1]),
  1155. Padding(coarsest_stride=32)
  1156. ]
  1157. else:
  1158. # 验证/预测时的transforms
  1159. transforms = [
  1160. Normalize(
  1161. mean=mean, std=std), ResizeByShort(
  1162. short_size=min_max_size[0], max_size=min_max_size[1]),
  1163. Padding(coarsest_stride=32)
  1164. ]
  1165. super(ComposedRCNNTransforms, self).__init__(transforms)
  1166. class ComposedYOLOv3Transforms(Compose):
  1167. """YOLOv3模型的图像预处理流程,具体如下,
  1168. 训练阶段:
  1169. 1. 在前mixup_epoch轮迭代中,使用MixupImage策略,见https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/det_transforms.html#mixupimage
  1170. 2. 对图像进行随机扰动,包括亮度,对比度,饱和度和色调
  1171. 3. 随机扩充图像,见https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/det_transforms.html#randomexpand
  1172. 4. 随机裁剪图像
  1173. 5. 将4步骤的输出图像Resize成shape参数的大小
  1174. 6. 随机0.5的概率水平翻转图像
  1175. 7. 图像归一化
  1176. 验证/预测阶段:
  1177. 1. 将图像Resize成shape参数大小
  1178. 2. 图像归一化
  1179. Args:
  1180. mode(str): 图像处理流程所处阶段,训练/验证/预测,分别对应'train', 'eval', 'test'
  1181. shape(list): 输入模型中图像的大小,输入模型的图像会被Resize成此大小
  1182. mixup_epoch(int): 模型训练过程中,前mixup_epoch会使用mixup策略
  1183. mean(list): 图像均值
  1184. std(list): 图像方差
  1185. """
  1186. def __init__(self,
  1187. mode,
  1188. shape=[608, 608],
  1189. mixup_epoch=250,
  1190. mean=[0.485, 0.456, 0.406],
  1191. std=[0.229, 0.224, 0.225]):
  1192. width = shape
  1193. if isinstance(shape, list):
  1194. if shape[0] != shape[1]:
  1195. raise Exception(
  1196. "In YOLOv3 model, width and height should be equal")
  1197. width = shape[0]
  1198. if width % 32 != 0:
  1199. raise Exception(
  1200. "In YOLOv3 model, width and height should be multiple of 32, e.g 224、256、320...."
  1201. )
  1202. if mode == 'train':
  1203. # 训练时的transforms,包含数据增强
  1204. transforms = [
  1205. MixupImage(mixup_epoch=mixup_epoch), RandomDistort(),
  1206. RandomExpand(), RandomCrop(), Resize(
  1207. target_size=width,
  1208. interp='RANDOM'), RandomHorizontalFlip(), Normalize(
  1209. mean=mean, std=std)
  1210. ]
  1211. else:
  1212. # 验证/预测时的transforms
  1213. transforms = [
  1214. Resize(
  1215. target_size=width, interp='CUBIC'), Normalize(
  1216. mean=mean, std=std)
  1217. ]
  1218. super(ComposedYOLOv3Transforms, self).__init__(transforms)