# det_transforms.py (line-number gutter residue from extraction removed)
  1. # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. try:
  15. from collections.abc import Sequence
  16. except Exception:
  17. from collections import Sequence
  18. import random
  19. import os.path as osp
  20. import numpy as np
  21. import cv2
  22. from .imgaug_support import execute_imgaug
  23. from .ops import *
  24. from .box_utils import *
  25. import paddlex.utils.logging as logging
  26. class DetTransform:
  27. """检测数据处理基类
  28. """
  29. def __init__(self):
  30. pass
class Compose(DetTransform):
    """Sequentially apply the configured preprocessing/augmentation operators.

    Every operator consumes an image array shaped [H, W, C], where H is the
    image height, W the width and C the channel count.

    Args:
        transforms (list): list of preprocessing/augmentation operators.

    Raises:
        TypeError: if ``transforms`` is not a list.
        ValueError: if ``transforms`` contains fewer than one element.
    """

    def __init__(self, transforms):
        if not isinstance(transforms, list):
            raise TypeError('The transforms must be a list!')
        if len(transforms) < 1:
            raise ValueError('The length of transforms ' + \
                             'must be equal or larger than 1!')
        self.transforms = transforms
        self.batch_transforms = None
        self.use_mixup = False
        # dtype of the decoded image; recorded in decode_image and forwarded
        # to RandomDistort so it can match the source value range.
        self.data_type = np.uint8
        self.to_rgb = True
        for t in self.transforms:
            if type(t).__name__ == 'MixupImage':
                self.use_mixup = True
        # Validate operators: only PaddleX DetTransform subclasses or
        # imgaug augmenters are accepted.
        for op in self.transforms:
            if not isinstance(op, DetTransform):
                import imgaug.augmenters as iaa
                if not isinstance(op, iaa.Augmenter):
                    raise Exception(
                        "Elements in transforms should be defined in 'paddlex.det.transforms' or class of imgaug.augmenters.Augmenter, see docs here: https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/"
                    )

    def __call__(self, im, im_info=None, label_info=None):
        """Decode the image, then run every operator on it in order.

        Args:
            im (str/np.ndarray): image path or decoded np.ndarray image.
            im_info (dict): image-related metadata with fields:
                - im_id (np.ndarray): image id, shape (1,).
                - image_shape (np.ndarray): original image size, shape (2,);
                  index 0 is the height, index 1 the width.
                - mixup (list): [im, im_info, label_info] of the image to be
                  mixed with the current one; absent when the current epoch
                  performs no mixup.
            label_info (dict): annotation metadata with fields:
                - gt_bbox (np.ndarray): ground-truth boxes [x1, y1, x2, y2],
                  shape (n, 4), n being the number of boxes.
                - gt_class (np.ndarray): class index per box, shape (n, 1).
                - gt_score (np.ndarray): mixup score per box, shape (n, 1).
                - gt_poly (list): segmentation polygons per box (x/y
                  coordinate lists), length n.
                - is_crowd (np.ndarray): crowd flag per box, shape (n, 1).
                - difficult (np.ndarray): difficult flag per box, shape (n, 1).

        Returns:
            tuple: the fields required by the network; the exact layout is
                decided by the last operator in ``transforms``.
        """

        def decode_image(im_file, im_info, label_info, input_channel=3):
            # Decode a path (or pass through an ndarray), record dtype,
            # convert BGR->RGB when requested, and seed im_info defaults.
            if im_info is None:
                im_info = dict()
            if isinstance(im_file, np.ndarray):
                if len(im_file.shape) != 3:
                    raise Exception(
                        "im should be 3-dimensions, but now is {}-dimensions".
                        format(len(im_file.shape)))
                im = im_file
            else:
                try:
                    if input_channel == 3:
                        im = cv2.imread(im_file, cv2.IMREAD_ANYDEPTH |
                                        cv2.IMREAD_ANYCOLOR)
                    else:
                        im = cv2.imread(im_file, cv2.IMREAD_UNCHANGED)
                        if im.ndim < 3:
                            im = np.expand_dims(im, axis=-1)
                except:
                    raise TypeError('Can\'t read The image file {}!'.format(
                        im_file))
            self.data_type = im.dtype
            im = im.astype('float32')
            if input_channel == 3 and self.to_rgb:
                im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            # Default im_resize_info is [h, w, 1.] (no resize applied yet).
            im_info['im_resize_info'] = np.array(
                [im.shape[0], im.shape[1], 1.], dtype=np.float32)
            im_info['image_shape'] = np.array([im.shape[0],
                                               im.shape[1]]).astype('int32')
            if not self.use_mixup:
                if 'mixup' in im_info:
                    del im_info['mixup']
            # Recursively decode the companion mixup image, if any.
            if 'mixup' in im_info:
                im_info['mixup'] = \
                    decode_image(im_info['mixup'][0],
                                 im_info['mixup'][1],
                                 im_info['mixup'][2],
                                 input_channel)
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)

        input_channel = getattr(self, 'input_channel', 3)
        outputs = decode_image(im, im_info, label_info, input_channel)
        im = outputs[0]
        im_info = outputs[1]
        if len(outputs) == 3:
            label_info = outputs[2]
        for op in self.transforms:
            if im is None:
                return None
            if isinstance(op, DetTransform):
                if op.__class__.__name__ == 'RandomDistort':
                    # RandomDistort needs the colour order and the original
                    # dtype that were recorded at decode time.
                    op.to_rgb = self.to_rgb
                    op.data_type = self.data_type
                outputs = op(im, im_info, label_info)
                im = outputs[0]
            else:
                import imgaug.augmenters as iaa
                if im.shape[-1] != 3:
                    raise Exception(
                        "Only the 3-channel RGB image is supported in the imgaug operator, but recieved image channel is {}".
                        format(im.shape[-1]))
                if isinstance(op, iaa.Augmenter):
                    im = execute_imgaug(op, im)
                if label_info is not None:
                    outputs = (im, im_info, label_info)
                else:
                    outputs = (im, im_info)
        return outputs

    def add_augmenters(self, augmenters):
        # Prepend extra augmenters, warning about duplicates by class name.
        if not isinstance(augmenters, list):
            raise Exception(
                "augmenters should be list type in func add_augmenters()")
        transform_names = [type(x).__name__ for x in self.transforms]
        for aug in augmenters:
            if type(aug).__name__ in transform_names:
                logging.error(
                    "{} is already in ComposedTransforms, need to remove it from add_augmenters().".
                    format(type(aug).__name__))
        self.transforms = augmenters + self.transforms
  173. class ResizeByShort(DetTransform):
  174. """根据图像的短边调整图像大小(resize)。
  175. 1. 获取图像的长边和短边长度。
  176. 2. 根据短边与short_size的比例,计算长边的目标长度,
  177. 此时高、宽的resize比例为short_size/原图短边长度。
  178. 若short_size为数组,则随机从该数组中挑选一个数值
  179. 作为short_size。
  180. 3. 如果max_size>0,调整resize比例:
  181. 如果长边的目标长度>max_size,则高、宽的resize比例为max_size/原图长边长度。
  182. 4. 根据调整大小的比例对图像进行resize。
  183. Args:
  184. short_size (int|list): 短边目标长度。默认为800。
  185. max_size (int): 长边目标长度的最大限制。默认为1333。
  186. Raises:
  187. TypeError: 形参数据类型不满足需求。
  188. """
  189. def __init__(self, short_size=800, max_size=1333):
  190. self.max_size = int(max_size)
  191. if not (isinstance(short_size, int) or isinstance(short_size, list)):
  192. raise TypeError(
  193. "Type of short_size is invalid. Must be Integer or List, now is {}".
  194. format(type(short_size)))
  195. self.short_size = short_size
  196. if not (isinstance(self.max_size, int)):
  197. raise TypeError("max_size: input type is invalid.")
  198. def __call__(self, im, im_info=None, label_info=None):
  199. """
  200. Args:
  201. im (numnp.ndarraypy): 图像np.ndarray数据。
  202. im_info (dict, 可选): 存储与图像相关的信息。
  203. label_info (dict, 可选): 存储与标注框相关的信息。
  204. Returns:
  205. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  206. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  207. 存储与标注框相关信息的字典。
  208. 其中,im_info更新字段为:
  209. - im_resize_info (np.ndarray): resize后的图像高、resize后的图像宽、resize后的图像相对原始图的缩放比例
  210. 三者组成的np.ndarray,形状为(3,)。
  211. Raises:
  212. TypeError: 形参数据类型不满足需求。
  213. ValueError: 数据长度不匹配。
  214. """
  215. if im_info is None:
  216. im_info = dict()
  217. if not isinstance(im, np.ndarray):
  218. raise TypeError("ResizeByShort: image type is not numpy.")
  219. if len(im.shape) != 3:
  220. raise ValueError('ResizeByShort: image is not 3-dimensional.')
  221. im_short_size = min(im.shape[0], im.shape[1])
  222. im_long_size = max(im.shape[0], im.shape[1])
  223. if isinstance(self.short_size, list):
  224. # Case for multi-scale training
  225. selected_size = random.choice(self.short_size)
  226. else:
  227. selected_size = self.short_size
  228. scale = float(selected_size) / im_short_size
  229. if self.max_size > 0 and np.round(scale *
  230. im_long_size) > self.max_size:
  231. scale = float(self.max_size) / float(im_long_size)
  232. resized_width = int(round(im.shape[1] * scale))
  233. resized_height = int(round(im.shape[0] * scale))
  234. im_resize_info = [resized_height, resized_width, scale]
  235. im = cv2.resize(
  236. im, (resized_width, resized_height),
  237. interpolation=cv2.INTER_LINEAR)
  238. if im.ndim < 3:
  239. im = np.expand_dims(im, axis=-1)
  240. im_info['im_resize_info'] = np.array(im_resize_info).astype(np.float32)
  241. if label_info is None:
  242. return (im, im_info)
  243. else:
  244. return (im, im_info, label_info)
  245. class Padding(DetTransform):
  246. """1.将图像的长和宽padding至coarsest_stride的倍数。如输入图像为[300, 640],
  247. `coarest_stride`为32,则由于300不为32的倍数,因此在图像最右和最下使用0值
  248. 进行padding,最终输出图像为[320, 640]。
  249. 2.或者,将图像的长和宽padding到target_size指定的shape,如输入的图像为[300,640],
  250. a. `target_size` = 960,在图像最右和最下使用0值进行padding,最终输出
  251. 图像为[960, 960]。
  252. b. `target_size` = [640, 960],在图像最右和最下使用0值进行padding,最终
  253. 输出图像为[640, 960]。
  254. 1. 如果coarsest_stride为1,target_size为None则直接返回。
  255. 2. 获取图像的高H、宽W。
  256. 3. 计算填充后图像的高H_new、宽W_new。
  257. 4. 构建大小为(H_new, W_new, 3)像素值为0的np.ndarray,
  258. 并将原图的np.ndarray粘贴于左上角。
  259. Args:
  260. coarsest_stride (int): 填充后的图像长、宽为该参数的倍数,默认为1。
  261. target_size (int|list|tuple): 填充后的图像长、宽,默认为None,coarset_stride优先级更高。
  262. Raises:
  263. TypeError: 形参`target_size`数据类型不满足需求。
  264. ValueError: 形参`target_size`为(list|tuple)时,长度不满足需求。
  265. """
  266. def __init__(self, coarsest_stride=1, target_size=None):
  267. self.coarsest_stride = coarsest_stride
  268. if target_size is not None:
  269. if not isinstance(target_size, int):
  270. if not isinstance(target_size, tuple) and not isinstance(
  271. target_size, list):
  272. raise TypeError(
  273. "Padding: Type of target_size must in (int|list|tuple)."
  274. )
  275. elif len(target_size) != 2:
  276. raise ValueError(
  277. "Padding: Length of target_size must equal 2.")
  278. self.target_size = target_size
  279. def __call__(self, im, im_info=None, label_info=None):
  280. """
  281. Args:
  282. im (numnp.ndarraypy): 图像np.ndarray数据。
  283. im_info (dict, 可选): 存储与图像相关的信息。
  284. label_info (dict, 可选): 存储与标注框相关的信息。
  285. Returns:
  286. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  287. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  288. 存储与标注框相关信息的字典。
  289. Raises:
  290. TypeError: 形参数据类型不满足需求。
  291. ValueError: 数据长度不匹配。
  292. ValueError: coarsest_stride,target_size需有且只有一个被指定。
  293. ValueError: target_size小于原图的大小。
  294. """
  295. if im_info is None:
  296. im_info = dict()
  297. if not isinstance(im, np.ndarray):
  298. raise TypeError("Padding: image type is not numpy.")
  299. if len(im.shape) != 3:
  300. raise ValueError('Padding: image is not 3-dimensional.')
  301. im_h, im_w, im_c = im.shape[:]
  302. if isinstance(self.target_size, int):
  303. padding_im_h = self.target_size
  304. padding_im_w = self.target_size
  305. elif isinstance(self.target_size, list) or isinstance(self.target_size,
  306. tuple):
  307. padding_im_w = self.target_size[0]
  308. padding_im_h = self.target_size[1]
  309. elif self.coarsest_stride > 0:
  310. padding_im_h = int(
  311. np.ceil(im_h / self.coarsest_stride) * self.coarsest_stride)
  312. padding_im_w = int(
  313. np.ceil(im_w / self.coarsest_stride) * self.coarsest_stride)
  314. else:
  315. raise ValueError(
  316. "coarsest_stridei(>1) or target_size(list|int) need setting in Padding transform"
  317. )
  318. pad_height = padding_im_h - im_h
  319. pad_width = padding_im_w - im_w
  320. if pad_height < 0 or pad_width < 0:
  321. raise ValueError(
  322. 'the size of image should be less than target_size, but the size of image ({}, {}), is larger than target_size ({}, {})'
  323. .format(im_w, im_h, padding_im_w, padding_im_h))
  324. padding_im = np.zeros(
  325. (padding_im_h, padding_im_w, im_c), dtype=np.float32)
  326. padding_im[:im_h, :im_w, :] = im
  327. if label_info is None:
  328. return (padding_im, im_info)
  329. else:
  330. return (padding_im, im_info, label_info)
  331. class Resize(DetTransform):
  332. """调整图像大小(resize)。
  333. - 当目标大小(target_size)类型为int时,根据插值方式,
  334. 将图像resize为[target_size, target_size]。
  335. - 当目标大小(target_size)类型为list或tuple时,根据插值方式,
  336. 将图像resize为target_size。
  337. 注意:当插值方式为“RANDOM”时,则随机选取一种插值方式进行resize。
  338. Args:
  339. target_size (int/list/tuple): 短边目标长度。默认为608。
  340. interp (str): resize的插值方式,与opencv的插值方式对应,取值范围为
  341. ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM']。默认为"LINEAR"。
  342. Raises:
  343. TypeError: 形参数据类型不满足需求。
  344. ValueError: 插值方式不在['NEAREST', 'LINEAR', 'CUBIC',
  345. 'AREA', 'LANCZOS4', 'RANDOM']中。
  346. """
  347. # The interpolation mode
  348. interp_dict = {
  349. 'NEAREST': cv2.INTER_NEAREST,
  350. 'LINEAR': cv2.INTER_LINEAR,
  351. 'CUBIC': cv2.INTER_CUBIC,
  352. 'AREA': cv2.INTER_AREA,
  353. 'LANCZOS4': cv2.INTER_LANCZOS4
  354. }
  355. def __init__(self, target_size=608, interp='LINEAR'):
  356. self.interp = interp
  357. if not (interp == "RANDOM" or interp in self.interp_dict):
  358. raise ValueError("interp should be one of {}".format(
  359. self.interp_dict.keys()))
  360. if isinstance(target_size, list) or isinstance(target_size, tuple):
  361. if len(target_size) != 2:
  362. raise TypeError(
  363. 'when target is list or tuple, it should include 2 elements, but it is {}'
  364. .format(target_size))
  365. elif not isinstance(target_size, int):
  366. raise TypeError(
  367. "Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
  368. .format(type(target_size)))
  369. self.target_size = target_size
  370. def __call__(self, im, im_info=None, label_info=None):
  371. """
  372. Args:
  373. im (np.ndarray): 图像np.ndarray数据。
  374. im_info (dict, 可选): 存储与图像相关的信息。
  375. label_info (dict, 可选): 存储与标注框相关的信息。
  376. Returns:
  377. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  378. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  379. 存储与标注框相关信息的字典。
  380. Raises:
  381. TypeError: 形参数据类型不满足需求。
  382. ValueError: 数据长度不匹配。
  383. """
  384. if im_info is None:
  385. im_info = dict()
  386. if not isinstance(im, np.ndarray):
  387. raise TypeError("Resize: image type is not numpy.")
  388. if len(im.shape) != 3:
  389. raise ValueError('Resize: image is not 3-dimensional.')
  390. if self.interp == "RANDOM":
  391. interp = random.choice(list(self.interp_dict.keys()))
  392. else:
  393. interp = self.interp
  394. im = resize(im, self.target_size, self.interp_dict[interp])
  395. if label_info is None:
  396. return (im, im_info)
  397. else:
  398. return (im, im_info, label_info)
  399. class RandomHorizontalFlip(DetTransform):
  400. """随机翻转图像、标注框、分割信息,模型训练时的数据增强操作。
  401. 1. 随机采样一个0-1之间的小数,当小数小于水平翻转概率时,
  402. 执行2-4步操作,否则直接返回。
  403. 2. 水平翻转图像。
  404. 3. 计算翻转后的真实标注框的坐标,更新label_info中的gt_bbox信息。
  405. 4. 计算翻转后的真实分割区域的坐标,更新label_info中的gt_poly信息。
  406. Args:
  407. prob (float): 随机水平翻转的概率。默认为0.5。
  408. Raises:
  409. TypeError: 形参数据类型不满足需求。
  410. """
  411. def __init__(self, prob=0.5):
  412. self.prob = prob
  413. if not isinstance(self.prob, float):
  414. raise TypeError("RandomHorizontalFlip: input type is invalid.")
  415. def __call__(self, im, im_info=None, label_info=None):
  416. """
  417. Args:
  418. im (np.ndarray): 图像np.ndarray数据。
  419. im_info (dict, 可选): 存储与图像相关的信息。
  420. label_info (dict, 可选): 存储与标注框相关的信息。
  421. Returns:
  422. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  423. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  424. 存储与标注框相关信息的字典。
  425. 其中,im_info更新字段为:
  426. - gt_bbox (np.ndarray): 水平翻转后的标注框坐标[x1, y1, x2, y2],形状为(n, 4),
  427. 其中n代表真实标注框的个数。
  428. - gt_poly (list): 水平翻转后的多边形分割区域的x、y坐标,长度为n,
  429. 其中n代表真实标注框的个数。
  430. Raises:
  431. TypeError: 形参数据类型不满足需求。
  432. ValueError: 数据长度不匹配。
  433. """
  434. if not isinstance(im, np.ndarray):
  435. raise TypeError(
  436. "RandomHorizontalFlip: image is not a numpy array.")
  437. if len(im.shape) != 3:
  438. raise ValueError(
  439. "RandomHorizontalFlip: image is not 3-dimensional.")
  440. if im_info is None or label_info is None:
  441. raise TypeError(
  442. 'Cannot do RandomHorizontalFlip! ' +
  443. 'Becasuse the im_info and label_info can not be None!')
  444. if 'gt_bbox' not in label_info:
  445. raise TypeError('Cannot do RandomHorizontalFlip! ' + \
  446. 'Becasuse gt_bbox is not in label_info!')
  447. image_shape = im_info['image_shape']
  448. gt_bbox = label_info['gt_bbox']
  449. height = image_shape[0]
  450. width = image_shape[1]
  451. if np.random.uniform(0, 1) < self.prob:
  452. im = horizontal_flip(im)
  453. if gt_bbox.shape[0] == 0:
  454. if label_info is None:
  455. return (im, im_info)
  456. else:
  457. return (im, im_info, label_info)
  458. label_info['gt_bbox'] = box_horizontal_flip(gt_bbox, width)
  459. if 'gt_poly' in label_info and \
  460. len(label_info['gt_poly']) != 0:
  461. label_info['gt_poly'] = segms_horizontal_flip(
  462. label_info['gt_poly'], height, width)
  463. if label_info is None:
  464. return (im, im_info)
  465. else:
  466. return (im, im_info, label_info)
  467. class Normalize(DetTransform):
  468. """对图像进行标准化。
  469. 1.像素值减去min_val
  470. 2.像素值除以(max_val-min_val)
  471. 3.对图像进行减均值除以标准差操作。
  472. Args:
  473. mean (list): 图像数据集的均值。默认值[0.5, 0.5, 0.5]。
  474. std (list): 图像数据集的标准差。默认值[0.5, 0.5, 0.5]。
  475. min_val (list): 图像数据集的最小值。默认值[0, 0, 0]。
  476. max_val (list): 图像数据集的最大值。默认值[255.0, 255.0, 255.0]。
  477. Raises:
  478. TypeError: 形参数据类型不满足需求。
  479. """
  480. def __init__(self,
  481. mean=[0.485, 0.456, 0.406],
  482. std=[0.229, 0.224, 0.225],
  483. min_val=[0, 0, 0],
  484. max_val=[255.0, 255.0, 255.0]):
  485. self.mean = mean
  486. self.std = std
  487. self.min_val = min_val
  488. self.max_val = max_val
  489. if not (isinstance(self.mean, list) and isinstance(self.std, list)):
  490. raise TypeError("NormalizeImage: input type is invalid.")
  491. if not (isinstance(self.min_val, list) and isinstance(self.max_val,
  492. list)):
  493. raise ValueError("{}: input type is invalid.".format(self))
  494. from functools import reduce
  495. if reduce(lambda x, y: x * y, self.std) == 0:
  496. raise TypeError('NormalizeImage: std is invalid!')
  497. def __call__(self, im, im_info=None, label_info=None):
  498. """
  499. Args:
  500. im (numnp.ndarraypy): 图像np.ndarray数据。
  501. im_info (dict, 可选): 存储与图像相关的信息。
  502. label_info (dict, 可选): 存储与标注框相关的信息。
  503. Returns:
  504. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  505. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  506. 存储与标注框相关信息的字典。
  507. """
  508. mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
  509. std = np.array(self.std)[np.newaxis, np.newaxis, :]
  510. im = normalize(im, mean, std, self.min_val, self.max_val)
  511. if label_info is None:
  512. return (im, im_info)
  513. else:
  514. return (im, im_info, label_info)
  515. class RandomDistort(DetTransform):
  516. """以一定的概率对图像进行随机像素内容变换,模型训练时的数据增强操作
  517. 1. 对变换的操作顺序进行随机化操作。
  518. 2. 按照1中的顺序以一定的概率对图像进行随机像素内容变换。
  519. 【注意】如果输入是uint8/uint16的RGB图像,该数据增强必须在数据增强Normalize之前使用。
  520. Args:
  521. brightness_range (float): 明亮度的缩放系数范围。
  522. 从[1-`brightness_range`, 1+`brightness_range`]中随机取值作为明亮度缩放因子`scale`,
  523. 按照公式`image = image * scale`调整图像明亮度。默认值为0.5。
  524. brightness_prob (float): 随机调整明亮度的概率。默认为0.5。
  525. contrast_range (float): 对比度的缩放系数范围。
  526. 从[1-`contrast_range`, 1+`contrast_range`]中随机取值作为对比度缩放因子`scale`,
  527. 按照公式`image = image * scale + (image_mean + 0.5) * (1 - scale)`调整图像对比度。默认为0.5。
  528. contrast_prob (float): 随机调整对比度的概率。默认为0.5。
  529. saturation_range (float): 饱和度的缩放系数范围。
  530. 从[1-`saturation_range`, 1+`saturation_range`]中随机取值作为饱和度缩放因子`scale`,
  531. 按照公式`image = gray * (1 - scale) + image * scale`,
  532. 其中`gray = R * 299/1000 + G * 587/1000+ B * 114/1000`。默认为0.5。
  533. saturation_prob (float): 随机调整饱和度的概率。默认为0.5。
  534. hue_range (int): 调整色相角度的差值取值范围。
  535. 从[-`hue_range`, `hue_range`]中随机取值作为色相角度调整差值`delta`,
  536. 按照公式`hue = hue + delta`调整色相角度 。默认为18,取值范围[0, 360]。
  537. hue_prob (float): 随机调整色调的概率。默认为0.5。
  538. """
  539. def __init__(self,
  540. brightness_range=0.5,
  541. brightness_prob=0.5,
  542. contrast_range=0.5,
  543. contrast_prob=0.5,
  544. saturation_range=0.5,
  545. saturation_prob=0.5,
  546. hue_range=18,
  547. hue_prob=0.5):
  548. self.brightness_range = brightness_range
  549. self.brightness_prob = brightness_prob
  550. self.contrast_range = contrast_range
  551. self.contrast_prob = contrast_prob
  552. self.saturation_range = saturation_range
  553. self.saturation_prob = saturation_prob
  554. self.hue_range = hue_range
  555. self.hue_prob = hue_prob
  556. def __call__(self, im, im_info=None, label_info=None):
  557. """
  558. Args:
  559. im (np.ndarray): 图像np.ndarray数据。
  560. im_info (dict, 可选): 存储与图像相关的信息。
  561. label_info (dict, 可选): 存储与标注框相关的信息。
  562. Returns:
  563. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  564. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  565. 存储与标注框相关信息的字典。
  566. """
  567. if im.shape[-1] != 3:
  568. raise Exception(
  569. "Only the 3-channel RGB image is supported in the RandomDistort operator, but recieved image channel is {}".
  570. format(im.shape[-1]))
  571. if self.data_type not in [np.uint8, np.uint16, np.float32]:
  572. raise Exception(
  573. "Only the uint8/uint16/float32 RGB image is supported in the RandomDistort operator, but recieved image data type is {}".
  574. format(self.data_type))
  575. brightness_lower = 1 - self.brightness_range
  576. brightness_upper = 1 + self.brightness_range
  577. contrast_lower = 1 - self.contrast_range
  578. contrast_upper = 1 + self.contrast_range
  579. saturation_lower = 1 - self.saturation_range
  580. saturation_upper = 1 + self.saturation_range
  581. hue_lower = -self.hue_range
  582. hue_upper = self.hue_range
  583. ops = [brightness, contrast, saturation, hue]
  584. random.shuffle(ops)
  585. params_dict = {
  586. 'brightness': {
  587. 'brightness_lower': brightness_lower,
  588. 'brightness_upper': brightness_upper,
  589. 'dtype': self.data_type
  590. },
  591. 'contrast': {
  592. 'contrast_lower': contrast_lower,
  593. 'contrast_upper': contrast_upper,
  594. 'dtype': self.data_type
  595. },
  596. 'saturation': {
  597. 'saturation_lower': saturation_lower,
  598. 'saturation_upper': saturation_upper,
  599. 'is_rgb': self.to_rgb,
  600. 'dtype': self.data_type
  601. },
  602. 'hue': {
  603. 'hue_lower': hue_lower,
  604. 'hue_upper': hue_upper,
  605. 'is_rgb': self.to_rgb,
  606. 'dtype': self.data_type
  607. }
  608. }
  609. prob_dict = {
  610. 'brightness': self.brightness_prob,
  611. 'contrast': self.contrast_prob,
  612. 'saturation': self.saturation_prob,
  613. 'hue': self.hue_prob
  614. }
  615. for id in range(4):
  616. params = params_dict[ops[id].__name__]
  617. prob = prob_dict[ops[id].__name__]
  618. params['im'] = im
  619. if np.random.uniform(0, 1) < prob:
  620. im = ops[id](**params)
  621. im = im.astype('float32')
  622. if label_info is None:
  623. return (im, im_info)
  624. else:
  625. return (im, im_info, label_info)
  626. class MixupImage(DetTransform):
  627. """对图像进行mixup操作,模型训练时的数据增强操作,目前仅YOLOv3模型支持该transform。
  628. 当label_info中不存在mixup字段时,直接返回,否则进行下述操作:
  629. 1. 从随机beta分布中抽取出随机因子factor。
  630. 2.
  631. - 当factor>=1.0时,去除label_info中的mixup字段,直接返回。
  632. - 当factor<=0.0时,直接返回label_info中的mixup字段,并在label_info中去除该字段。
  633. - 其余情况,执行下述操作:
  634. (1)原图像乘以factor,mixup图像乘以(1-factor),叠加2个结果。
  635. (2)拼接原图像标注框和mixup图像标注框。
  636. (3)拼接原图像标注框类别和mixup图像标注框类别。
  637. (4)原图像标注框混合得分乘以factor,mixup图像标注框混合得分乘以(1-factor),叠加2个结果。
  638. 3. 更新im_info中的image_shape信息。
  639. Args:
  640. alpha (float): 随机beta分布的下限。默认为1.5。
  641. beta (float): 随机beta分布的上限。默认为1.5。
  642. mixup_epoch (int): 在前mixup_epoch轮使用mixup增强操作;当该参数为-1时,该策略不会生效。
  643. 默认为-1。
  644. Raises:
  645. ValueError: 数据长度不匹配。
  646. """
  647. def __init__(self, alpha=1.5, beta=1.5, mixup_epoch=-1):
  648. self.alpha = alpha
  649. self.beta = beta
  650. if self.alpha <= 0.0:
  651. raise ValueError("alpha shold be positive in MixupImage")
  652. if self.beta <= 0.0:
  653. raise ValueError("beta shold be positive in MixupImage")
  654. self.mixup_epoch = mixup_epoch
  655. def _mixup_img(self, img1, img2, factor):
  656. h = max(img1.shape[0], img2.shape[0])
  657. w = max(img1.shape[1], img2.shape[1])
  658. img = np.zeros((h, w, img1.shape[2]), 'float32')
  659. img[:img1.shape[0], :img1.shape[1], :] = \
  660. img1.astype('float32') * factor
  661. img[:img2.shape[0], :img2.shape[1], :] += \
  662. img2.astype('float32') * (1.0 - factor)
  663. return img.astype('float32')
    def __call__(self, im, im_info=None, label_info=None):
        """Mix the current sample with its attached 'mixup' partner.

        Args:
            im (np.ndarray): image data.
            im_info (dict, optional): image metadata; must not be None.
                Carries the 'mixup' partner (an (im, im_info, label_info)
                tuple) and the current 'epoch'.
            label_info (dict, optional): annotation info of this sample.

        Returns:
            tuple: (im, im_info) when label_info is None, otherwise
            (im, im_info, label_info).
            Updated im_info field:
                - image_shape (np.ndarray): (h, w) after mixup, shape (2,).
            Removed im_info field:
                - mixup (list): the partner sample, consumed here.
            Updated label_info fields:
                - gt_bbox (np.ndarray): mixed boxes, shape (n, 4).
                - gt_class (np.ndarray): class ids of mixed boxes, (n, 1).
                - gt_score (np.ndarray): factor-weighted scores, (n, 1).

        Raises:
            TypeError: when im_info/label_info or required keys are missing.
        """
        if im_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Becasuse the im_info can not be None!')
        # No partner attached: nothing to mix, pass the sample through.
        if 'mixup' not in im_info:
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        # Blend weight drawn from Beta(alpha, beta), clipped to [0, 1].
        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        # Schedule expired, or the partner's weight is zero: keep the
        # original sample and drop the partner.
        if im_info['epoch'] > self.mixup_epoch \
                or factor >= 1.0:
            im_info.pop('mixup')
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        # Original image's weight is zero: return the partner sample
        # (an (im, im_info, label_info) tuple) unchanged.
        if factor <= 0.0:
            return im_info.pop('mixup')
        im = self._mixup_img(im, im_info['mixup'][0], factor)
        if label_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Becasuse the label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info or \
                'gt_score' not in label_info:
            raise TypeError('Cannot do MixupImage! ' +
                            'Becasuse gt_bbox/gt_class/gt_score is not in label_info!')
        gt_bbox1 = label_info['gt_bbox']
        gt_bbox2 = im_info['mixup'][2]['gt_bbox']
        gt_class1 = label_info['gt_class']
        gt_class2 = im_info['mixup'][2]['gt_class']
        gt_score1 = label_info['gt_score']
        gt_score2 = im_info['mixup'][2]['gt_score']
        if 'gt_poly' in label_info:
            gt_poly1 = label_info['gt_poly']
            gt_poly2 = im_info['mixup'][2]['gt_poly']
        # NOTE(review): indentation reconstructed — is_crowd is read and
        # written unconditionally below, so these reads sit outside the
        # gt_poly guard; confirm against the original file.
        is_crowd1 = label_info['is_crowd']
        is_crowd2 = im_info['mixup'][2]['is_crowd']
        # NOTE(review): class id 0 appears to mark samples whose boxes must
        # not be mixed; only the side without it is kept — confirm convention.
        if 0 not in gt_class1 and 0 not in gt_class2:
            gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
            gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
            gt_score = np.concatenate(
                (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
            if 'gt_poly' in label_info:
                label_info['gt_poly'] = gt_poly1 + gt_poly2
            is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
        elif 0 in gt_class1:
            gt_bbox = gt_bbox2
            gt_class = gt_class2
            gt_score = gt_score2 * (1. - factor)
            if 'gt_poly' in label_info:
                label_info['gt_poly'] = gt_poly2
            is_crowd = is_crowd2
        else:
            gt_bbox = gt_bbox1
            gt_class = gt_class1
            gt_score = gt_score1 * factor
            if 'gt_poly' in label_info:
                label_info['gt_poly'] = gt_poly1
            is_crowd = is_crowd1
        label_info['gt_bbox'] = gt_bbox
        label_info['gt_score'] = gt_score
        label_info['gt_class'] = gt_class
        label_info['is_crowd'] = is_crowd
        im_info['image_shape'] = np.array([im.shape[0],
                                           im.shape[1]]).astype('int32')
        im_info.pop('mixup')
        # label_info is known non-None here; branch kept for symmetry with
        # the return convention of the other transforms.
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
  760. class RandomExpand(DetTransform):
  761. """随机扩张图像,模型训练时的数据增强操作。
  762. 1. 随机选取扩张比例(扩张比例大于1时才进行扩张)。
  763. 2. 计算扩张后图像大小。
  764. 3. 初始化像素值为输入填充值的图像,并将原图像随机粘贴于该图像上。
  765. 4. 根据原图像粘贴位置换算出扩张后真实标注框的位置坐标。
  766. 5. 根据原图像粘贴位置换算出扩张后真实分割区域的位置坐标。
  767. Args:
  768. ratio (float): 图像扩张的最大比例。默认为4.0。
  769. prob (float): 随机扩张的概率。默认为0.5。
  770. fill_value (list): 扩张图像的初始填充值(0-255)。默认为[123.675, 116.28, 103.53]。
  771. """
  772. def __init__(self,
  773. ratio=4.,
  774. prob=0.5,
  775. fill_value=[123.675, 116.28, 103.53]):
  776. super(RandomExpand, self).__init__()
  777. assert ratio > 1.01, "expand ratio must be larger than 1.01"
  778. self.ratio = ratio
  779. self.prob = prob
  780. assert isinstance(fill_value, Sequence), \
  781. "fill value must be sequence"
  782. if not isinstance(fill_value, tuple):
  783. fill_value = tuple(fill_value)
  784. self.fill_value = fill_value
  785. def __call__(self, im, im_info=None, label_info=None):
  786. """
  787. Args:
  788. im (np.ndarray): 图像np.ndarray数据。
  789. im_info (dict, 可选): 存储与图像相关的信息。
  790. label_info (dict, 可选): 存储与标注框相关的信息。
  791. Returns:
  792. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  793. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  794. 存储与标注框相关信息的字典。
  795. 其中,im_info更新字段为:
  796. - image_shape (np.ndarray): 扩张后的图像高、宽二者组成的np.ndarray,形状为(2,)。
  797. label_info更新字段为:
  798. - gt_bbox (np.ndarray): 随机扩张后真实标注框坐标,形状为(n, 4),
  799. 其中n代表真实标注框的个数。
  800. - gt_class (np.ndarray): 随机扩张后每个真实标注框对应的类别序号,形状为(n, 1),
  801. 其中n代表真实标注框的个数。
  802. Raises:
  803. TypeError: 形参数据类型不满足需求。
  804. """
  805. if im_info is None or label_info is None:
  806. raise TypeError(
  807. 'Cannot do RandomExpand! ' +
  808. 'Becasuse the im_info and label_info can not be None!')
  809. if 'gt_bbox' not in label_info or \
  810. 'gt_class' not in label_info:
  811. raise TypeError('Cannot do RandomExpand! ' + \
  812. 'Becasuse gt_bbox/gt_class is not in label_info!')
  813. if np.random.uniform(0., 1.) > self.prob:
  814. return (im, im_info, label_info)
  815. if 'gt_class' in label_info and 0 in label_info['gt_class']:
  816. return (im, im_info, label_info)
  817. image_shape = im_info['image_shape']
  818. height = int(image_shape[0])
  819. width = int(image_shape[1])
  820. expand_ratio = np.random.uniform(1., self.ratio)
  821. h = int(height * expand_ratio)
  822. w = int(width * expand_ratio)
  823. if not h > height or not w > width:
  824. return (im, im_info, label_info)
  825. y = np.random.randint(0, h - height)
  826. x = np.random.randint(0, w - width)
  827. canvas = np.ones((h, w, 3), dtype=np.float32)
  828. canvas *= np.array(self.fill_value, dtype=np.float32)
  829. canvas[y:y + height, x:x + width, :] = im
  830. im_info['image_shape'] = np.array([h, w]).astype('int32')
  831. if 'gt_bbox' in label_info and len(label_info['gt_bbox']) > 0:
  832. label_info['gt_bbox'] += np.array([x, y] * 2, dtype=np.float32)
  833. if 'gt_poly' in label_info and len(label_info['gt_poly']) > 0:
  834. label_info['gt_poly'] = expand_segms(label_info['gt_poly'], x, y,
  835. height, width, expand_ratio)
  836. return (canvas, im_info, label_info)
class RandomCrop(DetTransform):
    """Random crop augmentation.

    1. When allow_no_crop is True, 'no_crop' is added to thresholds.
    2. thresholds is shuffled.
    3. For each thresh:
       (1) 'no_crop' returns the sample unchanged.
       (2) A candidate crop (h, w, origin) is sampled from aspect_ratio
           and scaling.
       (3) If every gt box's IoU with the crop is below thresh, retry.
       (4) If cover_all_box is True and any gt box's IoU is below thresh,
           retry.
       (5) Keep boxes whose centers fall inside the crop; retry when none
           survive.
    4. Surviving boxes are translated into crop coordinates.
    5. Segmentation polygons are cropped accordingly.

    Args:
        aspect_ratio (list): [min, max] range of the crop aspect ratio.
            Default [.5, 2.].
        thresholds (list): IoU thresholds used to accept a candidate crop.
            Default [.0, .1, .3, .5, .7, .9].
        scaling (list): [min, max] range of the crop area relative to the
            original area. Default [.3, 1.].
        num_attempts (int): attempts before giving up on a threshold.
            Default 50.
        allow_no_crop (bool): whether leaving the image uncropped is an
            allowed outcome. Default True.
        cover_all_box (bool): whether every gt box must stay inside the
            crop. Default False.
    """

    def __init__(self,
                 aspect_ratio=[.5, 2.],
                 thresholds=[.0, .1, .3, .5, .7, .9],
                 scaling=[.3, 1.],
                 num_attempts=50,
                 allow_no_crop=True,
                 cover_all_box=False):
        self.aspect_ratio = aspect_ratio
        self.thresholds = thresholds
        self.scaling = scaling
        self.num_attempts = num_attempts
        self.allow_no_crop = allow_no_crop
        self.cover_all_box = cover_all_box

    def __call__(self, im, im_info=None, label_info=None):
        """Randomly crop one sample.

        Args:
            im (np.ndarray): image data.
            im_info (dict, optional): image metadata; required.
            label_info (dict, optional): annotation info; required.

        Returns:
            tuple: (im, im_info, label_info) with im_info['image_shape']
            updated and gt_bbox / gt_class / gt_score / is_crowd / gt_poly
            filtered to the boxes surviving the crop.

        Raises:
            TypeError: when im_info/label_info or required keys are missing.
        """
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomCrop! ' +
                'Becasuse the im_info and label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info:
            raise TypeError('Cannot do RandomCrop! ' +
                            'Becasuse gt_bbox/gt_class is not in label_info!')
        if len(label_info['gt_bbox']) == 0:
            return (im, im_info, label_info)
        # Samples tagged with class id 0 are left untouched.
        if 'gt_class' in label_info and 0 in label_info['gt_class']:
            return (im, im_info, label_info)
        image_shape = im_info['image_shape']
        w = image_shape[1]
        h = image_shape[0]
        gt_bbox = label_info['gt_bbox']
        thresholds = list(self.thresholds)
        if self.allow_no_crop:
            thresholds.append('no_crop')
        np.random.shuffle(thresholds)
        for thresh in thresholds:
            # 'no_crop' drawn: keep the sample unchanged.
            if thresh == 'no_crop':
                return (im, im_info, label_info)
            found = False
            for i in range(self.num_attempts):
                scale = np.random.uniform(*self.scaling)
                min_ar, max_ar = self.aspect_ratio
                # Bounding the ratio by scale**±2 keeps crop_h/crop_w
                # within the image for the sampled scale.
                aspect_ratio = np.random.uniform(
                    max(min_ar, scale**2), min(max_ar, scale**-2))
                crop_h = int(h * scale / np.sqrt(aspect_ratio))
                crop_w = int(w * scale * np.sqrt(aspect_ratio))
                crop_y = np.random.randint(0, h - crop_h)
                crop_x = np.random.randint(0, w - crop_w)
                crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
                iou = iou_matrix(
                    gt_bbox, np.array(
                        [crop_box], dtype=np.float32))
                # Every box overlaps too little with this candidate: retry.
                if iou.max() < thresh:
                    continue
                # Some box would be cut although full coverage is required.
                if self.cover_all_box and iou.min() < thresh:
                    continue
                cropped_box, valid_ids = crop_box_with_center_constraint(
                    gt_bbox, np.array(
                        crop_box, dtype=np.float32))
                if valid_ids.size > 0:
                    found = True
                    break
            if found:
                if 'gt_poly' in label_info and len(label_info['gt_poly']) > 0:
                    crop_polys = crop_segms(
                        label_info['gt_poly'],
                        valid_ids,
                        np.array(
                            crop_box, dtype=np.int64),
                        h,
                        w)
                    # Drop boxes whose polygon vanished inside the crop.
                    if [] in crop_polys:
                        delete_id = list()
                        valid_polys = list()
                        for id, crop_poly in enumerate(crop_polys):
                            if crop_poly == []:
                                delete_id.append(id)
                            else:
                                valid_polys.append(crop_poly)
                        valid_ids = np.delete(valid_ids, delete_id)
                        if len(valid_polys) == 0:
                            return (im, im_info, label_info)
                        label_info['gt_poly'] = valid_polys
                    else:
                        label_info['gt_poly'] = crop_polys
                im = crop_image(im, crop_box)
                label_info['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
                label_info['gt_class'] = np.take(
                    label_info['gt_class'], valid_ids, axis=0)
                im_info['image_shape'] = np.array(
                    [crop_box[3] - crop_box[1],
                     crop_box[2] - crop_box[0]]).astype('int32')
                if 'gt_score' in label_info:
                    label_info['gt_score'] = np.take(
                        label_info['gt_score'], valid_ids, axis=0)
                if 'is_crowd' in label_info:
                    label_info['is_crowd'] = np.take(
                        label_info['is_crowd'], valid_ids, axis=0)
                return (im, im_info, label_info)
        # All thresholds exhausted without a valid crop: return unchanged.
        return (im, im_info, label_info)
  977. class CLAHE(DetTransform):
  978. """对图像进行对比度增强。
  979. Args:
  980. clip_limit (int|float): 颜色对比度的阈值,默认值为2.。
  981. tile_grid_size (list|tuple): 进行像素均衡化的网格大小。默认值为(8, 8)。
  982. Raises:
  983. TypeError: 形参数据类型不满足需求。
  984. """
  985. def __init__(self, clip_limit=2., tile_grid_size=(8, 8)):
  986. self.clip_limit = clip_limit
  987. self.tile_grid_size = tile_grid_size
  988. def __call__(self, im, im_info=None, label_info=None):
  989. """
  990. Args:
  991. im (numnp.ndarraypy): 图像np.ndarray数据。
  992. im_info (dict, 可选): 存储与图像相关的信息。
  993. label_info (dict, 可选): 存储与标注框相关的信息。
  994. Returns:
  995. tuple: 当label_info为空时,返回的tuple为(im, im_info),分别对应图像np.ndarray数据、存储与图像相关信息的字典;
  996. 当label_info不为空时,返回的tuple为(im, im_info, label_info),分别对应图像np.ndarray数据、
  997. 存储与标注框相关信息的字典。
  998. """
  999. if im.shape[-1] != 1:
  1000. raise Exception(
  1001. "Only the one-channel image is supported in the CLAHE operator, but recieved image channel is {}".
  1002. format(im.shape[-1]))
  1003. clahe = cv2.createCLAHE(
  1004. clipLimit=self.clip_limit, tileGridSize=self.tile_grid_size)
  1005. im = clahe.apply(im).astype(im.dtype)
  1006. if label_info is None:
  1007. return (im, im_info)
  1008. else:
  1009. return (im, im_info, label_info)
  1010. class ArrangeFasterRCNN(DetTransform):
  1011. """获取FasterRCNN模型训练/验证/预测所需信息。
  1012. Args:
  1013. mode (str): 指定数据用于何种用途,取值范围为['train', 'eval', 'test', 'quant']。
  1014. Raises:
  1015. ValueError: mode的取值不在['train', 'eval', 'test', 'quant']之内。
  1016. """
  1017. def __init__(self, mode=None):
  1018. if mode not in ['train', 'eval', 'test', 'quant']:
  1019. raise ValueError(
  1020. "mode must be in ['train', 'eval', 'test', 'quant']!")
  1021. self.mode = mode
  1022. def __call__(self, im, im_info=None, label_info=None):
  1023. """
  1024. Args:
  1025. im (np.ndarray): 图像np.ndarray数据。
  1026. im_info (dict, 可选): 存储与图像相关的信息。
  1027. label_info (dict, 可选): 存储与标注框相关的信息。
  1028. Returns:
  1029. tuple: 当mode为'train'时,返回(im, im_resize_info, gt_bbox, gt_class, is_crowd),分别对应
  1030. 图像np.ndarray数据、图像相当对于原图的resize信息、真实标注框、真实标注框对应的类别、真实标注框内是否是一组对象;
  1031. 当mode为'eval'时,返回(im, im_resize_info, im_id, im_shape, gt_bbox, gt_class, is_difficult),
  1032. 分别对应图像np.ndarray数据、图像相当对于原图的resize信息、图像id、图像大小信息、真实标注框、真实标注框对应的类别、
  1033. 真实标注框是否为难识别对象;当mode为'test'或'quant'时,返回(im, im_resize_info, im_shape),分别对应图像np.ndarray数据、
  1034. 图像相当对于原图的resize信息、图像大小信息。
  1035. Raises:
  1036. TypeError: 形参数据类型不满足需求。
  1037. ValueError: 数据长度不匹配。
  1038. """
  1039. im = permute(im, False)
  1040. if self.mode == 'train':
  1041. if im_info is None or label_info is None:
  1042. raise TypeError(
  1043. 'Cannot do ArrangeFasterRCNN! ' +
  1044. 'Becasuse the im_info and label_info can not be None!')
  1045. if len(label_info['gt_bbox']) != len(label_info['gt_class']):
  1046. raise ValueError("gt num mismatch: bbox and class.")
  1047. im_resize_info = im_info['im_resize_info']
  1048. gt_bbox = label_info['gt_bbox']
  1049. gt_class = label_info['gt_class']
  1050. is_crowd = label_info['is_crowd']
  1051. outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd)
  1052. elif self.mode == 'eval':
  1053. if im_info is None or label_info is None:
  1054. raise TypeError(
  1055. 'Cannot do ArrangeFasterRCNN! ' +
  1056. 'Becasuse the im_info and label_info can not be None!')
  1057. im_resize_info = im_info['im_resize_info']
  1058. im_id = im_info['im_id']
  1059. im_shape = np.array(
  1060. (im_info['image_shape'][0], im_info['image_shape'][1], 1),
  1061. dtype=np.float32)
  1062. gt_bbox = label_info['gt_bbox']
  1063. gt_class = label_info['gt_class']
  1064. is_difficult = label_info['difficult']
  1065. outputs = (im, im_resize_info, im_id, im_shape, gt_bbox, gt_class,
  1066. is_difficult)
  1067. else:
  1068. if im_info is None:
  1069. raise TypeError('Cannot do ArrangeFasterRCNN! ' +
  1070. 'Becasuse the im_info can not be None!')
  1071. im_resize_info = im_info['im_resize_info']
  1072. im_shape = np.array(
  1073. (im_info['image_shape'][0], im_info['image_shape'][1], 1),
  1074. dtype=np.float32)
  1075. outputs = (im, im_resize_info, im_shape)
  1076. return outputs
class ArrangeMaskRCNN(DetTransform):
    """Collect the inputs a MaskRCNN model needs for train/eval/predict.

    Args:
        mode (str): one of ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: when mode is not one of the allowed values.
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """Arrange one sample according to ``self.mode``.

        Args:
            im (np.ndarray): image data.
            im_info (dict, optional): image metadata.
            label_info (dict, optional): annotation info.

        Returns:
            tuple: 'train' -> (im, im_resize_info, gt_bbox, gt_class,
            is_crowd, gt_masks); 'eval' -> (im, im_resize_info, im_id,
            im_shape); 'test'/'quant' -> (im, im_resize_info, im_shape).

        Raises:
            TypeError: when required arguments are missing.
            ValueError: when annotation lengths disagree.
        """
        # Convert HWC -> CHW (no BGR/RGB swap).
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeTrainMaskRCNN! ' +
                    'Becasuse the im_info and label_info can not be None!')
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_resize_info = im_info['im_resize_info']
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_crowd = label_info['is_crowd']
            assert 'gt_poly' in label_info
            segms = label_info['gt_poly']
            if len(segms) != 0:
                assert len(segms) == is_crowd.shape[0]
            # Rebuild the polygon list as (n, 2) vertex arrays per object.
            gt_masks = []
            valid = True
            for i in range(len(segms)):
                segm = segms[i]
                gt_segm = []
                if is_crowd[i]:
                    # Crowd regions get a degenerate one-point placeholder.
                    gt_segm.append([[0, 0]])
                else:
                    for poly in segm:
                        if len(poly) == 0:
                            # An empty polygon invalidates the sample;
                            # stop collecting masks.
                            valid = False
                            break
                        gt_segm.append(np.array(poly).reshape(-1, 2))
                if (not valid) or len(gt_segm) == 0:
                    break
                gt_masks.append(gt_segm)
            outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd,
                       gt_masks)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeMaskRCNN! ' +
                                'Becasuse the im_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_shape = np.array(
                (im_info['image_shape'][0], im_info['image_shape'][1], 1),
                dtype=np.float32)
            if self.mode == 'eval':
                im_id = im_info['im_id']
                outputs = (im, im_resize_info, im_id, im_shape)
            else:
                outputs = (im, im_resize_info, im_shape)
        return outputs
class ArrangeYOLOv3(DetTransform):
    """Collect the inputs a YOLOv3 model needs for train/eval/predict.

    Args:
        mode (str): one of ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: when mode is not one of the allowed values.
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """Arrange one sample according to ``self.mode``.

        Ground-truth arrays are padded/truncated to a fixed capacity of
        50 boxes per image.

        Args:
            im (np.ndarray): image data.
            im_info (dict, optional): image metadata.
            label_info (dict, optional): annotation info.

        Returns:
            tuple: 'train' -> (im, gt_bbox, gt_class, gt_score, im_shape);
            'eval' -> (im, im_shape, im_id, gt_bbox, gt_class, difficult);
            'test'/'quant' -> (im, im_shape).

        Raises:
            TypeError: when required arguments are missing.
            ValueError: when annotation lengths disagree.
        """
        # Convert HWC -> CHW (no BGR/RGB swap).
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeYolov3! ' +
                    'Becasuse the im_info and label_info can not be None!')
            im_shape = im_info['image_shape']
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            if len(label_info['gt_bbox']) != len(label_info['gt_score']):
                raise ValueError("gt num mismatch: bbox and score.")
            gt_bbox = np.zeros((50, 4), dtype=im.dtype)
            gt_class = np.zeros((50, ), dtype=np.int32)
            gt_score = np.zeros((50, ), dtype=im.dtype)
            gt_num = min(50, len(label_info['gt_bbox']))
            if gt_num > 0:
                # NOTE(review): shifts class ids from 1-based to 0-based
                # by mutating label_info in place — repeated invocation on
                # the same dict would shift ids again.
                label_info['gt_class'][:gt_num, 0] = label_info[
                    'gt_class'][:gt_num, 0] - 1
                # A -1 after the shift marks a background/invalid class:
                # leave the padded zero arrays untouched in that case.
                if -1 not in label_info['gt_class']:
                    gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
                    gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
                    gt_score[:gt_num] = label_info['gt_score'][:gt_num, 0]
            # parse [x1, y1, x2, y2] to [x, y, w, h] (center + size)
            gt_bbox[:, 2:4] = gt_bbox[:, 2:4] - gt_bbox[:, :2]
            gt_bbox[:, :2] = gt_bbox[:, :2] + gt_bbox[:, 2:4] / 2.
            outputs = (im, gt_bbox, gt_class, gt_score, im_shape)
        elif self.mode == 'eval':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeYolov3! ' +
                    'Becasuse the im_info and label_info can not be None!')
            im_shape = im_info['image_shape']
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_id = im_info['im_id']
            gt_bbox = np.zeros((50, 4), dtype=im.dtype)
            gt_class = np.zeros((50, ), dtype=np.int32)
            difficult = np.zeros((50, ), dtype=np.int32)
            gt_num = min(50, len(label_info['gt_bbox']))
            if gt_num > 0:
                # Same in-place 1-based -> 0-based class-id shift as above.
                label_info['gt_class'][:gt_num, 0] = label_info[
                    'gt_class'][:gt_num, 0] - 1
                gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
                gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
                difficult[:gt_num] = label_info['difficult'][:gt_num, 0]
            outputs = (im, im_shape, im_id, gt_bbox, gt_class, difficult)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeYolov3! ' +
                                'Becasuse the im_info can not be None!')
            im_shape = im_info['image_shape']
            outputs = (im, im_shape)
        return outputs
  1235. class ComposedRCNNTransforms(Compose):
  1236. """ RCNN模型(faster-rcnn/mask-rcnn)图像处理流程,具体如下,
  1237. 训练阶段:
  1238. 1. 随机以0.5的概率将图像水平翻转
  1239. 2. 图像归一化
  1240. 3. 图像按比例Resize,scale计算方式如下
  1241. scale = min_max_size[0] / short_size_of_image
  1242. if max_size_of_image * scale > min_max_size[1]:
  1243. scale = min_max_size[1] / max_size_of_image
  1244. 4. 将3步骤的长宽进行padding,使得长宽为32的倍数
  1245. 验证阶段:
  1246. 1. 图像归一化
  1247. 2. 图像按比例Resize,scale计算方式同上训练阶段
  1248. 3. 将2步骤的长宽进行padding,使得长宽为32的倍数
  1249. Args:
  1250. mode(str): 图像处理流程所处阶段,训练/验证/预测,分别对应'train', 'eval', 'test'
  1251. min_max_size(list): 图像在缩放时,最小边和最大边的约束条件
  1252. mean(list): 图像均值
  1253. std(list): 图像方差
  1254. random_horizontal_flip(bool): 是否以0.5的概率使用随机水平翻转增强,该仅在mode为`train`时生效,默认为True
  1255. """
  1256. def __init__(self,
  1257. mode,
  1258. min_max_size=[800, 1333],
  1259. mean=[0.485, 0.456, 0.406],
  1260. std=[0.229, 0.224, 0.225],
  1261. random_horizontal_flip=True):
  1262. if mode == 'train':
  1263. # 训练时的transforms,包含数据增强
  1264. transforms = [
  1265. Normalize(
  1266. mean=mean, std=std), ResizeByShort(
  1267. short_size=min_max_size[0], max_size=min_max_size[1]),
  1268. Padding(coarsest_stride=32)
  1269. ]
  1270. if random_horizontal_flip:
  1271. transforms.insert(0, RandomHorizontalFlip())
  1272. else:
  1273. # 验证/预测时的transforms
  1274. transforms = [
  1275. Normalize(
  1276. mean=mean, std=std), ResizeByShort(
  1277. short_size=min_max_size[0], max_size=min_max_size[1]),
  1278. Padding(coarsest_stride=32)
  1279. ]
  1280. super(ComposedRCNNTransforms, self).__init__(transforms)
  1281. class ComposedYOLOv3Transforms(Compose):
  1282. """YOLOv3模型的图像预处理流程,具体如下,
  1283. 训练阶段:
  1284. 1. 在前mixup_epoch轮迭代中,使用MixupImage策略,见https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/det_transforms.html#mixupimage
  1285. 2. 对图像进行随机扰动,包括亮度,对比度,饱和度和色调
  1286. 3. 随机扩充图像,见https://paddlex.readthedocs.io/zh_CN/latest/apis/transforms/det_transforms.html#randomexpand
  1287. 4. 随机裁剪图像
  1288. 5. 将4步骤的输出图像Resize成shape参数的大小
  1289. 6. 随机0.5的概率水平翻转图像
  1290. 7. 图像归一化
  1291. 验证/预测阶段:
  1292. 1. 将图像Resize成shape参数大小
  1293. 2. 图像归一化
  1294. Args:
  1295. mode(str): 图像处理流程所处阶段,训练/验证/预测,分别对应'train', 'eval', 'test'
  1296. shape(list): 输入模型中图像的大小,输入模型的图像会被Resize成此大小
  1297. mixup_epoch(int): 模型训练过程中,前mixup_epoch会使用mixup策略, 若设为-1,则表示不使用该策略
  1298. mean(list): 图像均值
  1299. std(list): 图像方差
  1300. random_distort(bool): 数据增强方式,参数仅在mode为`train`时生效,表示是否在训练过程中随机扰动图像,默认为True
  1301. random_expand(bool): 数据增强方式,参数仅在mode为`train`时生效,表示是否在训练过程中随机扩张图像,默认为True
  1302. random_crop(bool): 数据增强方式,参数仅在mode为`train`时生效,表示是否在训练过程中随机裁剪图像,默认为True
  1303. random_horizontal_flip(bool): 数据增强方式,参数仅在mode为`train`时生效,表示是否在训练过程中随机水平翻转图像,默认为True
  1304. """
  1305. def __init__(self,
  1306. mode,
  1307. shape=[608, 608],
  1308. mixup_epoch=250,
  1309. mean=[0.485, 0.456, 0.406],
  1310. std=[0.229, 0.224, 0.225],
  1311. random_distort=True,
  1312. random_expand=True,
  1313. random_crop=True,
  1314. random_horizontal_flip=True):
  1315. width = shape
  1316. if isinstance(shape, list):
  1317. if shape[0] != shape[1]:
  1318. raise Exception(
  1319. "In YOLOv3 model, width and height should be equal")
  1320. width = shape[0]
  1321. if width % 32 != 0:
  1322. raise Exception(
  1323. "In YOLOv3 model, width and height should be multiple of 32, e.g 224、256、320...."
  1324. )
  1325. if mode == 'train':
  1326. # 训练时的transforms,包含数据增强
  1327. transforms = [
  1328. MixupImage(mixup_epoch=mixup_epoch), Resize(
  1329. target_size=width, interp='RANDOM'), Normalize(
  1330. mean=mean, std=std)
  1331. ]
  1332. if random_horizontal_flip:
  1333. transforms.insert(1, RandomHorizontalFlip())
  1334. if random_crop:
  1335. transforms.insert(1, RandomCrop())
  1336. if random_expand:
  1337. transforms.insert(1, RandomExpand())
  1338. if random_distort:
  1339. transforms.insert(1, RandomDistort())
  1340. else:
  1341. # 验证/预测时的transforms
  1342. transforms = [
  1343. Resize(
  1344. target_size=width, interp='CUBIC'), Normalize(
  1345. mean=mean, std=std)
  1346. ]
  1347. super(ComposedYOLOv3Transforms, self).__init__(transforms)
  1348. class BatchRandomShape(DetTransform):
  1349. """调整图像大小(resize)。
  1350. 对batch数据中的每张图像全部resize到random_shapes中任意一个大小。
  1351. 注意:当插值方式为“RANDOM”时,则随机选取一种插值方式进行resize。
  1352. Args:
  1353. random_shapes (list): resize大小选择列表。
  1354. 默认为[320, 352, 384, 416, 448, 480, 512, 544, 576, 608]。
  1355. interp (str): resize的插值方式,与opencv的插值方式对应,取值范围为
  1356. ['NEAREST', 'LINEAR', 'CUBIC', 'AREA', 'LANCZOS4', 'RANDOM']。默认为"RANDOM"。
  1357. Raises:
  1358. ValueError: 插值方式不在['NEAREST', 'LINEAR', 'CUBIC',
  1359. 'AREA', 'LANCZOS4', 'RANDOM']中。
  1360. """
  1361. # The interpolation mode
  1362. interp_dict = {
  1363. 'NEAREST': cv2.INTER_NEAREST,
  1364. 'LINEAR': cv2.INTER_LINEAR,
  1365. 'CUBIC': cv2.INTER_CUBIC,
  1366. 'AREA': cv2.INTER_AREA,
  1367. 'LANCZOS4': cv2.INTER_LANCZOS4
  1368. }
  1369. def __init__(
  1370. self,
  1371. random_shapes=[320, 352, 384, 416, 448, 480, 512, 544, 576, 608],
  1372. interp='RANDOM'):
  1373. if not (interp == "RANDOM" or interp in self.interp_dict):
  1374. raise ValueError("interp should be one of {}".format(
  1375. self.interp_dict.keys()))
  1376. self.random_shapes = random_shapes
  1377. self.interp = interp
  1378. def __call__(self, batch_data):
  1379. """
  1380. Args:
  1381. batch_data (list): 由与图像相关的各种信息组成的batch数据。
  1382. Returns:
  1383. list: 由与图像相关的各种信息组成的batch数据。
  1384. """
  1385. shape = np.random.choice(self.random_shapes)
  1386. if self.interp == "RANDOM":
  1387. interp = random.choice(list(self.interp_dict.keys()))
  1388. else:
  1389. interp = self.interp
  1390. for data_id, data in enumerate(batch_data):
  1391. data_list = list(data)
  1392. im = data_list[0]
  1393. im = np.swapaxes(im, 1, 0)
  1394. im = np.swapaxes(im, 1, 2)
  1395. im = resize(im, shape, self.interp_dict[interp])
  1396. im = np.swapaxes(im, 1, 2)
  1397. im = np.swapaxes(im, 1, 0)
  1398. data_list[0] = im
  1399. batch_data[data_id] = tuple(data_list)
  1400. return batch_data
class GenerateYoloTarget(object):
    """Build per-feature-map regression targets for YOLOv3 ground truth.

    Only used when YOLOv3 computes its fine-grained loss.

    Args:
        anchors (list|tuple): anchor box widths and heights.
        anchor_masks (list|tuple): anchor indices assigned to each
            feature map.
        downsample_ratios (list|tuple): downsample ratio of each feature
            map relative to the input image.
        num_classes (int): number of classes. Default 80.
        iou_thresh (float): anchors (besides the best match) whose IoU with
            a gt box exceeds this threshold also receive targets.
            Default 1.0, which disables the extra assignment.
    """

    def __init__(self,
                 anchors,
                 anchor_masks,
                 downsample_ratios,
                 num_classes=80,
                 iou_thresh=1.):
        super(GenerateYoloTarget, self).__init__()
        self.anchors = anchors
        self.anchor_masks = anchor_masks
        self.downsample_ratios = downsample_ratios
        self.num_classes = num_classes
        self.iou_thresh = iou_thresh

    def __call__(self, batch_data):
        """Append one target array per feature map to every sample.

        Args:
            batch_data (list): batch of per-sample tuples laid out as
                (im, gt_bbox, gt_class, gt_score, im_shape, ...), with the
                image stored CHW.

        Returns:
            list: the batch with fields target0..targetn appended, where
            targeti has shape (number of anchors of layer i,
            6 + num_classes, grid h of layer i, grid w of layer i).
        """
        # All images in the batch share the first image's (padded) size.
        im = batch_data[0][0]
        h = im.shape[1]
        w = im.shape[2]
        # Anchor sizes normalized by the network input size.
        an_hw = np.array(self.anchors) / np.array([[w, h]])
        for data_id, data in enumerate(batch_data):
            gt_bbox = data[1]
            gt_class = data[2]
            gt_score = data[3]
            im_shape = data[4]
            origin_h = float(im_shape[0])
            origin_w = float(im_shape[1])
            data_list = list(data)
            for i, (
                    mask, downsample_ratio
            ) in enumerate(zip(self.anchor_masks, self.downsample_ratios)):
                grid_h = int(h / downsample_ratio)
                grid_w = int(w / downsample_ratio)
                # Channels: x, y, w, h, scale weight, objectness, one-hot class.
                target = np.zeros(
                    (len(mask), 6 + self.num_classes, grid_h, grid_w),
                    dtype=np.float32)
                for b in range(gt_bbox.shape[0]):
                    # gt boxes are center-format, normalized by the
                    # original image size.
                    gx = gt_bbox[b, 0] / float(origin_w)
                    gy = gt_bbox[b, 1] / float(origin_h)
                    gw = gt_bbox[b, 2] / float(origin_w)
                    gh = gt_bbox[b, 3] / float(origin_h)
                    cls = gt_class[b]
                    score = gt_score[b]
                    # Skip degenerate boxes and zero-score (padding) entries.
                    if gw <= 0. or gh <= 0. or score <= 0.:
                        continue
                    # Find the best-matching anchor by shape-only IoU
                    # (both boxes anchored at the origin).
                    best_iou = 0.
                    best_idx = -1
                    for an_idx in range(an_hw.shape[0]):
                        iou = jaccard_overlap(
                            [0., 0., gw, gh],
                            [0., 0., an_hw[an_idx, 0], an_hw[an_idx, 1]])
                        if iou > best_iou:
                            best_iou = iou
                            best_idx = an_idx
                    # Grid cell containing the box center.
                    gi = int(gx * grid_w)
                    gj = int(gy * grid_h)
                    # The gt box is regressed at this layer only when its
                    # best-matching anchor belongs to this layer's mask.
                    if best_idx in mask:
                        best_n = mask.index(best_idx)
                        # x, y, w, h, scale
                        target[best_n, 0, gj, gi] = gx * grid_w - gi
                        target[best_n, 1, gj, gi] = gy * grid_h - gj
                        target[best_n, 2, gj, gi] = np.log(
                            gw * w / self.anchors[best_idx][0])
                        target[best_n, 3, gj, gi] = np.log(
                            gh * h / self.anchors[best_idx][1])
                        # Loss weight: small boxes get a larger weight.
                        target[best_n, 4, gj, gi] = 2.0 - gw * gh
                        # objectness records gt_score
                        target[best_n, 5, gj, gi] = score
                        # classification
                        target[best_n, 6 + cls, gj, gi] = 1.
                    # For non-matched anchors of this layer, also write the
                    # target when the anchor/gt IoU exceeds iou_thresh.
                    if self.iou_thresh < 1:
                        for idx, mask_i in enumerate(mask):
                            if mask_i == best_idx:
                                continue
                            iou = jaccard_overlap(
                                [0., 0., gw, gh],
                                [0., 0., an_hw[mask_i, 0], an_hw[mask_i, 1]])
                            if iou > self.iou_thresh:
                                # x, y, w, h, scale
                                target[idx, 0, gj, gi] = gx * grid_w - gi
                                target[idx, 1, gj, gi] = gy * grid_h - gj
                                target[idx, 2, gj, gi] = np.log(
                                    gw * w / self.anchors[mask_i][0])
                                target[idx, 3, gj, gi] = np.log(
                                    gh * h / self.anchors[mask_i][1])
                                target[idx, 4, gj, gi] = 2.0 - gw * gh
                                # objectness records gt_score
                                target[idx, 5, gj, gi] = score
                                # classification
                                target[idx, 6 + cls, gj, gi] = 1.
                data_list.append(target)
            batch_data[data_id] = tuple(data_list)
        return batch_data