# det_transforms.py
# copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

try:
    from collections.abc import Sequence
except Exception:
    from collections import Sequence
import random
import os.path as osp
import numpy as np
import cv2
from PIL import Image, ImageEnhance

from .ops import *
from .box_utils import *

class Compose:
    """Apply a list of preprocessing/augmentation operators to the input data.

    All operators expect the input image in [H, W, C] layout, where H is the
    image height, W the width and C the number of channels.

    Args:
        transforms (list): List of preprocessing/augmentation operators.

    Raises:
        TypeError: If the argument type does not match the requirement.
        ValueError: If the data length does not match.
    """

    def __init__(self, transforms):
        if not isinstance(transforms, list):
            raise TypeError('The transforms must be a list!')
        if len(transforms) < 1:
            raise ValueError('The length of transforms ' + \
                             'must be equal or larger than 1!')
        self.transforms = transforms
        self.use_mixup = False
        for t in self.transforms:
            if t.__class__.__name__ == 'MixupImage':
                self.use_mixup = True

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (str/np.ndarray): Image path or image data as np.ndarray.
            im_info (dict): Image-related information with the following fields:
                - im_id (np.ndarray): Image id, shape (1,).
                - origin_shape (np.ndarray): Original image size, shape (2,);
                  origin_shape[0] is the height, origin_shape[1] the width.
                - mixup (list): [im, im_info, label_info], i.e. the image data,
                  image info and annotation info of the image to be mixed up
                  with the current one. The field is absent if no mixup is
                  needed in the current epoch.
            label_info (dict): Annotation-related information with the following fields:
                - gt_bbox (np.ndarray): Ground-truth box coordinates
                  [x1, y1, x2, y2], shape (n, 4), where n is the number of
                  ground-truth boxes.
                - gt_class (np.ndarray): Class index of each ground-truth box,
                  shape (n, 1).
                - gt_score (np.ndarray): Mixup score of each ground-truth box,
                  shape (n, 1).
                - gt_poly (list): Polygon segmentation inside each ground-truth
                  box, given as x/y coordinates of the polygon points; length n.
                - is_crowd (np.ndarray): Whether each ground-truth box is a
                  crowd region, shape (n, 1).
                - difficult (np.ndarray): Whether the object in each
                  ground-truth box is hard to recognize, shape (n, 1).

        Returns:
            tuple: Fields required by the network; the exact fields are
                determined by the last operator in transforms.
        """

        def decode_image(im_file, im_info, label_info):
            if im_info is None:
                im_info = dict()
            try:
                im = cv2.imread(im_file).astype('float32')
            except:
                raise TypeError(
                    'Can\'t read The image file {}!'.format(im_file))
            im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
            # make default im_info with [h, w, 1]
            im_info['im_resize_info'] = np.array(
                [im.shape[0], im.shape[1], 1.], dtype=np.float32)
            # copy augment_shape from origin_shape
            im_info['augment_shape'] = np.array([im.shape[0],
                                                 im.shape[1]]).astype('int32')
            if not self.use_mixup:
                if 'mixup' in im_info:
                    del im_info['mixup']
            # decode mixup image
            if 'mixup' in im_info:
                im_info['mixup'] = \
                    decode_image(im_info['mixup'][0],
                                 im_info['mixup'][1],
                                 im_info['mixup'][2])
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)

        outputs = decode_image(im, im_info, label_info)
        im = outputs[0]
        im_info = outputs[1]
        if len(outputs) == 3:
            label_info = outputs[2]
        for op in self.transforms:
            if im is None:
                return None
            outputs = op(im, im_info, label_info)
            im = outputs[0]
        return outputs
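
# Illustrative usage sketch (not part of the original module): a typical
# training pipeline chains augmentation operators and ends with an Arrange*
# operator that decides the output fields. The class names are the ones
# defined in this file; the image path is a hypothetical placeholder.
#
#   train_transforms = Compose([
#       RandomDistort(),
#       RandomHorizontalFlip(prob=0.5),
#       Normalize(),
#       ResizeByShort(short_size=800, max_size=1333),
#       Padding(coarsest_stride=32),
#       ArrangeFasterRCNN(mode='train'),
#   ])
#   # outputs = train_transforms('example.jpg', im_info, label_info)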

class ResizeByShort:
    """Resize the image according to its short side.

    1. Get the lengths of the long and short sides of the image.
    2. Compute the target length of the long side from the ratio between the
       short side and short_size; the resize scale for height and width is
       short_size / original short side.
    3. If max_size > 0 and the target long side would exceed max_size, adjust
       the scale to max_size / original long side.
    4. Resize the image with the resulting scale.

    Args:
        short_size (int): Target length of the short side. Defaults to 800.
        max_size (int): Upper limit of the long side. Defaults to 1333.

    Raises:
        TypeError: If the argument type does not match the requirement.
    """

    def __init__(self, short_size=800, max_size=1333):
        self.max_size = int(max_size)
        if not isinstance(short_size, int):
            raise TypeError(
                "Type of short_size is invalid. Must be Integer, now is {}".
                format(type(short_size)))
        self.short_size = short_size
        if not (isinstance(self.max_size, int)):
            raise TypeError("max_size: input type is invalid.")

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info), i.e. the
                image np.ndarray and the dict of image-related information;
                otherwise returns (im, im_info, label_info), additionally
                including the dict of annotation-related information.
                The updated field in im_info is:
                - im_resize_info (np.ndarray): Resized height, resized width and
                  the scale relative to the original image, shape (3,).

        Raises:
            TypeError: If the argument type does not match the requirement.
            ValueError: If the data length does not match.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("ResizeByShort: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('ResizeByShort: image is not 3-dimensional.')
        im_short_size = min(im.shape[0], im.shape[1])
        im_long_size = max(im.shape[0], im.shape[1])
        scale = float(self.short_size) / im_short_size
        if self.max_size > 0 and np.round(
                scale * im_long_size) > self.max_size:
            scale = float(self.max_size) / float(im_long_size)
        resized_width = int(round(im.shape[1] * scale))
        resized_height = int(round(im.shape[0] * scale))
        im_resize_info = [resized_height, resized_width, scale]
        im = cv2.resize(
            im, (resized_width, resized_height),
            interpolation=cv2.INTER_LINEAR)
        im_info['im_resize_info'] = np.array(im_resize_info).astype(np.float32)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
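
# Illustrative worked example (not part of the original module): how the
# ResizeByShort scale rule behaves for a 600x1200 input with the defaults
# short_size=800, max_size=1333:
#   scale = 800 / 600 = 1.333..., the long side would become ~1600 > 1333,
#   so the scale is clipped to 1333 / 1200 = 1.1108...,
#   giving a resized image of about 666 x 1333.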

class Padding:
    """Pad the height and width of the image to a multiple of coarsest_stride.
    For example, for an input image of [300, 640] and a coarsest_stride of 32,
    300 is not a multiple of 32, so the image is zero-padded at the right and
    bottom and the output image is [320, 640].

    1. If coarsest_stride is 1, return directly.
    2. Get the height H and width W of the image.
    3. Compute the padded height H_new and width W_new.
    4. Build a zero-valued np.ndarray of shape (H_new, W_new, 3) and paste the
       original image at its top-left corner.

    Args:
        coarsest_stride (int): The padded height and width are multiples of
            this value. Defaults to 1.
    """

    def __init__(self, coarsest_stride=1):
        self.coarsest_stride = coarsest_stride

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info), i.e. the
                image np.ndarray and the dict of image-related information;
                otherwise returns (im, im_info, label_info), additionally
                including the dict of annotation-related information.

        Raises:
            TypeError: If the argument type does not match the requirement.
            ValueError: If the data length does not match.
        """
        if self.coarsest_stride == 1:
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("Padding: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Padding: image is not 3-dimensional.')
        im_h, im_w, im_c = im.shape[:]
        if self.coarsest_stride > 1:
            padding_im_h = int(
                np.ceil(im_h / self.coarsest_stride) * self.coarsest_stride)
            padding_im_w = int(
                np.ceil(im_w / self.coarsest_stride) * self.coarsest_stride)
            padding_im = np.zeros((padding_im_h, padding_im_w, im_c),
                                  dtype=np.float32)
            padding_im[:im_h, :im_w, :] = im
        if label_info is None:
            return (padding_im, im_info)
        else:
            return (padding_im, im_info, label_info)
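
# Illustrative sketch (assumption, not part of the original module): padding a
# 300x640x3 image with coarsest_stride=32 yields a 320x640x3 array whose extra
# bottom rows are zeros.
#
#   pad = Padding(coarsest_stride=32)
#   padded, info = pad(np.zeros((300, 640, 3), dtype=np.float32))
#   # padded.shape == (320, 640, 3)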

class Resize:
    """Resize the image.

    - When target_size is an int, resize the image to
      [target_size, target_size] with the given interpolation method.
    - When target_size is a list or tuple, resize the image to target_size
      with the given interpolation method.
    Note: when the interpolation method is "RANDOM", one interpolation method
    is picked at random for each call.

    Args:
        target_size (int/list/tuple): Target size. Defaults to 608.
        interp (str): Interpolation method for the resize, matching the OpenCV
            interpolation flags; one of ['NEAREST', 'LINEAR', 'CUBIC', 'AREA',
            'LANCZOS4', 'RANDOM']. Defaults to "LINEAR".

    Raises:
        TypeError: If the argument type does not match the requirement.
        ValueError: If interp is not in ['NEAREST', 'LINEAR', 'CUBIC', 'AREA',
            'LANCZOS4', 'RANDOM'].
    """
    # The interpolation mode
    interp_dict = {
        'NEAREST': cv2.INTER_NEAREST,
        'LINEAR': cv2.INTER_LINEAR,
        'CUBIC': cv2.INTER_CUBIC,
        'AREA': cv2.INTER_AREA,
        'LANCZOS4': cv2.INTER_LANCZOS4
    }

    def __init__(self, target_size=608, interp='LINEAR'):
        self.interp = interp
        if not (interp == "RANDOM" or interp in self.interp_dict):
            raise ValueError("interp should be one of {}".format(
                self.interp_dict.keys()))
        if isinstance(target_size, list) or isinstance(target_size, tuple):
            if len(target_size) != 2:
                raise TypeError(
                    'when target is list or tuple, it should include 2 elements, but it is {}'
                    .format(target_size))
        elif not isinstance(target_size, int):
            raise TypeError(
                "Type of target_size is invalid. Must be Integer or List or tuple, now is {}"
                .format(type(target_size)))
        self.target_size = target_size

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info), i.e. the
                image np.ndarray and the dict of image-related information;
                otherwise returns (im, im_info, label_info), additionally
                including the dict of annotation-related information.

        Raises:
            TypeError: If the argument type does not match the requirement.
            ValueError: If the data length does not match.
        """
        if im_info is None:
            im_info = dict()
        if not isinstance(im, np.ndarray):
            raise TypeError("Resize: image type is not numpy.")
        if len(im.shape) != 3:
            raise ValueError('Resize: image is not 3-dimensional.')
        if self.interp == "RANDOM":
            interp = random.choice(list(self.interp_dict.keys()))
        else:
            interp = self.interp
        im = resize(im, self.target_size, self.interp_dict[interp])
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)

class RandomHorizontalFlip:
    """Randomly flip the image, bounding boxes and segmentations; a data
    augmentation operator for model training.

    1. Sample a random number in [0, 1]; if it is smaller than the flip
       probability, run steps 2-4, otherwise return directly.
    2. Flip the image horizontally.
    3. Recompute the ground-truth box coordinates after the flip and update
       gt_bbox in label_info.
    4. Recompute the ground-truth segmentation coordinates after the flip and
       update gt_poly in label_info.

    Args:
        prob (float): Probability of a horizontal flip. Defaults to 0.5.

    Raises:
        TypeError: If the argument type does not match the requirement.
    """

    def __init__(self, prob=0.5):
        self.prob = prob
        if not isinstance(self.prob, float):
            raise TypeError("RandomHorizontalFlip: input type is invalid.")

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info), i.e. the
                image np.ndarray and the dict of image-related information;
                otherwise returns (im, im_info, label_info), additionally
                including the dict of annotation-related information.
                The updated fields in label_info are:
                - gt_bbox (np.ndarray): Box coordinates [x1, y1, x2, y2] after
                  the horizontal flip, shape (n, 4), where n is the number of
                  ground-truth boxes.
                - gt_poly (list): x/y coordinates of the polygon segmentations
                  after the horizontal flip; length n.

        Raises:
            TypeError: If the argument type does not match the requirement.
            ValueError: If the data length does not match.
        """
        if not isinstance(im, np.ndarray):
            raise TypeError(
                "RandomHorizontalFlip: image is not a numpy array.")
        if len(im.shape) != 3:
            raise ValueError(
                "RandomHorizontalFlip: image is not 3-dimensional.")
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomHorizontalFlip! ' +
                'Because the im_info and label_info can not be None!')
        if 'augment_shape' not in im_info:
            raise TypeError('Cannot do RandomHorizontalFlip! ' + \
                            'Because augment_shape is not in im_info!')
        if 'gt_bbox' not in label_info:
            raise TypeError('Cannot do RandomHorizontalFlip! ' + \
                            'Because gt_bbox is not in label_info!')
        augment_shape = im_info['augment_shape']
        gt_bbox = label_info['gt_bbox']
        height = augment_shape[0]
        width = augment_shape[1]
        if np.random.uniform(0, 1) < self.prob:
            im = horizontal_flip(im)
            if gt_bbox.shape[0] == 0:
                if label_info is None:
                    return (im, im_info)
                else:
                    return (im, im_info, label_info)
            label_info['gt_bbox'] = box_horizontal_flip(gt_bbox, width)
            if 'gt_poly' in label_info and \
                    len(label_info['gt_poly']) != 0:
                label_info['gt_poly'] = segms_horizontal_flip(
                    label_info['gt_poly'], height, width)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)

class Normalize:
    """Normalize the image.

    1. Scale the image pixel values to the range [0.0, 1.0].
    2. Subtract the mean and divide by the standard deviation.

    Args:
        mean (list): Mean of the dataset images.
            Defaults to [0.485, 0.456, 0.406].
        std (list): Standard deviation of the dataset images.
            Defaults to [0.229, 0.224, 0.225].

    Raises:
        TypeError: If the argument type does not match the requirement.
    """

    def __init__(self, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]):
        self.mean = mean
        self.std = std
        if not (isinstance(self.mean, list) and isinstance(self.std, list)):
            raise TypeError("NormalizeImage: input type is invalid.")
        from functools import reduce
        if reduce(lambda x, y: x * y, self.std) == 0:
            raise TypeError('NormalizeImage: std is invalid!')

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info), i.e. the
                image np.ndarray and the dict of image-related information;
                otherwise returns (im, im_info, label_info), additionally
                including the dict of annotation-related information.
        """
        mean = np.array(self.mean)[np.newaxis, np.newaxis, :]
        std = np.array(self.std)[np.newaxis, np.newaxis, :]
        im = normalize(im, mean, std)
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
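
# Illustrative sketch (assumption, not part of the original module): the
# normalize() helper imported from .ops is assumed to implement the two steps
# described in the docstring, roughly equivalent to:
#
#   im = im / 255.0
#   im = (im - mean) / std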

class RandomDistort:
    """Randomly distort the pixel content of the image with given
    probabilities; a data augmentation operator for model training.

    1. Shuffle the order of the distortion operations.
    2. Following that order, apply each distortion with its probability,
       sampling the distortion factor from the range [-range, range].

    Args:
        brightness_range (float): Range of the brightness factor. Defaults to 0.5.
        brightness_prob (float): Probability of adjusting brightness. Defaults to 0.5.
        contrast_range (float): Range of the contrast factor. Defaults to 0.5.
        contrast_prob (float): Probability of adjusting contrast. Defaults to 0.5.
        saturation_range (float): Range of the saturation factor. Defaults to 0.5.
        saturation_prob (float): Probability of adjusting saturation. Defaults to 0.5.
        hue_range (int): Range of the hue factor. Defaults to 18.
        hue_prob (float): Probability of adjusting hue. Defaults to 0.5.
    """

    def __init__(self,
                 brightness_range=0.5,
                 brightness_prob=0.5,
                 contrast_range=0.5,
                 contrast_prob=0.5,
                 saturation_range=0.5,
                 saturation_prob=0.5,
                 hue_range=18,
                 hue_prob=0.5):
        self.brightness_range = brightness_range
        self.brightness_prob = brightness_prob
        self.contrast_range = contrast_range
        self.contrast_prob = contrast_prob
        self.saturation_range = saturation_range
        self.saturation_prob = saturation_prob
        self.hue_range = hue_range
        self.hue_prob = hue_prob

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info), i.e. the
                image np.ndarray and the dict of image-related information;
                otherwise returns (im, im_info, label_info), additionally
                including the dict of annotation-related information.
        """
        brightness_lower = 1 - self.brightness_range
        brightness_upper = 1 + self.brightness_range
        contrast_lower = 1 - self.contrast_range
        contrast_upper = 1 + self.contrast_range
        saturation_lower = 1 - self.saturation_range
        saturation_upper = 1 + self.saturation_range
        hue_lower = -self.hue_range
        hue_upper = self.hue_range
        ops = [brightness, contrast, saturation, hue]
        random.shuffle(ops)
        params_dict = {
            'brightness': {
                'brightness_lower': brightness_lower,
                'brightness_upper': brightness_upper
            },
            'contrast': {
                'contrast_lower': contrast_lower,
                'contrast_upper': contrast_upper
            },
            'saturation': {
                'saturation_lower': saturation_lower,
                'saturation_upper': saturation_upper
            },
            'hue': {
                'hue_lower': hue_lower,
                'hue_upper': hue_upper
            }
        }
        prob_dict = {
            'brightness': self.brightness_prob,
            'contrast': self.contrast_prob,
            'saturation': self.saturation_prob,
            'hue': self.hue_prob
        }
        im = im.astype('uint8')
        im = Image.fromarray(im)
        for id in range(4):
            params = params_dict[ops[id].__name__]
            prob = prob_dict[ops[id].__name__]
            params['im'] = im
            if np.random.uniform(0, 1) < prob:
                im = ops[id](**params)
        im = np.asarray(im).astype('float32')
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)

class MixupImage:
    """Apply mixup to the image; a data augmentation operator for model
    training. Currently only the YOLOv3 model supports this transform.

    When the mixup field is not present in im_info, return directly;
    otherwise:
    1. Draw a random factor from a Beta distribution.
    2.
       - If factor >= 1.0, remove the mixup field from im_info and return.
       - If factor <= 0.0, return the mixup field of im_info and remove the
         field from im_info.
       - Otherwise:
         (1) Multiply the original image by factor and the mixup image by
             (1-factor), then add the two results.
         (2) Concatenate the boxes of the original image and the mixup image.
         (3) Concatenate the box classes of the original image and the mixup
             image.
         (4) Multiply the mixup scores of the original boxes by factor and
             those of the mixup boxes by (1-factor), then concatenate the two
             results.
    3. Update the augment_shape field in im_info.

    Args:
        alpha (float): First shape parameter of the Beta distribution.
            Defaults to 1.5.
        beta (float): Second shape parameter of the Beta distribution.
            Defaults to 1.5.
        mixup_epoch (int): Use mixup in the first mixup_epoch epochs; the
            strategy is disabled when this parameter is -1. Defaults to -1.

    Raises:
        ValueError: If the data length does not match.
    """

    def __init__(self, alpha=1.5, beta=1.5, mixup_epoch=-1):
        self.alpha = alpha
        self.beta = beta
        if self.alpha <= 0.0:
            raise ValueError("alpha should be positive in MixupImage")
        if self.beta <= 0.0:
            raise ValueError("beta should be positive in MixupImage")
        self.mixup_epoch = mixup_epoch

    def _mixup_img(self, img1, img2, factor):
        h = max(img1.shape[0], img2.shape[0])
        w = max(img1.shape[1], img2.shape[1])
        img = np.zeros((h, w, img1.shape[2]), 'float32')
        img[:img1.shape[0], :img1.shape[1], :] = \
            img1.astype('float32') * factor
        img[:img2.shape[0], :img2.shape[1], :] += \
            img2.astype('float32') * (1.0 - factor)
        return img.astype('uint8')

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info), i.e. the
                image np.ndarray and the dict of image-related information;
                otherwise returns (im, im_info, label_info), additionally
                including the dict of annotation-related information.
                The updated field in im_info is:
                - augment_shape (np.ndarray): Height and width of the image
                  after mixup, shape (2,).
                The removed field in im_info is:
                - mixup (list): Information of the image mixed up with the
                  current one.
                The updated fields in label_info are:
                - gt_bbox (np.ndarray): Ground-truth box coordinates after
                  mixup, shape (n, 4), where n is the number of ground-truth
                  boxes.
                - gt_class (np.ndarray): Class index of each ground-truth box
                  after mixup, shape (n, 1).
                - gt_score (np.ndarray): Mixup score of each ground-truth box
                  after mixup, shape (n, 1).

        Raises:
            TypeError: If the argument type does not match the requirement.
        """
        if im_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Because the im_info can not be None!')
        if 'mixup' not in im_info:
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        factor = np.random.beta(self.alpha, self.beta)
        factor = max(0.0, min(1.0, factor))
        if im_info['epoch'] > self.mixup_epoch \
                or factor >= 1.0:
            im_info.pop('mixup')
            if label_info is None:
                return (im, im_info)
            else:
                return (im, im_info, label_info)
        if factor <= 0.0:
            return im_info.pop('mixup')
        im = self._mixup_img(im, im_info['mixup'][0], factor)
        if label_info is None:
            raise TypeError('Cannot do MixupImage! ' +
                            'Because the label_info can not be None!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info or \
                'gt_score' not in label_info:
            raise TypeError('Cannot do MixupImage! ' + \
                            'Because gt_bbox/gt_class/gt_score is not in label_info!')
        gt_bbox1 = label_info['gt_bbox']
        gt_bbox2 = im_info['mixup'][2]['gt_bbox']
        gt_bbox = np.concatenate((gt_bbox1, gt_bbox2), axis=0)
        gt_class1 = label_info['gt_class']
        gt_class2 = im_info['mixup'][2]['gt_class']
        gt_class = np.concatenate((gt_class1, gt_class2), axis=0)
        gt_score1 = label_info['gt_score']
        gt_score2 = im_info['mixup'][2]['gt_score']
        gt_score = np.concatenate(
            (gt_score1 * factor, gt_score2 * (1. - factor)), axis=0)
        if 'gt_poly' in label_info:
            gt_poly1 = label_info['gt_poly']
            gt_poly2 = im_info['mixup'][2]['gt_poly']
            label_info['gt_poly'] = gt_poly1 + gt_poly2
        is_crowd1 = label_info['is_crowd']
        is_crowd2 = im_info['mixup'][2]['is_crowd']
        is_crowd = np.concatenate((is_crowd1, is_crowd2), axis=0)
        label_info['gt_bbox'] = gt_bbox
        label_info['gt_score'] = gt_score
        label_info['gt_class'] = gt_class
        label_info['is_crowd'] = is_crowd
        im_info['augment_shape'] = np.array([im.shape[0],
                                             im.shape[1]]).astype('int32')
        im_info.pop('mixup')
        if label_info is None:
            return (im, im_info)
        else:
            return (im, im_info, label_info)
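
# Illustrative sketch (assumption, not part of the original module): per-pixel
# mixup as implemented in _mixup_img above, for two equally sized images:
#
#   mixed = img1 * factor + img2 * (1.0 - factor)
#
# The per-box gt_score values are weighted by the same factor and (1 - factor)
# before the two box lists are concatenated.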

class RandomExpand:
    """Randomly expand the image; a data augmentation operator for model
    training.

    1. Randomly pick an expansion ratio (expansion only happens when the ratio
       is greater than 1).
    2. Compute the size of the expanded image.
    3. Create an image filled with fill_value and paste the original image
       onto it at a random position.
    4. Convert the ground-truth box coordinates according to the paste
       position.
    5. Convert the ground-truth segmentation coordinates according to the
       paste position.

    Args:
        ratio (float): Maximum expansion ratio of the image. Defaults to 4.0.
        prob (float): Probability of random expansion. Defaults to 0.5.
        fill_value (list): Initial fill value of the expanded image (0-255).
            Defaults to [123.675, 116.28, 103.53].
    """

    def __init__(self,
                 ratio=4.,
                 prob=0.5,
                 fill_value=[123.675, 116.28, 103.53]):
        super(RandomExpand, self).__init__()
        assert ratio > 1.01, "expand ratio must be larger than 1.01"
        self.ratio = ratio
        self.prob = prob
        assert isinstance(fill_value, Sequence), \
            "fill value must be sequence"
        if not isinstance(fill_value, tuple):
            fill_value = tuple(fill_value)
        self.fill_value = fill_value

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info), i.e. the
                image np.ndarray and the dict of image-related information;
                otherwise returns (im, im_info, label_info), additionally
                including the dict of annotation-related information.
                The updated field in im_info is:
                - augment_shape (np.ndarray): Height and width of the expanded
                  image, shape (2,).
                The updated fields in label_info are:
                - gt_bbox (np.ndarray): Ground-truth box coordinates after the
                  random expansion, shape (n, 4), where n is the number of
                  ground-truth boxes.
                - gt_class (np.ndarray): Class index of each ground-truth box
                  after the random expansion, shape (n, 1).

        Raises:
            TypeError: If the argument type does not match the requirement.
        """
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomExpand! ' +
                'Because the im_info and label_info can not be None!')
        if 'augment_shape' not in im_info:
            raise TypeError('Cannot do RandomExpand! ' + \
                            'Because augment_shape is not in im_info!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info:
            raise TypeError('Cannot do RandomExpand! ' + \
                            'Because gt_bbox/gt_class is not in label_info!')
        if np.random.uniform(0., 1.) < self.prob:
            return (im, im_info, label_info)
        augment_shape = im_info['augment_shape']
        height = int(augment_shape[0])
        width = int(augment_shape[1])
        expand_ratio = np.random.uniform(1., self.ratio)
        h = int(height * expand_ratio)
        w = int(width * expand_ratio)
        if not h > height or not w > width:
            return (im, im_info, label_info)
        y = np.random.randint(0, h - height)
        x = np.random.randint(0, w - width)
        canvas = np.ones((h, w, 3), dtype=np.float32)
        canvas *= np.array(self.fill_value, dtype=np.float32)
        canvas[y:y + height, x:x + width, :] = im
        im_info['augment_shape'] = np.array([h, w]).astype('int32')
        if 'gt_bbox' in label_info and len(label_info['gt_bbox']) > 0:
            label_info['gt_bbox'] += np.array([x, y] * 2, dtype=np.float32)
        if 'gt_poly' in label_info and len(label_info['gt_poly']) > 0:
            label_info['gt_poly'] = expand_segms(label_info['gt_poly'], x, y,
                                                 height, width, expand_ratio)
        return (canvas, im_info, label_info)

class RandomCrop:
    """Randomly crop the image.

    1. If allow_no_crop is True, add 'no_crop' to thresholds.
    2. Randomly shuffle thresholds.
    3. For each element in thresholds:
       (1) If the current thresh is 'no_crop', return the original image and
           annotation info.
       (2) Randomly sample values from aspect_ratio and scaling and compute the
           height, width and origin of a candidate crop region.
       (3) Compute the IoU between the ground-truth boxes and the candidate
           crop region; if all ground-truth boxes have IoU below thresh,
           continue with step 3.
       (4) If cover_all_box is True and any ground-truth box has IoU below
           thresh, continue with step 3.
       (5) Keep the ground-truth boxes that lie inside the candidate crop
           region; if no valid box remains, continue with step 3, otherwise go
           to step 4.
    4. Convert the valid ground-truth box coordinates to be relative to the
       crop region.
    5. Convert the valid segmentation coordinates to be relative to the crop
       region.

    Args:
        aspect_ratio (list): Range of the aspect ratio of the crop, given as
            [min, max]. Defaults to [.5, 2.].
        thresholds (list): IoU thresholds used to decide whether a candidate
            crop region is valid. Defaults to [.0, .1, .3, .5, .7, .9].
        scaling (list): Range of the crop area relative to the original area,
            given as [min, max]. Defaults to [.3, 1.].
        num_attempts (int): Number of attempts before giving up the search for
            a valid crop region. Defaults to 50.
        allow_no_crop (bool): Whether not cropping at all is allowed.
            Defaults to True.
        cover_all_box (bool): Whether all ground-truth boxes must be inside the
            crop region. Defaults to False.
    """

    def __init__(self,
                 aspect_ratio=[.5, 2.],
                 thresholds=[.0, .1, .3, .5, .7, .9],
                 scaling=[.3, 1.],
                 num_attempts=50,
                 allow_no_crop=True,
                 cover_all_box=False):
        self.aspect_ratio = aspect_ratio
        self.thresholds = thresholds
        self.scaling = scaling
        self.num_attempts = num_attempts
        self.allow_no_crop = allow_no_crop
        self.cover_all_box = cover_all_box

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When label_info is None, returns (im, im_info), i.e. the
                image np.ndarray and the dict of image-related information;
                otherwise returns (im, im_info, label_info), additionally
                including the dict of annotation-related information.
                The updated fields in label_info are:
                - gt_bbox (np.ndarray): Ground-truth box coordinates after the
                  random crop, shape (n, 4), where n is the number of
                  ground-truth boxes.
                - gt_class (np.ndarray): Class index of each ground-truth box
                  after the random crop, shape (n, 1).
                - gt_score (np.ndarray): Mixup score of each ground-truth box
                  after the random crop, shape (n, 1).

        Raises:
            TypeError: If the argument type does not match the requirement.
        """
        if im_info is None or label_info is None:
            raise TypeError(
                'Cannot do RandomCrop! ' +
                'Because the im_info and label_info can not be None!')
        if 'augment_shape' not in im_info:
            raise TypeError('Cannot do RandomCrop! ' + \
                            'Because augment_shape is not in im_info!')
        if 'gt_bbox' not in label_info or \
                'gt_class' not in label_info:
            raise TypeError('Cannot do RandomCrop! ' + \
                            'Because gt_bbox/gt_class is not in label_info!')
        if len(label_info['gt_bbox']) == 0:
            return (im, im_info, label_info)
        augment_shape = im_info['augment_shape']
        w = augment_shape[1]
        h = augment_shape[0]
        gt_bbox = label_info['gt_bbox']
        thresholds = list(self.thresholds)
        if self.allow_no_crop:
            thresholds.append('no_crop')
        np.random.shuffle(thresholds)
        for thresh in thresholds:
            if thresh == 'no_crop':
                return (im, im_info, label_info)
            found = False
            for i in range(self.num_attempts):
                scale = np.random.uniform(*self.scaling)
                min_ar, max_ar = self.aspect_ratio
                aspect_ratio = np.random.uniform(
                    max(min_ar, scale**2), min(max_ar, scale**-2))
                crop_h = int(h * scale / np.sqrt(aspect_ratio))
                crop_w = int(w * scale * np.sqrt(aspect_ratio))
                crop_y = np.random.randint(0, h - crop_h)
                crop_x = np.random.randint(0, w - crop_w)
                crop_box = [crop_x, crop_y, crop_x + crop_w, crop_y + crop_h]
                iou = iou_matrix(gt_bbox, np.array([crop_box],
                                                   dtype=np.float32))
                if iou.max() < thresh:
                    continue
                if self.cover_all_box and iou.min() < thresh:
                    continue
                cropped_box, valid_ids = crop_box_with_center_constraint(
                    gt_bbox, np.array(crop_box, dtype=np.float32))
                if valid_ids.size > 0:
                    found = True
                    break
            if found:
                if 'gt_poly' in label_info and len(label_info['gt_poly']) > 0:
                    crop_polys = crop_segms(label_info['gt_poly'], valid_ids,
                                            np.array(crop_box, dtype=np.int64),
                                            h, w)
                    if [] in crop_polys:
                        delete_id = list()
                        valid_polys = list()
                        for id, crop_poly in enumerate(crop_polys):
                            if crop_poly == []:
                                delete_id.append(id)
                            else:
                                valid_polys.append(crop_poly)
                        valid_ids = np.delete(valid_ids, delete_id)
                        if len(valid_polys) == 0:
                            return (im, im_info, label_info)
                        label_info['gt_poly'] = valid_polys
                    else:
                        label_info['gt_poly'] = crop_polys
                im = crop_image(im, crop_box)
                label_info['gt_bbox'] = np.take(cropped_box, valid_ids, axis=0)
                label_info['gt_class'] = np.take(
                    label_info['gt_class'], valid_ids, axis=0)
                im_info['augment_shape'] = np.array(
                    [crop_box[3] - crop_box[1],
                     crop_box[2] - crop_box[0]]).astype('int32')
                if 'gt_score' in label_info:
                    label_info['gt_score'] = np.take(
                        label_info['gt_score'], valid_ids, axis=0)
                if 'is_crowd' in label_info:
                    label_info['is_crowd'] = np.take(
                        label_info['is_crowd'], valid_ids, axis=0)
                return (im, im_info, label_info)
        return (im, im_info, label_info)
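
# Illustrative note (assumption, not part of the original module): the
# candidate crop geometry above keeps the crop area roughly proportional to
# scale**2, since
#
#   crop_h * crop_w ~= (h * scale / sqrt(ar)) * (w * scale * sqrt(ar))
#                    = h * w * scale**2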

class ArrangeFasterRCNN:
    """Collect the fields required by FasterRCNN training/evaluation/prediction.

    Args:
        mode (str): What the data is used for; one of
            ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: If mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When mode is 'train', returns (im, im_resize_info, gt_bbox,
                gt_class, is_crowd), i.e. the image np.ndarray, the resize info
                relative to the original image, the ground-truth boxes, their
                classes and whether each box is a crowd region. When mode is
                'eval', returns (im, im_resize_info, im_id, im_shape, gt_bbox,
                gt_class, is_difficult), i.e. the image np.ndarray, the resize
                info relative to the original image, the image id, the image
                size info, the ground-truth boxes, their classes and whether
                each box is hard to recognize. When mode is 'test' or 'quant',
                returns (im, im_resize_info, im_shape), i.e. the image
                np.ndarray, the resize info relative to the original image and
                the image size info.

        Raises:
            TypeError: If the argument type does not match the requirement.
            ValueError: If the data length does not match.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeFasterRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_resize_info = im_info['im_resize_info']
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_crowd = label_info['is_crowd']
            outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd)
        elif self.mode == 'eval':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeFasterRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_id = im_info['im_id']
            im_shape = np.array(
                (im_info['augment_shape'][0], im_info['augment_shape'][1], 1),
                dtype=np.float32)
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_difficult = label_info['difficult']
            outputs = (im, im_resize_info, im_id, im_shape, gt_bbox, gt_class,
                       is_difficult)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeFasterRCNN! ' +
                                'Because the im_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_shape = np.array(
                (im_info['augment_shape'][0], im_info['augment_shape'][1], 1),
                dtype=np.float32)
            outputs = (im, im_resize_info, im_shape)
        return outputs

class ArrangeMaskRCNN:
    """Collect the fields required by MaskRCNN training/evaluation/prediction.

    Args:
        mode (str): What the data is used for; one of
            ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: If mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When mode is 'train', returns (im, im_resize_info, gt_bbox,
                gt_class, is_crowd, gt_masks), i.e. the image np.ndarray, the
                resize info relative to the original image, the ground-truth
                boxes, their classes, whether each box is a crowd region and
                the ground-truth segmentation regions. When mode is 'eval',
                returns (im, im_resize_info, im_id, im_shape), i.e. the image
                np.ndarray, the resize info relative to the original image, the
                image id and the image size info. When mode is 'test' or
                'quant', returns (im, im_resize_info, im_shape), i.e. the image
                np.ndarray, the resize info relative to the original image and
                the image size info.

        Raises:
            TypeError: If the argument type does not match the requirement.
            ValueError: If the data length does not match.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeTrainMaskRCNN! ' +
                    'Because the im_info and label_info can not be None!')
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_resize_info = im_info['im_resize_info']
            gt_bbox = label_info['gt_bbox']
            gt_class = label_info['gt_class']
            is_crowd = label_info['is_crowd']
            assert 'gt_poly' in label_info
            segms = label_info['gt_poly']
            if len(segms) != 0:
                assert len(segms) == is_crowd.shape[0]
            gt_masks = []
            valid = True
            for i in range(len(segms)):
                segm = segms[i]
                gt_segm = []
                if is_crowd[i]:
                    gt_segm.append([[0, 0]])
                else:
                    for poly in segm:
                        if len(poly) == 0:
                            valid = False
                            break
                        gt_segm.append(np.array(poly).reshape(-1, 2))
                if (not valid) or len(gt_segm) == 0:
                    break
                gt_masks.append(gt_segm)
            outputs = (im, im_resize_info, gt_bbox, gt_class, is_crowd,
                       gt_masks)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeMaskRCNN! ' +
                                'Because the im_info can not be None!')
            im_resize_info = im_info['im_resize_info']
            im_shape = np.array(
                (im_info['augment_shape'][0], im_info['augment_shape'][1], 1),
                dtype=np.float32)
            if self.mode == 'eval':
                im_id = im_info['im_id']
                outputs = (im, im_resize_info, im_id, im_shape)
            else:
                outputs = (im, im_resize_info, im_shape)
        return outputs

class ArrangeYOLOv3:
    """Collect the fields required by YOLOv3 training/evaluation/prediction.

    Args:
        mode (str): What the data is used for; one of
            ['train', 'eval', 'test', 'quant'].

    Raises:
        ValueError: If mode is not in ['train', 'eval', 'test', 'quant'].
    """

    def __init__(self, mode=None):
        if mode not in ['train', 'eval', 'test', 'quant']:
            raise ValueError(
                "mode must be in ['train', 'eval', 'test', 'quant']!")
        self.mode = mode

    def __call__(self, im, im_info=None, label_info=None):
        """
        Args:
            im (np.ndarray): Image data as np.ndarray.
            im_info (dict, optional): Image-related information.
            label_info (dict, optional): Annotation-related information.

        Returns:
            tuple: When mode is 'train', returns (im, gt_bbox, gt_class,
                gt_score, im_shape), i.e. the image np.ndarray, the
                ground-truth boxes, their classes, their mixup scores and the
                image size info. When mode is 'eval', returns (im, im_shape,
                im_id, gt_bbox, gt_class, difficult), i.e. the image
                np.ndarray, the image size info, the image id, the
                ground-truth boxes, their classes and whether each box is hard
                to recognize. When mode is 'test' or 'quant', returns
                (im, im_shape), i.e. the image np.ndarray and the image size
                info.

        Raises:
            TypeError: If the argument type does not match the requirement.
            ValueError: If the data length does not match.
        """
        im = permute(im, False)
        if self.mode == 'train':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeYolov3! ' +
                    'Because the im_info and label_info can not be None!')
            im_shape = im_info['augment_shape']
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            if len(label_info['gt_bbox']) != len(label_info['gt_score']):
                raise ValueError("gt num mismatch: bbox and score.")
            gt_bbox = np.zeros((50, 4), dtype=im.dtype)
            gt_class = np.zeros((50, ), dtype=np.int32)
            gt_score = np.zeros((50, ), dtype=im.dtype)
            gt_num = min(50, len(label_info['gt_bbox']))
            if gt_num > 0:
                label_info['gt_class'][:gt_num, 0] = label_info[
                    'gt_class'][:gt_num, 0] - 1
                gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
                gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
                gt_score[:gt_num] = label_info['gt_score'][:gt_num, 0]
            # parse [x1, y1, x2, y2] to [x, y, w, h]
            gt_bbox[:, 2:4] = gt_bbox[:, 2:4] - gt_bbox[:, :2]
            gt_bbox[:, :2] = gt_bbox[:, :2] + gt_bbox[:, 2:4] / 2.
            outputs = (im, gt_bbox, gt_class, gt_score, im_shape)
        elif self.mode == 'eval':
            if im_info is None or label_info is None:
                raise TypeError(
                    'Cannot do ArrangeYolov3! ' +
                    'Because the im_info and label_info can not be None!')
            im_shape = im_info['augment_shape']
            if len(label_info['gt_bbox']) != len(label_info['gt_class']):
                raise ValueError("gt num mismatch: bbox and class.")
            im_id = im_info['im_id']
            gt_bbox = np.zeros((50, 4), dtype=im.dtype)
            gt_class = np.zeros((50, ), dtype=np.int32)
            difficult = np.zeros((50, ), dtype=np.int32)
            gt_num = min(50, len(label_info['gt_bbox']))
            if gt_num > 0:
                label_info['gt_class'][:gt_num, 0] = label_info[
                    'gt_class'][:gt_num, 0] - 1
                gt_bbox[:gt_num, :] = label_info['gt_bbox'][:gt_num, :]
                gt_class[:gt_num] = label_info['gt_class'][:gt_num, 0]
                difficult[:gt_num] = label_info['difficult'][:gt_num, 0]
            outputs = (im, im_shape, im_id, gt_bbox, gt_class, difficult)
        else:
            if im_info is None:
                raise TypeError('Cannot do ArrangeYolov3! ' +
                                'Because the im_info can not be None!')
            im_shape = im_info['augment_shape']
            outputs = (im, im_shape)
        return outputs
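
# Illustrative worked example (not part of the original module): the
# [x1, y1, x2, y2] -> [cx, cy, w, h] conversion used in ArrangeYOLOv3.__call__
# for a single box:
#
#   box = np.array([[10., 20., 50., 100.]])      # x1, y1, x2, y2
#   box[:, 2:4] = box[:, 2:4] - box[:, :2]       # w = 40, h = 80
#   box[:, :2] = box[:, :2] + box[:, 2:4] / 2.   # cx = 30, cy = 60
#   # box is now [[30., 60., 40., 80.]]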