deeplabv3p.py 20 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490
  1. # coding: utf8
  2. # copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. from __future__ import absolute_import
  16. from __future__ import division
  17. from __future__ import print_function
  18. from collections import OrderedDict
  19. import paddle.fluid as fluid
  20. from .model_utils.libs import scope, name_scope
  21. from .model_utils.libs import bn, bn_relu, relu, qsigmoid
  22. from .model_utils.libs import conv, max_pool, deconv
  23. from .model_utils.libs import separate_conv
  24. from .model_utils.libs import sigmoid_to_softmax
  25. from .model_utils.loss import softmax_with_loss
  26. from .model_utils.loss import dice_loss
  27. from .model_utils.loss import bce_loss
  28. from paddlex.cv.nets.xception import Xception
  29. from paddlex.cv.nets.mobilenet_v2 import MobileNetV2
  30. class DeepLabv3p(object):
  31. """实现DeepLabv3+模型
  32. `"Encoder-Decoder with Atrous Separable Convolution for Semantic Image Segmentation"
  33. <https://arxiv.org/abs/1802.02611>`
  34. Args:
  35. num_classes (int): 类别数。
  36. backbone (paddlex.cv.nets): 神经网络,实现DeepLabv3+特征图的计算。
  37. mode (str): 网络运行模式,根据mode构建网络的输入和返回。
  38. 当mode为'train'时,输入为image(-1, 3, -1, -1)和label (-1, 1, -1, -1) 返回loss。
  39. 当mode为'train'时,输入为image (-1, 3, -1, -1)和label (-1, 1, -1, -1),返回loss,
  40. pred (与网络输入label 相同大小的预测结果,值代表相应的类别),label,mask(非忽略值的mask,
  41. 与label相同大小,bool类型)。
  42. 当mode为'test'时,输入为image(-1, 3, -1, -1)返回pred (-1, 1, -1, -1)和
  43. logit (-1, num_classes, -1, -1) 通道维上代表每一类的概率值。
  44. output_stride (int): backbone 输出特征图相对于输入的下采样倍数,一般取值为8或16。
  45. aspp_with_sep_conv (bool): 在asspp模块是否采用separable convolutions。
  46. decoder_use_sep_conv (bool): decoder模块是否采用separable convolutions。
  47. encoder_with_aspp (bool): 是否在encoder阶段采用aspp模块。
  48. enable_decoder (bool): 是否使用decoder模块。
  49. use_bce_loss (bool): 是否使用bce loss作为网络的损失函数,只能用于两类分割。可与dice loss同时使用。
  50. use_dice_loss (bool): 是否使用dice loss作为网络的损失函数,只能用于两类分割,可与bce loss同时使用。
  51. 当use_bce_loss和use_dice_loss都为False时,使用交叉熵损失函数。
  52. class_weight (list/str): 交叉熵损失函数各类损失的权重。当class_weight为list的时候,长度应为
  53. num_classes。当class_weight为str时, weight.lower()应为'dynamic',这时会根据每一轮各类像素的比重
  54. 自行计算相应的权重,每一类的权重为:每类的比例 * num_classes。class_weight取默认值None是,各类的权重1,
  55. 即平时使用的交叉熵损失函数。
  56. ignore_index (int): label上忽略的值,label为ignore_index的像素不参与损失函数的计算。
  57. fixed_input_shape (list): 长度为2,维度为1的list,如:[640,720],用来固定模型输入:'image'的shape,默认为None。
  58. Raises:
  59. ValueError: use_bce_loss或use_dice_loss为真且num_calsses > 2。
  60. ValueError: class_weight为list, 但长度不等于num_class。
  61. class_weight为str, 但class_weight.low()不等于dynamic。
  62. TypeError: class_weight不为None时,其类型不是list或str。
  63. """
  64. def __init__(self,
  65. num_classes,
  66. backbone,
  67. input_channel=3,
  68. mode='train',
  69. output_stride=16,
  70. aspp_with_sep_conv=True,
  71. decoder_use_sep_conv=True,
  72. encoder_with_aspp=True,
  73. enable_decoder=True,
  74. use_bce_loss=False,
  75. use_dice_loss=False,
  76. class_weight=None,
  77. ignore_index=255,
  78. fixed_input_shape=None,
  79. pooling_stride=[1, 1],
  80. pooling_crop_size=None,
  81. aspp_with_se=False,
  82. se_use_qsigmoid=False,
  83. aspp_convs_filters=256,
  84. aspp_with_concat_projection=True,
  85. add_image_level_feature=True,
  86. use_sum_merge=False,
  87. conv_filters=256,
  88. output_is_logits=False):
  89. # dice_loss或bce_loss只适用两类分割中
  90. if num_classes > 2 and (use_bce_loss or use_dice_loss):
  91. raise ValueError(
  92. "dice loss and bce loss is only applicable to binary classfication"
  93. )
  94. if class_weight is not None:
  95. if isinstance(class_weight, list):
  96. if len(class_weight) != num_classes:
  97. raise ValueError(
  98. "Length of class_weight should be equal to number of classes"
  99. )
  100. elif isinstance(class_weight, str):
  101. if class_weight.lower() != 'dynamic':
  102. raise ValueError(
  103. "if class_weight is string, must be dynamic!")
  104. else:
  105. raise TypeError(
  106. 'Expect class_weight is a list or string but receive {}'.
  107. format(type(class_weight)))
  108. self.num_classes = num_classes
  109. self.input_channel = input_channel
  110. self.backbone = backbone
  111. self.mode = mode
  112. self.use_bce_loss = use_bce_loss
  113. self.use_dice_loss = use_dice_loss
  114. self.class_weight = class_weight
  115. self.ignore_index = ignore_index
  116. self.output_stride = output_stride
  117. self.aspp_with_sep_conv = aspp_with_sep_conv
  118. self.decoder_use_sep_conv = decoder_use_sep_conv
  119. self.encoder_with_aspp = encoder_with_aspp
  120. self.enable_decoder = enable_decoder
  121. self.fixed_input_shape = fixed_input_shape
  122. self.output_is_logits = output_is_logits
  123. self.aspp_convs_filters = aspp_convs_filters
  124. self.output_stride = output_stride
  125. self.pooling_crop_size = pooling_crop_size
  126. self.pooling_stride = pooling_stride
  127. self.se_use_qsigmoid = se_use_qsigmoid
  128. self.aspp_with_concat_projection = aspp_with_concat_projection
  129. self.add_image_level_feature = add_image_level_feature
  130. self.aspp_with_se = aspp_with_se
  131. self.use_sum_merge = use_sum_merge
  132. self.conv_filters = conv_filters
    def _encoder(self, input):
        """ASPP encoder head.

        Builds the Atrous Spatial Pyramid Pooling module: an image-pooling
        branch, a 1x1-conv branch and three parallel dilated-conv branches run
        in parallel; their outputs are concatenated and optionally projected
        by a final 1x1 conv.

        Args:
            input: Backbone feature map (NCHW).

        Returns:
            Encoded feature map.
        """
        # The dilation rates of the three atrous branches depend on the
        # backbone's output stride; any other stride disables those branches.
        if self.output_stride == 16:
            aspp_ratios = [6, 12, 18]
        elif self.output_stride == 8:
            aspp_ratios = [12, 24, 36]
        else:
            aspp_ratios = []
        param_attr = fluid.ParamAttr(
            name=name_scope + 'weights',
            regularizer=None,
            initializer=fluid.initializer.TruncatedNormal(
                loc=0.0, scale=0.06))
        concat_logits = []
        with scope('encoder'):
            channel = self.aspp_convs_filters
            with scope("image_pool"):
                if self.pooling_crop_size is None:
                    # Global image pooling over H and W.
                    image_avg = fluid.layers.reduce_mean(
                        input, [2, 3], keep_dim=True)
                else:
                    # Fixed-window average pooling sized from the crop size.
                    # NOTE(review): pool_w comes from pooling_crop_size[0] yet
                    # is used as the second (width) element of pool_size —
                    # verify the [w, h] vs [h, w] ordering against callers.
                    pool_w = int((self.pooling_crop_size[0] - 1.0) /
                                 self.output_stride + 1.0)
                    pool_h = int((self.pooling_crop_size[1] - 1.0) /
                                 self.output_stride + 1.0)
                    image_avg = fluid.layers.pool2d(
                        input,
                        pool_size=(pool_h, pool_w),
                        pool_stride=self.pooling_stride,
                        pool_type='avg',
                        pool_padding='VALID')
                act = qsigmoid if self.se_use_qsigmoid else bn_relu
                image_avg = act(
                    conv(
                        image_avg,
                        channel,
                        1,
                        1,
                        groups=1,
                        padding=0,
                        param_attr=param_attr))
                input_shape = fluid.layers.shape(input)
                # Upsample the pooled feature back to the input's spatial size.
                image_avg = fluid.layers.resize_bilinear(image_avg,
                                                         input_shape[2:])
                if self.add_image_level_feature:
                    concat_logits.append(image_avg)
            with scope("aspp0"):
                # Plain 1x1-conv branch.
                aspp0 = bn_relu(
                    conv(
                        input,
                        channel,
                        1,
                        1,
                        groups=1,
                        padding=0,
                        param_attr=param_attr))
                concat_logits.append(aspp0)
            # Three dilated 3x3 branches: depthwise-separable convs when
            # aspp_with_sep_conv is set, plain convs otherwise.
            if aspp_ratios:
                with scope("aspp1"):
                    if self.aspp_with_sep_conv:
                        aspp1 = separate_conv(
                            input,
                            channel,
                            1,
                            3,
                            dilation=aspp_ratios[0],
                            act=relu)
                    else:
                        aspp1 = bn_relu(
                            conv(
                                input,
                                channel,
                                stride=1,
                                filter_size=3,
                                dilation=aspp_ratios[0],
                                padding=aspp_ratios[0],
                                param_attr=param_attr))
                    concat_logits.append(aspp1)
                with scope("aspp2"):
                    if self.aspp_with_sep_conv:
                        aspp2 = separate_conv(
                            input,
                            channel,
                            1,
                            3,
                            dilation=aspp_ratios[1],
                            act=relu)
                    else:
                        aspp2 = bn_relu(
                            conv(
                                input,
                                channel,
                                stride=1,
                                filter_size=3,
                                dilation=aspp_ratios[1],
                                padding=aspp_ratios[1],
                                param_attr=param_attr))
                    concat_logits.append(aspp2)
                with scope("aspp3"):
                    if self.aspp_with_sep_conv:
                        aspp3 = separate_conv(
                            input,
                            channel,
                            1,
                            3,
                            dilation=aspp_ratios[2],
                            act=relu)
                    else:
                        aspp3 = bn_relu(
                            conv(
                                input,
                                channel,
                                stride=1,
                                filter_size=3,
                                dilation=aspp_ratios[2],
                                padding=aspp_ratios[2],
                                param_attr=param_attr))
                    concat_logits.append(aspp3)
            with scope("concat"):
                data = fluid.layers.concat(concat_logits, axis=1)
                if self.aspp_with_concat_projection:
                    data = bn_relu(
                        conv(
                            data,
                            channel,
                            1,
                            1,
                            groups=1,
                            padding=0,
                            param_attr=param_attr))
                    # NOTE(review): second argument is dropout_prob=0.9
                    # (drops 90% of activations); looks high — confirm this
                    # was not meant to be keep_prob.
                    data = fluid.layers.dropout(data, 0.9)
            if self.aspp_with_se:
                # SE-style gating by the pooled branch.
                data = data * image_avg
            return data
  270. def _decoder_with_sum_merge(self, encode_data, decode_shortcut,
  271. param_attr):
  272. decode_shortcut_shape = fluid.layers.shape(decode_shortcut)
  273. encode_data = fluid.layers.resize_bilinear(encode_data,
  274. decode_shortcut_shape[2:])
  275. encode_data = conv(
  276. encode_data,
  277. self.conv_filters,
  278. 1,
  279. 1,
  280. groups=1,
  281. padding=0,
  282. param_attr=param_attr)
  283. with scope('merge'):
  284. decode_shortcut = conv(
  285. decode_shortcut,
  286. self.conv_filters,
  287. 1,
  288. 1,
  289. groups=1,
  290. padding=0,
  291. param_attr=param_attr)
  292. return encode_data + decode_shortcut
  293. def _decoder_with_concat(self, encode_data, decode_shortcut, param_attr):
  294. with scope('concat'):
  295. decode_shortcut = bn_relu(
  296. conv(
  297. decode_shortcut,
  298. 48,
  299. 1,
  300. 1,
  301. groups=1,
  302. padding=0,
  303. param_attr=param_attr))
  304. decode_shortcut_shape = fluid.layers.shape(decode_shortcut)
  305. encode_data = fluid.layers.resize_bilinear(
  306. encode_data, decode_shortcut_shape[2:])
  307. encode_data = fluid.layers.concat(
  308. [encode_data, decode_shortcut], axis=1)
  309. if self.decoder_use_sep_conv:
  310. with scope("separable_conv1"):
  311. encode_data = separate_conv(
  312. encode_data, self.conv_filters, 1, 3, dilation=1, act=relu)
  313. with scope("separable_conv2"):
  314. encode_data = separate_conv(
  315. encode_data, self.conv_filters, 1, 3, dilation=1, act=relu)
  316. else:
  317. with scope("decoder_conv1"):
  318. encode_data = bn_relu(
  319. conv(
  320. encode_data,
  321. self.conv_filters,
  322. stride=1,
  323. filter_size=3,
  324. dilation=1,
  325. padding=1,
  326. param_attr=param_attr))
  327. with scope("decoder_conv2"):
  328. encode_data = bn_relu(
  329. conv(
  330. encode_data,
  331. self.conv_filters,
  332. stride=1,
  333. filter_size=3,
  334. dilation=1,
  335. padding=1,
  336. param_attr=param_attr))
  337. return encode_data
  338. def _decoder(self, encode_data, decode_shortcut):
  339. # 解码器配置
  340. # encode_data:编码器输出
  341. # decode_shortcut: 从backbone引出的分支, resize后与encode_data concat
  342. # decoder_use_sep_conv: 默认为真,则concat后连接两个可分离卷积,否则为普通卷积
  343. param_attr = fluid.ParamAttr(
  344. name=name_scope + 'weights',
  345. regularizer=None,
  346. initializer=fluid.initializer.TruncatedNormal(
  347. loc=0.0, scale=0.06))
  348. with scope('decoder'):
  349. if self.use_sum_merge:
  350. return self._decoder_with_sum_merge(
  351. encode_data, decode_shortcut, param_attr)
  352. return self._decoder_with_concat(encode_data, decode_shortcut,
  353. param_attr)
  354. def _get_loss(self, logit, label, mask):
  355. avg_loss = 0
  356. if not (self.use_dice_loss or self.use_bce_loss):
  357. avg_loss += softmax_with_loss(
  358. logit,
  359. label,
  360. mask,
  361. num_classes=self.num_classes,
  362. weight=self.class_weight,
  363. ignore_index=self.ignore_index)
  364. else:
  365. if self.use_dice_loss:
  366. avg_loss += dice_loss(logit, label, mask)
  367. if self.use_bce_loss:
  368. avg_loss += bce_loss(
  369. logit, label, mask, ignore_index=self.ignore_index)
  370. return avg_loss
  371. def generate_inputs(self):
  372. inputs = OrderedDict()
  373. if self.fixed_input_shape is not None:
  374. input_shape = [
  375. None, self.input_channel, self.fixed_input_shape[1],
  376. self.fixed_input_shape[0]
  377. ]
  378. inputs['image'] = fluid.data(
  379. dtype='float32', shape=input_shape, name='image')
  380. else:
  381. inputs['image'] = fluid.data(
  382. dtype='float32',
  383. shape=[None, self.input_channel, None, None],
  384. name='image')
  385. if self.mode == 'train':
  386. inputs['label'] = fluid.data(
  387. dtype='int32', shape=[None, 1, None, None], name='label')
  388. return inputs
    def build_net(self, inputs):
        """Build the network graph for the configured mode.

        Args:
            inputs (OrderedDict): Placeholders from generate_inputs().

        Returns:
            mode 'train': the loss tensor.
            mode 'eval': (loss, pred, label, mask).
            any other mode (test/inference): (pred, logit), where logit holds
            per-class probabilities after softmax/sigmoid.
        """
        # For binary segmentation with dice/bce loss the final logit has a
        # single channel.
        # NOTE(review): this mutates self.num_classes as a side effect of
        # graph construction.
        if self.use_dice_loss or self.use_bce_loss:
            self.num_classes = 1
        image = inputs['image']
        # MobileNetV3 backbones return the shortcut feature directly; other
        # backbones return all intermediate features plus an index
        # (decode_points) selecting the shortcut.
        if 'MobileNetV3' in self.backbone.__class__.__name__:
            data, decode_shortcut = self.backbone(image)
        else:
            data, decode_shortcuts = self.backbone(image)
            decode_shortcut = decode_shortcuts[self.backbone.decode_points]
        # Encoder (ASPP) and decoder stages, each optional.
        if self.encoder_with_aspp:
            data = self._encoder(data)
        if self.enable_decoder:
            data = self._decoder(data, decode_shortcut)
        # Final classification conv (unless the features already are logits),
        # then resize the logits to the original image size.
        param_attr = fluid.ParamAttr(
            name=name_scope + 'weights',
            regularizer=fluid.regularizer.L2DecayRegularizer(
                regularization_coeff=0.0),
            initializer=fluid.initializer.TruncatedNormal(
                loc=0.0, scale=0.01))
        if not self.output_is_logits:
            with scope('logit'):
                with fluid.name_scope('last_conv'):
                    logit = conv(
                        data,
                        self.num_classes,
                        1,
                        stride=1,
                        padding=0,
                        bias_attr=True,
                        param_attr=param_attr)
        else:
            logit = data
        image_shape = fluid.layers.shape(image)
        logit = fluid.layers.resize_bilinear(logit, image_shape[2:])
        # Per-pixel class prediction in NHWC order; single-channel logits are
        # expanded to two-class probabilities first.
        if self.num_classes == 1:
            out = sigmoid_to_softmax(logit)
            out = fluid.layers.transpose(out, [0, 2, 3, 1])
        else:
            out = fluid.layers.transpose(logit, [0, 2, 3, 1])
        pred = fluid.layers.argmax(out, axis=3)
        pred = fluid.layers.unsqueeze(pred, axes=[3])
        if self.mode == 'train':
            label = inputs['label']
            mask = label != self.ignore_index
            return self._get_loss(logit, label, mask)
        elif self.mode == 'eval':
            # NOTE(review): this reads inputs['label']; generate_inputs() must
            # provide a label placeholder in 'eval' mode for this to work.
            label = inputs['label']
            mask = label != self.ignore_index
            loss = self._get_loss(logit, label, mask)
            return loss, pred, label, mask
        else:
            if self.num_classes == 1:
                logit = sigmoid_to_softmax(logit)
            else:
                logit = fluid.layers.softmax(logit, axis=1)
            return pred, logit
        # Unreachable: every mode branch above returns first.
        return logit