loss.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import paddle.fluid as fluid
import numpy as np

def softmax_with_loss(logit,
                      label,
                      ignore_mask=None,
                      num_classes=2,
                      weight=None,
                      ignore_index=255):
    """Softmax cross-entropy loss with optional per-class weighting.

    `weight` may be None, a list of `num_classes` floats, the string
    'dynamic' (weights derived from the inverse class frequency of the
    current batch), or a Variable of shape [num_classes].
    """
    ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
    # Clip labels to the valid range [0, num_classes - 1].
    label = fluid.layers.elementwise_min(
        label,
        fluid.layers.assign(np.array([num_classes - 1], dtype=np.int32)))
    # NCHW -> NHWC, then flatten to one row per pixel.
    logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
    logit = fluid.layers.reshape(logit, [-1, num_classes])
    label = fluid.layers.reshape(label, [-1, 1])
    label = fluid.layers.cast(label, 'int64')
    ignore_mask = fluid.layers.reshape(ignore_mask, [-1, 1])
    if weight is None:
        loss, probs = fluid.layers.softmax_with_cross_entropy(
            logit, label, ignore_index=ignore_index, return_softmax=True)
    else:
        label_one_hot = fluid.layers.one_hot(input=label, depth=num_classes)
        if isinstance(weight, list):
            assert len(weight) == num_classes, \
                "weight length must equal num of classes"
            weight = fluid.layers.assign(np.array([weight], dtype='float32'))
        elif isinstance(weight, str):
            assert weight.lower() == 'dynamic', \
                'if weight is string, must be dynamic!'
            # Inverse-frequency weighting: rare classes get larger weights.
            tmp = []
            total_num = fluid.layers.cast(
                fluid.layers.shape(label)[0], 'float32')
            for i in range(num_classes):
                cls_pixel_num = fluid.layers.reduce_sum(label_one_hot[:, i])
                ratio = total_num / (cls_pixel_num + 1)
                tmp.append(ratio)
            weight = fluid.layers.concat(tmp)
            # Normalize so the weights sum to num_classes.
            weight = weight / fluid.layers.reduce_sum(weight) * num_classes
        elif isinstance(weight, fluid.layers.Variable):
            pass
        else:
            raise ValueError(
                'Expect weight is a list, string or Variable, but receive {}'.
                format(type(weight)))
        weight = fluid.layers.reshape(weight, [1, num_classes])
        weighted_label_one_hot = fluid.layers.elementwise_mul(
            label_one_hot, weight)
        probs = fluid.layers.softmax(logit)
        loss = fluid.layers.cross_entropy(
            probs,
            weighted_label_one_hot,
            soft_label=True,
            ignore_index=ignore_index)
        weighted_label_one_hot.stop_gradient = True

    # Zero out ignored pixels and average over the valid ones only.
    loss = loss * ignore_mask
    avg_loss = fluid.layers.mean(loss) / (
        fluid.layers.mean(ignore_mask) + 0.00001)

    label.stop_gradient = True
    ignore_mask.stop_gradient = True
    return avg_loss
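

# --- Usage sketch (added for illustration; not part of the original file). ---
# Builds a toy Paddle 1.x static-graph program and wires 64x64 inputs into
# softmax_with_loss with 'dynamic' (inverse-frequency) class weighting.
# The variable names and shapes here are assumptions for the example.
def _softmax_loss_example(num_classes=2):
    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        logit = fluid.data(
            name='logit', shape=[-1, num_classes, 64, 64], dtype='float32')
        # int32 labels, as expected by the elementwise_min clipping above.
        label = fluid.data(name='label', shape=[-1, 1, 64, 64], dtype='int32')
        # 1 where a pixel counts toward the loss, 0 where it is ignored.
        mask = fluid.data(name='mask', shape=[-1, 1, 64, 64], dtype='int32')
        return softmax_with_loss(
            logit, label, ignore_mask=mask, num_classes=num_classes,
            weight='dynamic')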


# TODO: rework how ignore_index and ignore_mask are applied in the losses below.
def dice_loss(logit, label, ignore_mask=None, epsilon=0.00001):
    """Dice loss for single-channel (binary) segmentation."""
    if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
        raise Exception(
            "dice loss is only applicable to one-channel classification")
    ignore_mask = fluid.layers.cast(ignore_mask, 'float32')
    logit = fluid.layers.transpose(logit, [0, 2, 3, 1])
    label = fluid.layers.transpose(label, [0, 2, 3, 1])
    # Cast to float32 so the label can be multiplied with the float mask
    # and probabilities below.
    label = fluid.layers.cast(label, 'float32')
    ignore_mask = fluid.layers.transpose(ignore_mask, [0, 2, 3, 1])
    logit = fluid.layers.sigmoid(logit)
    # Ignored pixels contribute to neither the intersection nor the union.
    logit = logit * ignore_mask
    label = label * ignore_mask
    reduce_dim = list(range(1, len(logit.shape)))
    inse = fluid.layers.reduce_sum(logit * label, dim=reduce_dim)
    dice_denominator = fluid.layers.reduce_sum(
        logit, dim=reduce_dim) + fluid.layers.reduce_sum(
            label, dim=reduce_dim)
    # dice coefficient = 2 * intersection / (|pred| + |label|);
    # despite its name, dice_score here already holds 1 - coefficient.
    dice_score = 1 - inse * 2 / (dice_denominator + epsilon)
    label.stop_gradient = True
    ignore_mask.stop_gradient = True
    return fluid.layers.reduce_mean(dice_score)
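

# --- Usage sketch (added for illustration; not part of the original file). ---
# dice_loss expects single-channel NCHW logits. With a perfect prediction
# (large positive logits on foreground pixels, large negative elsewhere) the
# loss approaches 0; with a complete mismatch it approaches 1. Names and
# shapes are assumptions for the example.
def _dice_loss_example():
    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        logit = fluid.data(
            name='dice_logit', shape=[-1, 1, 64, 64], dtype='float32')
        label = fluid.data(
            name='dice_label', shape=[-1, 1, 64, 64], dtype='int32')
        mask = fluid.data(
            name='dice_mask', shape=[-1, 1, 64, 64], dtype='int32')
        return dice_loss(logit, label, ignore_mask=mask)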


def bce_loss(logit, label, ignore_mask=None, ignore_index=255):
    """Sigmoid (binary) cross-entropy loss for single-channel logits."""
    if logit.shape[1] != 1 or label.shape[1] != 1 or ignore_mask.shape[1] != 1:
        raise Exception("bce loss is only applicable to binary classification")
    label = fluid.layers.cast(label, 'float32')
    # Pixels equal to ignore_index are excluded inside the op itself;
    # normalize=True divides the summed loss by the number of non-ignored
    # pixels (set it to False for a plain sum).
    loss = fluid.layers.sigmoid_cross_entropy_with_logits(
        x=logit, label=label, ignore_index=ignore_index, normalize=True)
    loss = fluid.layers.reduce_sum(loss)
    label.stop_gradient = True
    ignore_mask.stop_gradient = True
    return loss
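

# --- Usage sketch (added for illustration; not part of the original file). ---
# A common pattern for binary segmentation, assumed here rather than taken
# from this file, is to optimize the sum of the BCE and dice losses; both
# functions above accept the same single-channel inputs. This sketch runs
# the combined loss end to end on random data with a Paddle 1.x Executor.
def _combined_bce_dice_example():
    main_prog, startup_prog = fluid.Program(), fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        logit = fluid.data(
            name='bin_logit', shape=[-1, 1, 32, 32], dtype='float32')
        label = fluid.data(
            name='bin_label', shape=[-1, 1, 32, 32], dtype='int32')
        mask = fluid.data(
            name='bin_mask', shape=[-1, 1, 32, 32], dtype='int32')
        loss = (bce_loss(logit, label, ignore_mask=mask) +
                dice_loss(logit, label, ignore_mask=mask))
    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)
    feed = {
        'bin_logit': np.random.randn(2, 1, 32, 32).astype('float32'),
        'bin_label': np.random.randint(0, 2, (2, 1, 32, 32)).astype('int32'),
        'bin_mask': np.ones((2, 1, 32, 32), dtype='int32'),
    }
    return exe.run(main_prog, feed=feed, fetch_list=[loss])[0]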