metrics.py

# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
from sklearn.metrics import hamming_loss
from sklearn.metrics import accuracy_score as accuracy_metric
from sklearn.metrics import multilabel_confusion_matrix
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import binarize

__all__ = [
    "multi_hot_encode",
    "hamming_distance",
    "accuracy_score",
    "precision_recall_fscore",
    "mean_average_precision",
]


def multi_hot_encode(logits, threshold=0.5):
    """
    Encode logits into a multi-hot matrix by thresholding each element
    independently (multilabel setting).
    """
    # `threshold` is keyword-only in recent scikit-learn releases.
    return binarize(logits, threshold=threshold)
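
# A minimal usage sketch; the score values below are illustrative, not from
# the source:
#
#   >>> scores = np.array([[0.9, 0.2, 0.6], [0.3, 0.7, 0.1]])
#   >>> multi_hot_encode(scores)
#   array([[1., 0., 1.],
#          [0., 1., 0.]])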


def hamming_distance(output, target):
    """
    Soft, label-based metric for multilabel classification: the fraction
    of label assignments that are predicted incorrectly.

    Returns:
        The Hamming loss; the smaller the value, the better the model.
    """
    return hamming_loss(target, output)
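
# A minimal usage sketch with made-up predictions and ground truth:
#
#   >>> pred = np.array([[1, 0], [0, 1]])
#   >>> truth = np.array([[1, 1], [0, 1]])
#   >>> hamming_distance(pred, truth)  # 1 wrong label out of 4
#   0.25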


def accuracy_score(output, target, base="sample"):
    """
    Hard metric for multilabel classification.

    Args:
        output: binarized predictions, shape (num_samples, num_labels).
        target: ground-truth labels, shape (num_samples, num_labels).
        base: one of ["sample", "label"], default="sample".
            If "sample", return the exact-match ratio over samples;
            if "label", return the accuracy aggregated over labels.

    Returns:
        accuracy: the accuracy score.
    """
    assert base in ["sample", "label"], 'base must be one of ["sample", "label"]'
    if base == "sample":
        accuracy = accuracy_metric(target, output)
    elif base == "label":
        mcm = multilabel_confusion_matrix(target, output)
        tns = mcm[:, 0, 0]
        fns = mcm[:, 1, 0]
        tps = mcm[:, 1, 1]
        fps = mcm[:, 0, 1]
        accuracy = (sum(tps) + sum(tns)) / (
            sum(tps) + sum(tns) + sum(fns) + sum(fps))
    return accuracy
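
# A minimal usage sketch contrasting the two bases; values are illustrative,
# not from the source:
#
#   >>> pred = np.array([[1, 0], [0, 1]])
#   >>> truth = np.array([[1, 1], [0, 1]])
#   >>> accuracy_score(pred, truth, base="sample")  # 1 of 2 exact matches
#   0.5
#   >>> accuracy_score(pred, truth, base="label")  # 3 of 4 label decisions
#   0.75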


def precision_recall_fscore(output, target):
    """
    Per-label metrics for multilabel classification.

    Returns:
        precisions: precision for each label.
        recalls: recall for each label.
        fscores: F1-score for each label.
    """
    precisions, recalls, fscores, _ = precision_recall_fscore_support(target, output)
    return precisions, recalls, fscores
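
# A minimal usage sketch with made-up values; each returned array has one
# entry per label:
#
#   >>> pred = np.array([[1, 0], [1, 1]])
#   >>> truth = np.array([[1, 0], [0, 1]])
#   >>> p, r, f = precision_recall_fscore(pred, truth)
#   >>> p  # per-label precision
#   array([0.5, 1. ])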


def mean_average_precision(logits, target):
    """
    Calculate the mean average precision (mAP) over labels.

    Args:
        logits: raw scores from the network, before sigmoid or softmax;
            average precision depends only on the ranking of the scores.
        target: ground-truth labels, 0 or 1.

    Returns:
        The average precision averaged over all labels.
    """
    if not (isinstance(logits, np.ndarray) and isinstance(target, np.ndarray)):
        raise TypeError("logits and target should be np.ndarray.")
    aps = []
    # Compute average precision independently for each label column.
    for i in range(target.shape[1]):
        ap = average_precision_score(target[:, i], logits[:, i])
        aps.append(ap)
    return np.mean(aps)
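

if __name__ == "__main__":
    # Minimal smoke test tying the metrics together; the scores and labels
    # below are made-up illustrative values, not from the source.
    scores = np.array([[0.9, 0.2], [0.3, 0.8], [0.7, 0.6]])
    labels = np.array([[1, 0], [0, 1], [1, 1]])
    preds = multi_hot_encode(scores)
    print("hamming distance:", hamming_distance(preds, labels))              # 0.0
    print("sample accuracy:", accuracy_score(preds, labels, base="sample"))  # 1.0
    print("label accuracy:", accuracy_score(preds, labels, base="label"))    # 1.0
    print("mAP:", mean_average_precision(scores, labels))                    # 1.0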