metrics.py 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300
  1. # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
  2. #
  3. # Licensed under the Apache License, Version 2.0 (the "License");
  4. # you may not use this file except in compliance with the License.
  5. # You may obtain a copy of the License at
  6. #
  7. # http://www.apache.org/licenses/LICENSE-2.0
  8. #
  9. # Unless required by applicable law or agreed to in writing, software
  10. # distributed under the License is distributed on an "AS IS" BASIS,
  11. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12. # See the License for the specific language governing permissions and
  13. # limitations under the License.
  14. from __future__ import absolute_import
  15. from __future__ import division
  16. from __future__ import print_function
  17. import os
  18. import sys
  19. import json
  20. import paddle
  21. import numpy as np
  22. from .map_utils import prune_zero_padding, DetectionMAP
  23. from .coco_utils import get_infer_results, cocoapi_eval
  24. from .widerface_utils import face_eval_run
  25. from paddlex.ppdet.data.source.category import get_categories
  26. from paddlex.ppdet.utils.logger import setup_logger
  27. logger = setup_logger(__name__)
__all__ = [
    'Metric', 'COCOMetric', 'VOCMetric', 'WiderFaceMetric', 'get_infer_results'
]

# Per-keypoint OKS (Object Keypoint Similarity) sigmas used when running the
# COCO keypoint evaluation ('keypoints' style) — one value per keypoint,
# 17 entries for the standard COCO keypoint layout.
COCO_SIGMAS = np.array([
    .26, .25, .25, .35, .35, .79, .79, .72, .72, .62, .62, 1.07, 1.07, .87,
    .87, .89, .89
]) / 10.0
# 14-entry sigmas used for the 'keypoints_crowd' IouType
# (presumably the CrowdPose keypoint layout — TODO confirm).
CROWD_SIGMAS = np.array(
    [.79, .79, .72, .72, .62, .62, 1.07, 1.07, .87, .87, .89, .89, .79,
     .79]) / 10.0
  38. class Metric(paddle.metric.Metric):
  39. def name(self):
  40. return self.__class__.__name__
  41. def reset(self):
  42. pass
  43. def accumulate(self):
  44. pass
  45. # paddle.metric.Metric defined :metch:`update`, :meth:`accumulate`
  46. # :metch:`reset`, in ppdet, we also need following 2 methods:
  47. # abstract method for logging metric results
  48. def log(self):
  49. pass
  50. # abstract method for getting metric results
  51. def get_results(self):
  52. pass
  53. class COCOMetric(Metric):
  54. def __init__(self, anno_file, **kwargs):
  55. assert os.path.isfile(anno_file), \
  56. "anno_file {} not a file".format(anno_file)
  57. self.anno_file = anno_file
  58. self.clsid2catid = kwargs.get('clsid2catid', None)
  59. if self.clsid2catid is None:
  60. self.clsid2catid, _ = get_categories('COCO', anno_file)
  61. self.classwise = kwargs.get('classwise', False)
  62. self.output_eval = kwargs.get('output_eval', None)
  63. # TODO: bias should be unified
  64. self.bias = kwargs.get('bias', 0)
  65. self.save_prediction_only = kwargs.get('save_prediction_only', False)
  66. self.iou_type = kwargs.get('IouType', 'bbox')
  67. self.reset()
  68. def reset(self):
  69. # only bbox and mask evaluation support currently
  70. self.results = {'bbox': [], 'mask': [], 'segm': [], 'keypoint': []}
  71. self.eval_results = {}
  72. def update(self, inputs, outputs):
  73. outs = {}
  74. # outputs Tensor -> numpy.ndarray
  75. for k, v in outputs.items():
  76. outs[k] = v.numpy() if isinstance(v, paddle.Tensor) else v
  77. im_id = inputs['im_id']
  78. outs['im_id'] = im_id.numpy() if isinstance(im_id,
  79. paddle.Tensor) else im_id
  80. infer_results = get_infer_results(
  81. outs, self.clsid2catid, bias=self.bias)
  82. self.results['bbox'] += infer_results[
  83. 'bbox'] if 'bbox' in infer_results else []
  84. self.results['mask'] += infer_results[
  85. 'mask'] if 'mask' in infer_results else []
  86. self.results['segm'] += infer_results[
  87. 'segm'] if 'segm' in infer_results else []
  88. self.results['keypoint'] += infer_results[
  89. 'keypoint'] if 'keypoint' in infer_results else []
  90. def accumulate(self):
  91. if len(self.results['bbox']) > 0:
  92. output = "bbox.json"
  93. if self.output_eval:
  94. output = os.path.join(self.output_eval, output)
  95. with open(output, 'w') as f:
  96. json.dump(self.results['bbox'], f)
  97. logger.info('The bbox result is saved to bbox.json.')
  98. if self.save_prediction_only:
  99. logger.info('The bbox result is saved to {} and do not '
  100. 'evaluate the mAP.'.format(output))
  101. else:
  102. bbox_stats = cocoapi_eval(
  103. output,
  104. 'bbox',
  105. anno_file=self.anno_file,
  106. classwise=self.classwise)
  107. self.eval_results['bbox'] = bbox_stats
  108. sys.stdout.flush()
  109. if len(self.results['mask']) > 0:
  110. output = "mask.json"
  111. if self.output_eval:
  112. output = os.path.join(self.output_eval, output)
  113. with open(output, 'w') as f:
  114. json.dump(self.results['mask'], f)
  115. logger.info('The mask result is saved to mask.json.')
  116. if self.save_prediction_only:
  117. logger.info('The mask result is saved to {} and do not '
  118. 'evaluate the mAP.'.format(output))
  119. else:
  120. seg_stats = cocoapi_eval(
  121. output,
  122. 'segm',
  123. anno_file=self.anno_file,
  124. classwise=self.classwise)
  125. self.eval_results['mask'] = seg_stats
  126. sys.stdout.flush()
  127. if len(self.results['segm']) > 0:
  128. output = "segm.json"
  129. if self.output_eval:
  130. output = os.path.join(self.output_eval, output)
  131. with open(output, 'w') as f:
  132. json.dump(self.results['segm'], f)
  133. logger.info('The segm result is saved to segm.json.')
  134. if self.save_prediction_only:
  135. logger.info('The segm result is saved to {} and do not '
  136. 'evaluate the mAP.'.format(output))
  137. else:
  138. seg_stats = cocoapi_eval(
  139. output,
  140. 'segm',
  141. anno_file=self.anno_file,
  142. classwise=self.classwise)
  143. self.eval_results['mask'] = seg_stats
  144. sys.stdout.flush()
  145. if len(self.results['keypoint']) > 0:
  146. output = "keypoint.json"
  147. if self.output_eval:
  148. output = os.path.join(self.output_eval, output)
  149. with open(output, 'w') as f:
  150. json.dump(self.results['keypoint'], f)
  151. logger.info('The keypoint result is saved to keypoint.json.')
  152. if self.save_prediction_only:
  153. logger.info('The keypoint result is saved to {} and do not '
  154. 'evaluate the mAP.'.format(output))
  155. else:
  156. style = 'keypoints'
  157. use_area = True
  158. sigmas = COCO_SIGMAS
  159. if self.iou_type == 'keypoints_crowd':
  160. style = 'keypoints_crowd'
  161. use_area = False
  162. sigmas = CROWD_SIGMAS
  163. keypoint_stats = cocoapi_eval(
  164. output,
  165. style,
  166. anno_file=self.anno_file,
  167. classwise=self.classwise,
  168. sigmas=sigmas,
  169. use_area=use_area)
  170. self.eval_results['keypoint'] = keypoint_stats
  171. sys.stdout.flush()
  172. def log(self):
  173. pass
  174. def get_results(self):
  175. return self.eval_results
  176. class VOCMetric(Metric):
  177. def __init__(self,
  178. label_list,
  179. class_num=20,
  180. overlap_thresh=0.5,
  181. map_type='11point',
  182. is_bbox_normalized=False,
  183. evaluate_difficult=False,
  184. classwise=False):
  185. assert os.path.isfile(label_list), \
  186. "label_list {} not a file".format(label_list)
  187. self.clsid2catid, self.catid2name = get_categories('VOC', label_list)
  188. self.overlap_thresh = overlap_thresh
  189. self.map_type = map_type
  190. self.evaluate_difficult = evaluate_difficult
  191. self.detection_map = DetectionMAP(
  192. class_num=class_num,
  193. overlap_thresh=overlap_thresh,
  194. map_type=map_type,
  195. is_bbox_normalized=is_bbox_normalized,
  196. evaluate_difficult=evaluate_difficult,
  197. catid2name=self.catid2name,
  198. classwise=classwise)
  199. self.reset()
  200. def reset(self):
  201. self.detection_map.reset()
  202. def update(self, inputs, outputs):
  203. bboxes = outputs['bbox'][:, 2:].numpy()
  204. scores = outputs['bbox'][:, 1].numpy()
  205. labels = outputs['bbox'][:, 0].numpy()
  206. bbox_lengths = outputs['bbox_num'].numpy()
  207. if bboxes.shape == (1, 1) or bboxes is None:
  208. return
  209. gt_boxes = inputs['gt_bbox']
  210. gt_labels = inputs['gt_class']
  211. difficults = inputs['difficult'] if not self.evaluate_difficult \
  212. else None
  213. scale_factor = inputs['scale_factor'].numpy(
  214. ) if 'scale_factor' in inputs else np.ones(
  215. (gt_boxes.shape[0], 2)).astype('float32')
  216. bbox_idx = 0
  217. for i in range(len(gt_boxes)):
  218. gt_box = gt_boxes[i].numpy()
  219. h, w = scale_factor[i]
  220. gt_box = gt_box / np.array([w, h, w, h])
  221. gt_label = gt_labels[i].numpy()
  222. difficult = None if difficults is None \
  223. else difficults[i].numpy()
  224. bbox_num = bbox_lengths[i]
  225. bbox = bboxes[bbox_idx:bbox_idx + bbox_num]
  226. score = scores[bbox_idx:bbox_idx + bbox_num]
  227. label = labels[bbox_idx:bbox_idx + bbox_num]
  228. gt_box, gt_label, difficult = prune_zero_padding(gt_box, gt_label,
  229. difficult)
  230. self.detection_map.update(bbox, score, label, gt_box, gt_label,
  231. difficult)
  232. bbox_idx += bbox_num
  233. def accumulate(self):
  234. logger.info("Accumulating evaluatation results...")
  235. self.detection_map.accumulate()
  236. def log(self):
  237. map_stat = 100. * self.detection_map.get_map()
  238. logger.info("mAP({:.2f}, {}) = {:.2f}%".format(
  239. self.overlap_thresh, self.map_type, map_stat))
  240. def get_results(self):
  241. return {'bbox': [self.detection_map.get_map()]}
  242. class WiderFaceMetric(Metric):
  243. def __init__(self, image_dir, anno_file, multi_scale=True):
  244. self.image_dir = image_dir
  245. self.anno_file = anno_file
  246. self.multi_scale = multi_scale
  247. self.clsid2catid, self.catid2name = get_categories('widerface')
  248. def update(self, model):
  249. face_eval_run(
  250. model,
  251. self.image_dir,
  252. self.anno_file,
  253. pred_dir='output/pred',
  254. eval_mode='widerface',
  255. multi_scale=self.multi_scale)