# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
from pathlib import Path
from abc import ABC, abstractmethod

from .build_model import build_model
from ...utils.device import get_device
from ...utils.misc import AutoRegisterABCMetaClass
from ...utils.config import AttrDict
from ...utils.logging import warning


def build_evaluater(config: AttrDict) -> "BaseEvaluator":
    """Build a model evaluator.

    Args:
        config (AttrDict): PaddleX pipeline config, loaded from the pipeline
            YAML file.

    Returns:
        BaseEvaluator: the evaluator, a subclass of BaseEvaluator registered
            under `config.Global.model`.
    """
    model_name = config.Global.model
    return BaseEvaluator.get(model_name)(config)
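

# A minimal usage sketch, kept as a comment. The config loader shown here is
# an assumption for illustration, not the actual PaddleX API. Given a pipeline
# YAML loaded into an AttrDict with `Global.model` and `Evaluate.weight_path`
# set:
#
#     config = load_pipeline_config("pipeline.yaml")  # hypothetical helper
#     evaluator = build_evaluater(config)
#     result = evaluator()  # -> {"metrics": {...}}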


class BaseEvaluator(ABC, metaclass=AutoRegisterABCMetaClass):
    """Base Model Evaluator"""

    __is_base = True

    def __init__(self, config):
        """Initialize the instance.

        Args:
            config (AttrDict): PaddleX pipeline config, loaded from the
                pipeline YAML file.
        """
        super().__init__()
        self.global_config = config.Global
        self.eval_config = config.Evaluate
        config_path = self.get_config_path(self.eval_config.weight_path)
        if not config_path.exists():
            warning(
                f"The config file (`{config_path}`) related to the weight file "
                f"(`{self.eval_config.weight_path}`) does not exist. Using the "
                "default config instead.")
            config_path = None
        self.pdx_config, self.pdx_model = build_model(
            self.global_config.model, config_path=config_path)

    def get_config_path(self, weight_path):
        """Get the path to the config file associated with a weight file.

        Args:
            weight_path (str): The path to the weight file.

        Returns:
            Path: The path to the config file, i.e. `config.yaml` in the
                weight file's directory.
        """
        config_path = Path(weight_path).parent / "config.yaml"
        return config_path
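
    # For example (illustrative paths only): a weight file at
    # `output/best_model/model.pdparams` resolves to
    # `output/best_model/config.yaml`.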

    def check_return(self, metrics: dict) -> bool:
        """Check evaluation metrics.

        Args:
            metrics (dict): evaluation output metrics.

        Returns:
            bool: whether the format of the evaluation metrics is legal,
                i.e. a dict whose values are all floats.
        """
        if not isinstance(metrics, dict):
            return False
        for val in metrics.values():
            if not isinstance(val, float):
                return False
        return True
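
    # For example (illustrative values): `{"Top1 Acc": 0.92}` is legal,
    # while `{"Top1 Acc": "0.92"}` or a bare float is not.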

    def __call__(self) -> dict:
        """Execute model evaluation.

        Returns:
            dict: the evaluation metrics.
        """
        metrics = self.eval()
        assert self.check_return(
            metrics
        ), f"The return value ({metrics}) of Evaluator.eval() is illegal!"
        return {"metrics": metrics}

    def dump_config(self, config_file_path=None):
        """Dump the config.

        Args:
            config_file_path (str, optional): the path to save the dumped
                config. Defaults to None, meaning it is saved to
                `Global.output` as `config.yaml`.
        """
        if config_file_path is None:
            config_file_path = os.path.join(self.global_config.output,
                                            "config.yaml")
        self.pdx_config.dump(config_file_path)
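
    # e.g. `evaluator.dump_config()` writes `<Global.output>/config.yaml`.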

    def eval(self):
        """Update the evaluation config, evaluate the model, then return the
        evaluation result.
        """
        self.update_config()
        # self.dump_config()
        evaluate_result = self.pdx_model.evaluate(**self.get_eval_kwargs())
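        # `evaluate()` is assumed here to behave like a subprocess call,
        # returning an object that exposes a `returncode` and the parsed
        # `metrics`.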
        assert evaluate_result.returncode == 0, (
            f"Encountered an unexpected error ({evaluate_result.returncode}) "
            "in evaluating!")
        return evaluate_result.metrics

    def get_device(self, using_device_number: int = None) -> str:
        """Get the device setting from the config.

        Args:
            using_device_number (int, optional): specify the number of devices
                to use. Defaults to None, meaning it is based on the config
                setting.

        Returns:
            str: device setting, such as `gpu:0,1`, `npu:0,1`, `cpu`.
        """
        return get_device(
            self.global_config.device, using_device_number=using_device_number)

    @abstractmethod
    def update_config(self):
        """Update the evaluation config."""
        raise NotImplementedError

    @abstractmethod
    def get_eval_kwargs(self):
        """Get the keyword arguments of the model evaluation function."""
        raise NotImplementedError
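

# A minimal concrete-subclass sketch, left as a comment. The model name and
# the `pdx_config` setter used below are hypothetical illustrations, not the
# actual PaddleX API:
#
#     class HypotheticalClsEvaluator(BaseEvaluator):
#         entities = ["Hypothetical-Model"]  # registry keys (assumed usage)
#
#         def update_config(self):
#             # Hypothetical setter pointing the suite config at the dataset.
#             self.pdx_config.update_dataset(self.global_config.dataset_dir)
#
#         def get_eval_kwargs(self) -> dict:
#             return {
#                 "weight_path": self.eval_config.weight_path,
#                 "device": self.get_device(),
#             }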