runner.py

# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile

from ...base import BaseRunner
from ...base.utils.subprocess import CompletedProcess


class ClsRunner(BaseRunner):
    """ Cls Runner """

    _INFER_CONFIG_REL_PATH = os.path.join('deploy', 'configs',
                                          'inference_cls.yaml')

    def train(self,
              config_path: str,
              cli_args: list,
              device: str,
              ips: str,
              save_dir: str,
              do_eval=True) -> CompletedProcess:
        """train model

        Args:
            config_path (str): the config file path used to train.
            cli_args (list): the additional parameters.
            device (str): the training device.
            ips (str): the IP addresses of the nodes when using distributed training.
            save_dir (str): the directory path to save the training output.
            do_eval (bool, optional): whether or not to evaluate the model during training. Defaults to True.

        Returns:
            CompletedProcess: the result of the training subprocess execution.
        """
        args, env = self.distributed(device, ips, log_dir=save_dir)
        cmd = [*args, 'tools/train.py', '-c', config_path, *cli_args]
        cmd.extend(['-o', f"Global.eval_during_train={do_eval}"])
        return self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(save_dir))
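
    # Illustrative note (not part of the original module): for a multi-device
    # run, the `args` returned by self.distributed() is expected to wrap the
    # command in a distributed launcher (e.g. `python -m paddle.distributed.launch ...`),
    # so the final command looks roughly like
    #     <python> <launcher args> tools/train.py -c <config_path> <cli_args...> \
    #         -o Global.eval_during_train=<do_eval>
    # The exact launcher and its flags depend on BaseRunner.distributed and the
    # installed Paddle version; they are an assumption here.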

    def evaluate(self, config_path: str, cli_args: list, device: str,
                 ips: str) -> CompletedProcess:
        """run model evaluation

        Args:
            config_path (str): the config file path used to evaluate.
            cli_args (list): the additional parameters.
            device (str): the evaluation device.
            ips (str): the IP addresses of the nodes when using distributed evaluation.

        Returns:
            CompletedProcess: the result of the evaluation subprocess execution.
        """
        args, env = self.distributed(device, ips)
        cmd = [*args, 'tools/eval.py', '-c', config_path, *cli_args]
        cp = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True)
        if cp.returncode == 0:
            metric_dict = _extract_eval_metrics(cp.stdout)
            cp.metrics = metric_dict
        return cp

    def predict(self, config_path: str, cli_args: list,
                device: str) -> CompletedProcess:
        """run prediction in dynamic-graph mode

        Args:
            config_path (str): the config file path used to predict.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the prediction subprocess execution.
        """
        # `device` unused
        cmd = [self.python, 'tools/infer.py', '-c', config_path, *cli_args]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

    def export(self,
               config_path: str,
               cli_args: list,
               device: str,
               save_dir: str=None) -> CompletedProcess:
        """run model export

        Args:
            config_path (str): the path of the config file used to export.
            cli_args (list): the additional parameters.
            device (str): unused.
            save_dir (str, optional): the directory path to save the export output. Defaults to None.

        Returns:
            CompletedProcess: the result of the export subprocess execution.
        """
        # `device` unused
        cmd = [
            self.python, 'tools/export_model.py', '-c', config_path, *cli_args,
            '-o', 'Global.export_for_fd=True', '-o',
            f"Global.infer_config_path={os.path.join(self.runner_root_path, self._INFER_CONFIG_REL_PATH)}"
        ]
        cp = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
        return cp

    def infer(self, config_path: str, cli_args: list,
              device: str) -> CompletedProcess:
        """run prediction using the inference model

        Args:
            config_path (str): the path of the config file used to predict.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the inference subprocess execution.
        """
        # `device` unused
        cmd = [
            self.python, 'python/predict_cls.py', '-c', config_path, *cli_args
        ]
        return self.run_cmd(cmd, switch_wdir='deploy', echo=True, silent=False)

    def compression(self,
                    config_path: str,
                    train_cli_args: list,
                    export_cli_args: list,
                    device: str,
                    train_save_dir: str) -> tuple:
        """run model compression

        Args:
            config_path (str): the path of the config file used for compression.
            train_cli_args (list): the additional training parameters.
            export_cli_args (list): the additional exporting parameters.
            device (str): the running device.
            train_save_dir (str): the directory path to save the output.

        Returns:
            tuple: the CompletedProcess results of the training and exporting subprocesses.
        """
        # Step 1: Train model
        cp_train = self.train(config_path, train_cli_args, device, None,
                              train_save_dir)

        # Step 2: Export model
        weight_path = os.path.join(train_save_dir, 'best_model', 'model')
        export_cli_args = [
            *export_cli_args, '-o', f"Global.pretrained_model={weight_path}"
        ]
        cp_export = self.export(config_path, export_cli_args, device)

        return cp_train, cp_export
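
# Illustrative usage sketch (not part of the original module): roughly how a
# caller might drive the two-step compression flow above. `runner` is assumed
# to be an already-constructed ClsRunner, and all argument values below are
# hypothetical placeholders; the '-o key=value' override pattern mirrors the
# one used by the methods in this file.
#
#     cp_train, cp_export = runner.compression(
#         config_path='path/to/cls_config.yaml',
#         train_cli_args=['-o', 'Global.output_dir=output/compress'],
#         export_cli_args=[],
#         device='gpu:0',
#         train_save_dir='output/compress')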


def _extract_eval_metrics(stdout: str) -> dict:
    """extract evaluation metrics from the evaluation log

    Args:
        stdout (str): the evaluation log.

    Returns:
        dict: the extracted evaluation metrics.
    """
    import re

    # `_DP` matches a (possibly signed) decimal number with an optional exponent.
    _DP = r'[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?'
    patterns = [
        r'\[Eval\]\[Epoch 0\]\[Avg\].*top1: (_dp), top5: (_dp)'.replace(
            '_dp', _DP),
        r'\[Eval\]\[Epoch 0\]\[Avg\].*recall1: (_dp), recall5: (_dp), mAP: (_dp)'.replace(
            '_dp', _DP),
    ]
    keys = [['val.top1', 'val.top5'], ['recall1', 'recall5', 'mAP']]

    metric_dict = dict()
    for pattern, key in zip(patterns, keys):
        pattern = re.compile(pattern)
        for line in stdout.splitlines():
            match = pattern.search(line)
            if match:
                for k, v in zip(key, map(float, match.groups())):
                    metric_dict[k] = v
    return metric_dict
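

# Illustrative example (not part of the original module): how
# `_extract_eval_metrics` is expected to behave on a hypothetical
# PaddleClas-style evaluation log line that matches the first pattern above.
#
#     >>> _extract_eval_metrics(
#     ...     "[Eval][Epoch 0][Avg]CELoss: 0.52100, loss: 0.52100, "
#     ...     "top1: 0.95123, top5: 0.99240")
#     {'val.top1': 0.95123, 'val.top5': 0.9924}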