runner.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile

from ...base import BaseRunner
from ...base.utils.arg import CLIArgument, gather_opts_args
from ...base.utils.subprocess import CompletedProcess


class DetRunner(BaseRunner):
    """DetRunner"""

    def train(
        self,
        config_path: str,
        cli_args: list,
        device: str,
        ips: str,
        save_dir: str,
        do_eval=True,
    ) -> CompletedProcess:
        """train model

        Args:
            config_path (str): the config file path used to train.
            cli_args (list): the additional parameters.
            device (str): the training device.
            ips (str): the ip addresses of nodes when using distribution.
            save_dir (str): the directory path to save training output.
            do_eval (bool, optional): whether or not to evaluate model during training. Defaults to True.

        Returns:
            CompletedProcess: the result of training subprocess execution.
        """
        args, env = self.distributed(device, ips, log_dir=save_dir)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, "tools/train.py"]
        if do_eval:
            cmd.append("--eval")
        cmd.extend(["--config", config_path, *cli_args])
        return self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(save_dir),
        )
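
    # Note (assumption, not asserted by this module): `self.distributed` from
    # BaseRunner is expected to return a distributed-launch command prefix plus
    # environment variables, so the command assembled in `train` above is roughly
    # `<launcher> tools/train.py --eval --config <config_path> <gathered "-o" opts>`.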

    def evaluate(
        self, config_path: str, cli_args: list, device: str, ips: str
    ) -> CompletedProcess:
        """run model evaluation

        Args:
            config_path (str): the config file path used to evaluate.
            cli_args (list): the additional parameters.
            device (str): the evaluating device.
            ips (str): the ip addresses of nodes when using distribution.

        Returns:
            CompletedProcess: the result of the evaluation subprocess execution.
        """
        args, env = self.distributed(device, ips)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, "tools/eval.py", "--config", config_path, *cli_args]
        cp = self.run_cmd(
            cmd, env=env, switch_wdir=True, echo=True, silent=False, capture_output=True
        )
        if cp.returncode == 0:
            metric_dict = _extract_eval_metrics(cp.stdout)
            cp.metrics = metric_dict
        return cp

    def predict(
        self, config_path: str, cli_args: list, device: str
    ) -> CompletedProcess:
        """run prediction in dynamic mode

        Args:
            config_path (str): the config file path used to predict.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the prediction subprocess execution.
        """
        # `device` unused
        cli_args = self._gather_opts_args(cli_args)
        cmd = [self.python, "tools/infer.py", "-c", config_path, *cli_args]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

    def export(self, config_path: str, cli_args: list, device: str) -> CompletedProcess:
        """run model export

        Args:
            config_path (str): the path of config file used to export.
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the export subprocess execution.
        """
        # `device` unused
        cli_args = self._gather_opts_args(cli_args)
        cmd = [
            self.python,
            "tools/export_model.py",
            "--for_fd",
            "-c",
            config_path,
            *cli_args,
        ]
        cp = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
        return cp

    def infer(self, cli_args: list, device: str) -> CompletedProcess:
        """run prediction using the inference model

        Args:
            cli_args (list): the additional parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the inference subprocess execution.
        """
        # `device` unused
        cmd = [self.python, "deploy/python/infer.py", "--use_fd_format", *cli_args]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

    def compression(
        self,
        config_path: str,
        train_cli_args: list,
        export_cli_args: list,
        device: str,
        train_save_dir: str,
    ) -> tuple:
        """run model compression (training followed by export)

        Args:
            config_path (str): the path of config file used for compression.
            train_cli_args (list): the additional training parameters.
            export_cli_args (list): the additional exporting parameters.
            device (str): the running device.
            train_save_dir (str): the directory path to save output.

        Returns:
            tuple: the results of the training and exporting subprocess executions.
        """
        args, env = self.distributed(device, log_dir=train_save_dir)
        train_cli_args = self._gather_opts_args(train_cli_args)
        cmd = [*args, "tools/train.py", "-c", config_path, *train_cli_args]
        cp_train = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(train_save_dir),
        )

        cps_weight_path = os.path.join(train_save_dir, "model_final")
        export_cli_args.append(CLIArgument("-o", f"weights={cps_weight_path}"))
        export_cli_args = self._gather_opts_args(export_cli_args)
        cmd = [
            self.python,
            "tools/export_model.py",
            "--for_fd",
            "-c",
            config_path,
            *export_cli_args,
        ]
        cp_export = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

        return cp_train, cp_export

    def _gather_opts_args(self, args):
        """gather the "-o" options in `args`"""
        return gather_opts_args(args, "-o")


def _extract_eval_metrics(stdout: str) -> dict:
    """extract evaluation metrics from the evaluation log

    Args:
        stdout (str): the evaluation log

    Returns:
        dict: the extracted metrics
    """
    import re

    pattern = r".*\(AP\)\s*@\[\s*IoU=0\.50:0\.95\s*\|\s*area=\s*all\s\|\smaxDets=\s*\d+\s\]\s*=\s*[0-1]?\.[0-9]{3}$"
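    # The pattern is meant to match the standard COCO (pycocotools) summary line, e.g.:
    #   Average Precision  (AP) @[ IoU=0.50:0.95 | area=   all | maxDets=100 ] = 0.523
    # The trailing five characters of the match ("0.523") are parsed below as the AP value.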
    key = "AP"

    metric_dict = dict()
    pattern = re.compile(pattern)
    # TODO: Use lazy version to make it more efficient
    lines = stdout.splitlines()
    metric_dict[key] = 0
    for line in lines:
        match = pattern.search(line)
        if match:
            metric_dict[key] = float(match.group(0)[-5:])
    return metric_dict
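

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original module). The constructor
# arguments, config path, device string, and "-o" override below are placeholders
# inherited from BaseRunner/PaddleDetection conventions; they are shown only to
# clarify how the methods above are meant to be combined.
#
#   runner = DetRunner(...)  # construct as required by BaseRunner
#   cp_train = runner.train(
#       config_path="configs/xxx.yml",          # placeholder config path
#       cli_args=[CLIArgument("-o", "TrainReader.batch_size=8")],
#       device="gpu",
#       ips=None,                               # single-machine run (assumed)
#       save_dir="output/det_train",
#   )
#   if cp_train.returncode == 0:
#       cp_eval = runner.evaluate("configs/xxx.yml", [], "gpu", None)
#       print(cp_eval.metrics)                  # e.g. {"AP": 0.523}
# ---------------------------------------------------------------------------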