runner.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import tempfile

from ...base import BaseRunner
from ...base.utils.arg import gather_opts_args
from ...base.utils.subprocess import CompletedProcess


class SegRunner(BaseRunner):
    """Semantic Segmentation Runner, which wraps PaddleSeg's training, evaluation,
    prediction, export, and compression scripts as subprocess calls."""

    def train(self,
              config_path: str,
              cli_args: list,
              device: str,
              ips: str,
              save_dir: str,
              do_eval=True) -> CompletedProcess:
        """Train the model.

        Args:
            config_path (str): the config file path used for training.
            cli_args (list): additional command-line parameters.
            device (str): the training device.
            ips (str): the IP addresses of the nodes for distributed training.
            save_dir (str): the directory path to save the training output.
            do_eval (bool, optional): whether to evaluate the model during
                training. Defaults to True.

        Returns:
            CompletedProcess: the result of the training subprocess execution.
        """
        args, env = self.distributed(device, ips, log_dir=save_dir)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, 'tools/train.py']
        if do_eval:
            cmd.append('--do_eval')
        cmd.extend(['--config', config_path, *cli_args])
        return self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(save_dir))
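
    # Illustration (hedged): assuming `self.distributed(...)` returns a list of
    # launcher-prefix arguments plus the environment to run with (an assumption
    # based on how the return value is used above, not on the BaseRunner API),
    # a call with `do_eval=True` assembles a command of the shape
    #     <launcher prefix> tools/train.py --do_eval --config <config_path> <gathered cli_args>
    # which `run_cmd` then executes with the prepared environment.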

    def evaluate(self, config_path: str, cli_args: list, device: str,
                 ips: str) -> CompletedProcess:
        """Run model evaluation.

        Args:
            config_path (str): the config file path used for evaluation.
            cli_args (list): additional command-line parameters.
            device (str): the evaluation device.
            ips (str): the IP addresses of the nodes for distributed evaluation.

        Returns:
            CompletedProcess: the result of the evaluation subprocess execution.
        """
        args, env = self.distributed(device, ips)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, 'tools/val.py', '--config', config_path, *cli_args]
        cp = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True)
        if cp.returncode == 0:
            metric_dict = _extract_eval_metrics(cp.stdout)
            cp.metrics = metric_dict
        return cp

    def predict(self, config_path: str, cli_args: list,
                device: str) -> CompletedProcess:
        """Run prediction in dynamic-graph mode.

        Args:
            config_path (str): the config file path used for prediction.
            cli_args (list): additional command-line parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the prediction subprocess execution.
        """
        # `device` is unused
        cli_args = self._gather_opts_args(cli_args)
        cmd = [
            self.python, 'tools/predict.py', '--config', config_path, *cli_args
        ]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

    def analyse(self, config_path, cli_args, device, ips):
        """Run model analysis.

        Args:
            config_path (str): the config file path used for analysis.
            cli_args (list): additional command-line parameters.
            device (str): the running device.
            ips (str): the IP addresses of the nodes for distributed running.

        Returns:
            CompletedProcess: the result of the analysis subprocess execution.
        """
        args, env = self.distributed(device, ips)
        cli_args = self._gather_opts_args(cli_args)
        cmd = [*args, 'tools/analyse.py', '--config', config_path, *cli_args]
        cp = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True)
        return cp

    def export(self, config_path: str, cli_args: list,
               device: str) -> CompletedProcess:
        """Run model export.

        Args:
            config_path (str): the config file path used for export.
            cli_args (list): additional command-line parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the export subprocess execution.
        """
        # `device` is unused
        cli_args = self._gather_opts_args(cli_args)
        cmd = [
            self.python, 'tools/export.py', '--for_fd', '--config', config_path,
            *cli_args
        ]
        cp = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
        return cp

    def infer(self, config_path: str, cli_args: list,
              device: str) -> CompletedProcess:
        """Run prediction using the exported inference model.

        Args:
            config_path (str): the config file path used for prediction.
            cli_args (list): additional command-line parameters.
            device (str): unused.

        Returns:
            CompletedProcess: the result of the inference subprocess execution.
        """
        # `device` is unused
        cli_args = self._gather_opts_args(cli_args)
        cmd = [
            self.python, 'deploy/python/infer.py', '--config', config_path,
            *cli_args
        ]
        return self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)

    def compression(self,
                    config_path: str,
                    train_cli_args: list,
                    export_cli_args: list,
                    device: str,
                    train_save_dir: str) -> CompletedProcess:
        """Run model compression (quantization-aware training followed by export).

        Args:
            config_path (str): the config file path used for compression.
            train_cli_args (list): additional training parameters.
            export_cli_args (list): additional export parameters.
            device (str): the running device.
            train_save_dir (str): the directory path to save the training output.

        Returns:
            tuple: the results of the training and export subprocess executions.
        """
        # Step 1: Train the model
        args, env = self.distributed(device, log_dir=train_save_dir)
        train_cli_args = self._gather_opts_args(train_cli_args)
        # Note that we add `--do_eval` here so that
        # `train_save_dir/best_model/model.pdparams` gets saved
        cmd = [
            *args, 'deploy/slim/quant/qat_train.py', '--do_eval', '--config',
            config_path, *train_cli_args
        ]
        cp_train = self.run_cmd(
            cmd,
            env=env,
            switch_wdir=True,
            echo=True,
            silent=False,
            capture_output=True,
            log_path=self._get_train_log_path(train_save_dir))

        # Step 2: Export the model
        export_cli_args = self._gather_opts_args(export_cli_args)
        # Export the model that performed best on the validation dataset
        weight_path = os.path.join(train_save_dir, 'best_model',
                                   'model.pdparams')
        cmd = [
            self.python, 'deploy/slim/quant/qat_export.py', '--for_fd',
            '--config', config_path, '--model_path', weight_path,
            *export_cli_args
        ]
        cp_export = self.run_cmd(cmd, switch_wdir=True, echo=True, silent=False)
        return cp_train, cp_export
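
    # Illustration: the compression flow assembles two commands in sequence,
    #     <launcher prefix> deploy/slim/quant/qat_train.py --do_eval --config <config_path> <gathered train_cli_args>
    # followed by an export of the best checkpoint produced by that run,
    #     <python> deploy/slim/quant/qat_export.py --for_fd --config <config_path> --model_path <train_save_dir>/best_model/model.pdparams <gathered export_cli_args>
    # The placeholders in angle brackets stand for the values computed above.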

    def _gather_opts_args(self, args):
        # Since `--opts` in PaddleSeg does not use `action='append'`,
        # we collect and rearrange all opts args here.
        # e.g.: python tools/train.py --config xxx --opts a=1 c=3 --opts b=2
        # => python tools/train.py --config xxx --opts a=1 c=3 b=2
        return gather_opts_args(args, '--opts')


def _extract_eval_metrics(stdout: str) -> dict:
    """Extract evaluation metrics from the evaluation log.

    Args:
        stdout (str): the output log of the evaluation subprocess.

    Returns:
        dict: the extracted evaluation metrics.
    """
    import re

    _DP = r'[-+]?[0-9]*\.?[0-9]+(?:[eE][-+]?[0-9]+)?'
    pattern = r'Images: \d+ mIoU: (_dp) Acc: (_dp) Kappa: (_dp) Dice: (_dp)'.replace(
        '_dp', _DP)
    keys = ['mIoU', 'Acc', 'Kappa', 'Dice']
    metric_dict = dict()
    pattern = re.compile(pattern)
    # TODO: Use a lazy version to make this more efficient
    lines = stdout.splitlines()
    for line in lines:
        match = pattern.search(line)
        if match:
            for k, v in zip(keys, map(float, match.groups())):
                metric_dict[k] = v
    return metric_dict
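
# Illustration (hedged): the log line below is hypothetical and only follows the
# shape implied by the regex above; it is not taken from an actual PaddleSeg log.
#     _extract_eval_metrics("[EVAL] Images: 100 mIoU: 0.8251 Acc: 0.9512 Kappa: 0.9021 Dice: 0.8976")
# would return
#     {'mIoU': 0.8251, 'Acc': 0.9512, 'Kappa': 0.9021, 'Dice': 0.8976}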