benchmark.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import functools
import time
from types import GeneratorType

import numpy as np
from prettytable import PrettyTable

from ...utils import logging
from ...utils.flags import INFER_BENCHMARK_OUTPUT


class Benchmark:
    def __init__(self, components):
        self._components = components

    def reset(self):
        # clear the recorded timings of every component
        for name in self._components:
            cmp = self._components[name]
            cmp.timer.reset()

    def gather(self):
        # lazy import for avoiding circular import
        from ..components.paddle_predictor import BasePaddlePredictor

        detail = []
        summary = {"preprocess": 0, "inference": 0, "postprocess": 0}
        op_tag = "preprocess"
        for name in self._components:
            cmp = self._components[name]
            times = cmp.timer.logs
            counts = len(times)
            avg = np.mean(times) * 1000
            detail.append((name, counts, avg))
            if isinstance(cmp, BasePaddlePredictor):
                # components before the predictor are counted as preprocessing,
                # components after it as postprocessing
                summary["inference"] += avg
                op_tag = "postprocess"
            else:
                summary[op_tag] += avg
        return detail, summary
    def collect(self):
        detail, summary = self.gather()
        table = PrettyTable(["Component", "Counts", "Average Time(ms)"])
        table.add_rows([(name, cnts, f"{avg:.8f}") for name, cnts, avg in detail])
        table.add_row(("***************", "******", "***************"))
        table.add_row(("PreProcess", "\\", f"{summary['preprocess']:.8f}"))
        table.add_row(("Inference", "\\", f"{summary['inference']:.8f}"))
        table.add_row(("PostProcess", "\\", f"{summary['postprocess']:.8f}"))
        logging.info(table)

        if INFER_BENCHMARK_OUTPUT:
            # also dump the report as CSV-like text to the configured path
            str_ = "Component, Counts, Average Time(ms)\n"
            str_ += "\n".join(
                [f"{name}, {cnts}, {avg:.18f}" for name, cnts, avg in detail]
            )
            str_ += "\n***************, ***, ***************\n"
            str_ += "\n".join(
                [
                    f"PreProcess, \\, {summary['preprocess']:.18f}",
                    f"Inference, \\, {summary['inference']:.18f}",
                    f"PostProcess, \\, {summary['postprocess']:.18f}",
                ]
            )
            with open(INFER_BENCHMARK_OUTPUT, "w") as f:
                f.write(str_)


class Timer:
    def __init__(self):
        self._tic = None
        self._elapses = []

    def watch_func(self, func):
        # decorator that records the wall-clock time of each call to `func`
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            tic = time.time()
            output = func(*args, **kwargs)
            if isinstance(output, GeneratorType):
                # generators are timed lazily, item by item, as they are consumed
                return self.watch_generator(output)
            else:
                self._update(time.time() - tic)
            return output

        return wrapper

    def watch_generator(self, generator):
        @functools.wraps(generator)
        def wrapper():
            while True:
                try:
                    tic = time.time()
                    item = next(generator)
                    self._update(time.time() - tic)
                    yield item
                except StopIteration:
                    break

        return wrapper()

    def reset(self):
        self._tic = None
        self._elapses = []

    def _update(self, elapse):
        self._elapses.append(elapse)

    @property
    def logs(self):
        return self._elapses
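

# Minimal usage sketch (illustration only, not part of the module): it assumes
# a component object that exposes a `timer` attribute holding a `Timer`, which
# is the contract `Benchmark` relies on. `DummyComponent` and its `apply`
# method are hypothetical stand-ins, not real PaddleX components.
#
#     class DummyComponent:
#         def __init__(self):
#             self.timer = Timer()
#
#         def apply(self, x):
#             return x * 2
#
#     comp = DummyComponent()
#     # wrap the method so every call is timed and recorded in `comp.timer`
#     comp.apply = comp.timer.watch_func(comp.apply)
#     for i in range(10):
#         comp.apply(i)
#
#     benchmark = Benchmark({"Dummy": comp})
#     benchmark.collect()  # logs a PrettyTable of per-component timings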