# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from abc import abstractmethod
from typing import Any, Dict, Iterator, Optional

from .....utils import logging
from .....utils.flags import (
    INFER_BENCHMARK,
    INFER_BENCHMARK_ITERS,
    INFER_BENCHMARK_WARMUP,
)
from .....utils.subclass_register import AutoRegisterABCMetaClass
from ....utils.benchmark import ENTRY_POINT_NAME, benchmark
from ....utils.pp_option import PaddlePredictorOption
from .base_predictor import BasePredictor
  26. class BasicPredictor(
  27. BasePredictor,
  28. metaclass=AutoRegisterABCMetaClass,
  29. ):
  30. """BasicPredictor."""
  31. __is_base = True
  32. def __init__(
  33. self,
  34. model_dir: str,
  35. config: Dict[str, Any] = None,
  36. device: str = None,
  37. batch_size: int = 1,
  38. pp_option: PaddlePredictorOption = None,
  39. ) -> None:
  40. """Initializes the BasicPredictor.
  41. Args:
  42. model_dir (str): The directory where the model files are stored.
  43. config (Dict[str, Any], optional): The configuration dictionary. Defaults to None.
  44. device (str, optional): The device to run the inference engine on. Defaults to None.
  45. batch_size (int, optional): The batch size to predict. Defaults to 1.
  46. pp_option (PaddlePredictorOption, optional): The inference engine options. Defaults to None.
  47. """
  48. super().__init__(model_dir=model_dir, config=config)
  49. if not pp_option:
  50. pp_option = PaddlePredictorOption(model_name=self.model_name)
  51. if device:
  52. pp_option.set_device(device)
  53. trt_dynamic_shapes = (
  54. self.config.get("Hpi", {})
  55. .get("backend_configs", {})
  56. .get("paddle_infer", {})
  57. .get("trt_dynamic_shapes", None)
  58. )
  59. if trt_dynamic_shapes:
  60. pp_option.trt_dynamic_shapes = trt_dynamic_shapes
  61. trt_dynamic_shape_input_data = (
  62. self.config.get("Hpi", {})
  63. .get("backend_configs", {})
  64. .get("paddle_infer", {})
  65. .get("trt_dynamic_shape_input_data", None)
  66. )
  67. if trt_dynamic_shape_input_data:
  68. pp_option.trt_dynamic_shape_input_data = trt_dynamic_shape_input_data
  69. self.pp_option = pp_option
  70. self.pp_option.batch_size = batch_size
  71. self.batch_sampler.batch_size = batch_size
  72. logging.debug(f"{self.__class__.__name__}: {self.model_dir}")
  73. def __call__(
  74. self,
  75. input: Any,
  76. batch_size: int = None,
  77. device: str = None,
  78. pp_option: PaddlePredictorOption = None,
  79. **kwargs: Dict[str, Any],
  80. ) -> Iterator[Any]:
  81. """
  82. Predict with the input data.
  83. Args:
  84. input (Any): The input data to be predicted.
  85. batch_size (int, optional): The batch size to use. Defaults to None.
  86. device (str, optional): The device to run the predictor on. Defaults to None.
  87. pp_option (PaddlePredictorOption, optional): The predictor options to set. Defaults to None.
  88. **kwargs (Dict[str, Any]): Additional keyword arguments to set up predictor.
  89. Returns:
  90. Iterator[Any]: An iterator yielding the prediction output.
  91. """
  92. self.set_predictor(batch_size, device, pp_option)
  93. if INFER_BENCHMARK:
  94. # TODO(zhang-prog): Get metadata of input data
  95. @benchmark.timeit_with_options(name=ENTRY_POINT_NAME)
  96. def _apply(input, **kwargs):
  97. return list(self.apply(input, **kwargs))
  98. if isinstance(input, list):
  99. raise TypeError("`input` cannot be a list in benchmark mode")
  100. input = [input] * batch_size
  101. if not (INFER_BENCHMARK_WARMUP > 0 or INFER_BENCHMARK_ITERS > 0):
  102. raise RuntimeError(
  103. "At least one of `INFER_BENCHMARK_WARMUP` and `INFER_BENCHMARK_ITERS` must be greater than zero"
  104. )
  105. if INFER_BENCHMARK_WARMUP > 0:
  106. benchmark.start_warmup()
  107. for _ in range(INFER_BENCHMARK_WARMUP):
  108. output = _apply(input, **kwargs)
  109. benchmark.collect(batch_size)
  110. benchmark.stop_warmup()
  111. if INFER_BENCHMARK_ITERS > 0:
  112. for _ in range(INFER_BENCHMARK_ITERS):
  113. output = _apply(input, **kwargs)
  114. benchmark.collect(batch_size)
  115. yield output[0]
  116. else:
  117. yield from self.apply(input, **kwargs)
  118. def set_predictor(
  119. self,
  120. batch_size: int = None,
  121. device: str = None,
  122. pp_option: PaddlePredictorOption = None,
  123. ) -> None:
  124. """
  125. Sets the predictor configuration.
  126. Args:
  127. batch_size (int, optional): The batch size to use. Defaults to None.
  128. device (str, optional): The device to run the predictor on. Defaults to None.
  129. pp_option (PaddlePredictorOption, optional): The predictor options to set. Defaults to None.
  130. Returns:
  131. None
  132. """
  133. if batch_size:
  134. self.batch_sampler.batch_size = batch_size
  135. self.pp_option.batch_size = batch_size
  136. if device and device != self.pp_option.device:
  137. self.pp_option.set_device(device)
  138. if pp_option and pp_option != self.pp_option:
  139. self.pp_option = pp_option