# predictor.py
# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, List, Optional, Sequence, Tuple, Union

import numpy as np

from ....utils.func_register import FuncRegister
from ....modules.object_detection.model_list import MODELS
from ...common.batch_sampler import ImageBatchSampler
from ..common import StaticInfer
from ..base import BasicPredictor
from .processors import (
    DetPad,
    DetPostProcess,
    Normalize,
    PadStride,
    ReadImage,
    Resize,
    ToBatch,
    ToCHWImage,
    WarpAffine,
)
from .result import DetResult
from .utils import STATIC_SHAPE_MODEL_LIST


class DetPredictor(BasicPredictor):

    entities = MODELS

    _FUNC_MAP = {}
    register = FuncRegister(_FUNC_MAP)

    def __init__(
        self,
        *args,
        img_size: Optional[Union[int, Tuple[int, int]]] = None,
        threshold: Optional[Union[float, dict]] = None,
        layout_nms: Optional[bool] = None,
        layout_unclip_ratio: Optional[Union[float, Tuple[float, float], dict]] = None,
        layout_merge_bboxes_mode: Optional[Union[str, dict]] = None,
        **kwargs,
    ):
  48. """Initializes DetPredictor.
  49. Args:
  50. *args: Arbitrary positional arguments passed to the superclass.
  51. img_size (Optional[Union[int, Tuple[int, int]]], optional): The input image size (w, h). Defaults to None.
  52. threshold (Optional[float], optional): The threshold for filtering out low-confidence predictions.
  53. Defaults to None.
  54. layout_nms (bool, optional): Whether to use layout-aware NMS. Defaults to False.
  55. layout_unclip_ratio (Optional[Union[float, Tuple[float, float]]], optional): The ratio of unclipping the bounding box.
  56. Defaults to None.
  57. If it's a single number, then both width and height are used.
  58. If it's a tuple of two numbers, then they are used separately for width and height respectively.
  59. If it's None, then no unclipping will be performed.
  60. layout_merge_bboxes_mode (Optional[Union[str, dict]], optional): The mode for merging bounding boxes. Defaults to None.
  61. **kwargs: Arbitrary keyword arguments passed to the superclass.
  62. """
        super().__init__(*args, **kwargs)
        if img_size is not None:
            assert (
                self.model_name not in STATIC_SHAPE_MODEL_LIST
            ), f"The model {self.model_name} does not support setting the input shape."
            if isinstance(img_size, int):
                img_size = (img_size, img_size)
            elif isinstance(img_size, (tuple, list)):
                assert len(img_size) == 2, "The length of `img_size` should be 2."
            else:
                raise ValueError(
                    f"The type of `img_size` must be int or Tuple[int, int], but got {type(img_size)}."
                )

        if layout_unclip_ratio is not None:
            if isinstance(layout_unclip_ratio, float):
                layout_unclip_ratio = (layout_unclip_ratio, layout_unclip_ratio)
            elif isinstance(layout_unclip_ratio, (tuple, list)):
                assert (
                    len(layout_unclip_ratio) == 2
                ), "The length of `layout_unclip_ratio` should be 2."
            elif isinstance(layout_unclip_ratio, dict):
                pass
            else:
                raise ValueError(
                    f"The type of `layout_unclip_ratio` must be float, Tuple[float, float] or Dict, but got {type(layout_unclip_ratio)}."
                )

        if layout_merge_bboxes_mode is not None:
            if isinstance(layout_merge_bboxes_mode, str):
                assert layout_merge_bboxes_mode in [
                    "union",
                    "large",
                    "small",
                ], f"The value of `layout_merge_bboxes_mode` must be one of ['union', 'large', 'small'] or a dict, but got {layout_merge_bboxes_mode}."

        self.img_size = img_size
        self.threshold = threshold
        self.layout_nms = layout_nms
        self.layout_unclip_ratio = layout_unclip_ratio
        self.layout_merge_bboxes_mode = layout_merge_bboxes_mode
        self.pre_ops, self.infer, self.post_op = self._build()
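
    # A minimal construction sketch (commented out). The keyword set mirrors
    # the signature above; the model directory is a hypothetical placeholder,
    # and `model_dir` is assumed to be accepted by the BasicPredictor base
    # class (it is read back as `self.model_dir` in `_build`):
    #
    #     predictor = DetPredictor(
    #         model_dir="path/to/inference_model",
    #         img_size=640,                 # expanded to (640, 640) above
    #         threshold=0.5,
    #         layout_unclip_ratio=1.05,     # expanded to (1.05, 1.05) above
    #         layout_merge_bboxes_mode="union",
    #     )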

    def _build_batch_sampler(self):
        return ImageBatchSampler()

    def _get_result_class(self):
        return DetResult

    def _build(self) -> Tuple:
        """Build the preprocessors, inference engine, and postprocessors based on the configuration.

        Returns:
            tuple: A tuple containing the preprocessors, inference engine, and postprocessors.
        """
        # build preprocess ops
        pre_ops = [ReadImage(format="RGB")]
        for cfg in self.config["Preprocess"]:
            tf_key = cfg["type"]
            func = self._FUNC_MAP[tf_key]
            cfg.pop("type")
            args = cfg
            op = func(self, **args) if args else func(self)
            if op:
                pre_ops.append(op)
        pre_ops.append(self.build_to_batch())
        if self.img_size is not None:
            if isinstance(pre_ops[1], Resize):
                pre_ops.pop(1)
            pre_ops.insert(1, self.build_resize(self.img_size, False, 2))

        # build infer
        infer = StaticInfer(
            model_dir=self.model_dir,
            model_prefix=self.MODEL_FILE_PREFIX,
            option=self.pp_option,
        )

        # build postprocess op
        post_op = self.build_postprocess()

        return pre_ops, infer, post_op

    def _format_output(self, pred: Sequence[Any]) -> List[dict]:
        """
        Transform batch outputs into a list of single-image outputs.

        Args:
            pred (Sequence[Any]): The input predictions, expected to be a sequence of 2, 3, or 4 elements.
                - When len(pred) == 4, it is expected to be in the format [boxes, class_ids, scores, masks],
                  compatible with SOLOv2 output.
                - When len(pred) == 3, it is expected to be in the format [boxes, box_nums, masks],
                  compatible with Instance Segmentation output.
                - Otherwise, it is expected to be [boxes, box_nums] for plain detection output.

        Returns:
            List[dict]: A list of dictionaries, each containing either 'class_id' and 'masks' (for SOLOv2),
                or 'boxes' and 'masks' (for Instance Segmentation), or just 'boxes' if no masks are provided.
        """
        box_idx_start = 0
        pred_box = []

        if len(pred) == 4:
            # Adapt to SOLOv2
            pred_class_id = []
            pred_mask = []
            pred_class_id.append([pred[1], pred[2]])
            pred_mask.append(pred[3])
            return [
                {
                    "class_id": np.array(pred_class_id[i]),
                    "masks": np.array(pred_mask[i]),
                }
                for i in range(len(pred_class_id))
            ]

        if len(pred) == 3:
            # Adapt to Instance Segmentation
            pred_mask = []
        for idx in range(len(pred[1])):
            np_boxes_num = pred[1][idx]
            box_idx_end = box_idx_start + np_boxes_num
            np_boxes = pred[0][box_idx_start:box_idx_end]
            pred_box.append(np_boxes)
            if len(pred) == 3:
                np_masks = pred[2][box_idx_start:box_idx_end]
                pred_mask.append(np_masks)
            box_idx_start = box_idx_end

        if len(pred) == 3:
            return [
                {"boxes": np.array(pred_box[i]), "masks": np.array(pred_mask[i])}
                for i in range(len(pred_box))
            ]
        else:
            return [{"boxes": np.array(res)} for res in pred_box]
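
    # Shape sketch for `_format_output` with illustrative values (not from a
    # real model run): for plain detection output pred = [boxes, box_nums]
    # where boxes has shape (3, 6) and box_nums = [2, 1], the loop above
    # slices per image and returns
    #     [{"boxes": boxes[0:2]}, {"boxes": boxes[2:3]}]
    # (as arrays); with len(pred) == 3, each dict also carries the matching
    # "masks" slice.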

    def process(
        self,
        batch_data: List[Any],
        threshold: Optional[Union[float, dict]] = None,
        layout_nms: bool = False,
        layout_unclip_ratio: Optional[Union[float, Tuple[float, float], dict]] = None,
        layout_merge_bboxes_mode: Optional[Union[str, dict]] = None,
    ):
        """
        Process a batch of data through preprocessing, inference, and postprocessing.

        Args:
            batch_data (List[Any]): A batch of input data (e.g., image file paths).
            threshold (Optional[Union[float, dict]], optional): The threshold for filtering out
                low-confidence predictions. Defaults to None.
            layout_nms (bool, optional): Whether to use layout-aware NMS. Defaults to False.
            layout_unclip_ratio (Optional[Union[float, Tuple[float, float], dict]], optional): The ratio
                for unclipping the bounding boxes. Defaults to None.
            layout_merge_bboxes_mode (Optional[Union[str, dict]], optional): The mode for merging
                bounding boxes. Defaults to None.

        Returns:
            dict: A dictionary containing the input paths, page indexes, raw images, and detected boxes
                for the batch, under the keys 'input_path', 'page_index', 'input_img', and 'boxes'.
        """
        datas = batch_data.instances
        # preprocess
        for pre_op in self.pre_ops[:-1]:
            datas = pre_op(datas)

        # use `ToBatch` to assemble the batch inputs
        batch_inputs = self.pre_ops[-1](datas)

        # do infer
        batch_preds = self.infer(batch_inputs)

        # process the batch of predictions into a list of single-image results
        preds_list = self._format_output(batch_preds)

        # postprocess
        boxes = self.post_op(
            preds_list,
            datas,
            threshold=threshold or self.threshold,
            layout_nms=layout_nms or self.layout_nms,
            layout_unclip_ratio=layout_unclip_ratio or self.layout_unclip_ratio,
            layout_merge_bboxes_mode=layout_merge_bboxes_mode
            or self.layout_merge_bboxes_mode,
        )

        return {
            "input_path": batch_data.input_paths,
            "page_index": batch_data.page_indexes,
            "input_img": [data["ori_img"] for data in datas],
            "boxes": boxes,
        }
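
    # Note on the `x or self.x` fallbacks above: a falsy per-call override
    # (e.g. threshold=0.0 or layout_nms=False) falls back to the value set in
    # __init__, so zero or False cannot be forced per call; only truthy
    # overrides take effect.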

    @register("Resize")
    def build_resize(self, target_size, keep_ratio=False, interp=2):
        assert target_size
        if isinstance(interp, int):
            interp = {
                0: "NEAREST",
                1: "LINEAR",
                2: "BICUBIC",
                3: "AREA",
                4: "LANCZOS4",
            }[interp]
        op = Resize(target_size=target_size[::-1], keep_ratio=keep_ratio, interp=interp)
        return op

    @register("NormalizeImage")
    def build_normalize(
        self,
        norm_type=None,
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225],
        is_scale=True,
    ):
        if is_scale:
            scale = 1.0 / 255.0
        else:
            scale = 1
        if not norm_type or norm_type == "none":
            norm_type = "mean_std"
        if norm_type != "mean_std":
            mean = 0
            std = 1
        return Normalize(scale=scale, mean=mean, std=std)
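
    # Worked example of the arithmetic above, assuming `Normalize` computes
    # (img * scale - mean) / std channel-wise: with is_scale=True and
    # norm_type="mean_std", a red-channel pixel of 128 maps to
    #     (128 * (1.0 / 255.0) - 0.485) / 0.229 ≈ 0.074
    # while any other norm_type degenerates to pure rescaling (mean=0, std=1).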

    @register("Permute")
    def build_to_chw(self):
        return ToCHWImage()

    @register("Pad")
    def build_pad(self, fill_value=None, size=None):
        if fill_value is None:
            fill_value = [127.5, 127.5, 127.5]
        if size is None:
            size = [3, 640, 640]
        return DetPad(size=size, fill_value=fill_value)

    @register("PadStride")
    def build_pad_stride(self, stride=32):
        return PadStride(stride=stride)

    @register("WarpAffine")
    def build_warp_affine(self, input_h=512, input_w=512, keep_res=True):
        return WarpAffine(input_h=input_h, input_w=input_w, keep_res=keep_res)

    def build_to_batch(self):
        models_required_imgsize = [
            "DETR",
            "DINO",
            "RCNN",
            "YOLOv3",
            "CenterNet",
            "BlazeFace",
            "BlazeFace-FPN-SSH",
            "PP-DocLayout-L",
        ]
        if any(name in self.model_name for name in models_required_imgsize):
            ordered_required_keys = (
                "img_size",
                "img",
                "scale_factors",
            )
        else:
            ordered_required_keys = ("img", "scale_factors")

        return ToBatch(ordered_required_keys=ordered_required_keys)

    def build_postprocess(self):
        if self.threshold is None:
            self.threshold = self.config.get("draw_threshold", 0.5)
        if not self.layout_nms:
            self.layout_nms = self.config.get("layout_nms", None)
        if self.layout_unclip_ratio is None:
            self.layout_unclip_ratio = self.config.get("layout_unclip_ratio", None)
        if self.layout_merge_bboxes_mode is None:
            self.layout_merge_bboxes_mode = self.config.get(
                "layout_merge_bboxes_mode", None
            )
        return DetPostProcess(labels=self.config["label_list"])
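

# A minimal end-to-end usage sketch (commented out). It assumes the
# iterator-style `predict()` entry point provided by the BasicPredictor base
# class elsewhere in PaddleX; the model directory and image path are
# hypothetical placeholders.
#
# if __name__ == "__main__":
#     predictor = DetPredictor(model_dir="path/to/inference_model", threshold=0.5)
#     for result in predictor.predict("path/to/image.jpg"):
#         print(result["boxes"])  # each result is a DetResult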