pipeline.py

# Copyright (c) 2024 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import Any, Dict, Optional

import numpy as np

from ...utils.pp_option import PaddlePredictorOption
from ...common.reader import ReadImage
from ...common.batch_sampler import ImageBatchSampler
from ..components import CropByBoxes, FaissIndexer, FaissBuilder, IndexData
from ..base import BasePipeline
from .result import ShiTuResult


class ShiTuV2Pipeline(BasePipeline):
    """ShiTuV2 Pipeline"""

    entities = "PP-ShiTuV2"

    def __init__(
        self,
        config: Dict,
        device: Optional[str] = None,
        pp_option: Optional[PaddlePredictorOption] = None,
        use_hpip: bool = False,
    ) -> None:
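        """Initializes the PP-ShiTuV2 pipeline.

        Args:
            config (Dict): Pipeline configuration. Reads the
                "SubModules.Detection" and "SubModules.Recognition" model
                configs plus the optional keys "rec_topk", "rec_threshold",
                "hamming_radius", "det_threshold", and "index".
            device (Optional[str]): Device to run inference on.
            pp_option (Optional[PaddlePredictorOption]): PaddlePaddle predictor options.
            use_hpip (bool): Whether to use the high-performance inference plugin.
        """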
        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
        # Defaults for retrieval/detection parameters; each can be overridden
        # per call via ``predict(**kwargs)``.
        self._topk, self._rec_threshold, self._hamming_radius, self._det_threshold = (
            config.get("rec_topk", 5),
            config.get("rec_threshold", 0.5),
            config.get("hamming_radius", None),
            config.get("det_threshold", 0.5),
        )
        index = config.get("index", None)
        self.img_reader = ReadImage(format="BGR")
        self.det_model = self.create_model(config["SubModules"]["Detection"])
        self.rec_model = self.create_model(config["SubModules"]["Recognition"])
        self.crop_by_boxes = CropByBoxes()
        # A default indexer is built only when an index is given in the config;
        # otherwise an index must be passed to ``predict``.
        self.indexer = self._build_indexer(index=index) if index else None
        self.batch_sampler = ImageBatchSampler(
            batch_size=self.det_model.batch_sampler.batch_size
        )

    def predict(self, input, index=None, **kwargs):
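        """Runs detection, feature extraction, and index retrieval on the input.

        Args:
            input: Image input accepted by the batch sampler (e.g. path(s) or
                array(s)).
            index: Optional index to search; if omitted, the indexer built from
                the pipeline config is used.
            **kwargs: Per-call overrides for "rec_topk", "rec_threshold",
                "hamming_radius", and "det_threshold".

        Yields:
            ShiTuResult: Recognition result for each input image.
        """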
        indexer = FaissIndexer(index) if index is not None else self.indexer
        assert indexer
        # Drop unset overrides so that the config defaults are kept.
        kwargs = {k: v for k, v in kwargs.items() if v is not None}
        topk = kwargs.get("rec_topk", self._topk)
        rec_threshold = kwargs.get("rec_threshold", self._rec_threshold)
        hamming_radius = kwargs.get("hamming_radius", self._hamming_radius)
        det_threshold = kwargs.get("det_threshold", self._det_threshold)
        for img_id, batch_data in enumerate(self.batch_sampler(input)):
            raw_imgs = self.img_reader(batch_data.instances)
            all_det_res = list(self.det_model(raw_imgs, threshold=det_threshold))
            for input_data, raw_img, det_res in zip(
                batch_data.instances, raw_imgs, all_det_res
            ):
                rec_res = self.get_rec_result(
                    raw_img, det_res, indexer, rec_threshold, hamming_radius, topk
                )
                yield self.get_final_result(input_data, raw_img, det_res, rec_res)

    def get_rec_result(
        self, raw_img, det_res, indexer, rec_threshold, hamming_radius, topk
    ):
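        """Crops each detected box and retrieves its labels from the index.

        Returns:
            dict: {"label": [...], "score": [...]} with one entry per box.
        """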
        if len(det_res["boxes"]) == 0:
            # No object detected: fall back to using the whole image as a
            # single candidate box.
            h, w = raw_img.shape[:2]
            det_res["boxes"].append(
                {
                    "cls_id": 0,
                    "label": "full_img",
                    "score": 0,
                    "coordinate": [0, 0, w, h],
                }
            )
        subs_of_img = list(self.crop_by_boxes(raw_img, det_res["boxes"]))
        img_list = [img["img"] for img in subs_of_img]
        all_rec_res = list(self.rec_model(img_list))
        # Query the index with the extracted feature of each crop.
        all_rec_res = indexer(
            [rec_res["feature"] for rec_res in all_rec_res],
            score_thres=rec_threshold,
            hamming_radius=hamming_radius,
            topk=topk,
        )
        output = {"label": [], "score": []}
        for res in all_rec_res:
            output["label"].append(res["label"])
            output["score"].append(res["score"])
        return output

    def get_final_result(self, input_data, raw_img, det_res, rec_res):
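        """Packs detection boxes and retrieval results into a ShiTuResult."""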
        single_img_res = {"input_path": input_data, "input_img": raw_img, "boxes": []}
        for i, obj in enumerate(det_res["boxes"]):
            rec_scores = rec_res["score"][i]
            if isinstance(rec_scores, np.ndarray):
                rec_scores = rec_scores.tolist()
            labels = rec_res["label"][i]
            single_img_res["boxes"].append(
                {
                    "labels": labels,
                    "rec_scores": rec_scores,
                    "det_score": obj["score"],
                    "coordinate": obj["coordinate"],
                }
            )
        return ShiTuResult(single_img_res)

    def build_index(
        self,
        gallery_imgs,
        gallery_label,
        metric_type="IP",
        index_type="HNSW32",
        **kwargs
    ):
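        """Builds a Faiss gallery index from gallery images and labels.

        Note: ``metric_type`` and ``index_type`` are forwarded to
        ``FaissBuilder.build``; extra keyword arguments are currently ignored.

        Returns:
            The index data produced by ``FaissBuilder.build``.
        """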
        return FaissBuilder.build(
            gallery_imgs,
            gallery_label,
            self.rec_model.predict,
            metric_type=metric_type,
            index_type=index_type,
        )

    def remove_index(self, remove_ids, index):
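        """Removes the entries with the given ids from an existing index."""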
        return FaissBuilder.remove(remove_ids, index)

    def append_index(
        self,
        gallery_imgs,
        gallery_label,
        index,
    ):
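        """Appends new gallery images and labels to an existing index."""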
        return FaissBuilder.append(
            gallery_imgs,
            gallery_label,
            self.rec_model.predict,
            index,
        )
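

# ---------------------------------------------------------------------------
# Usage sketch (illustrative only; not executed by this module). It assumes
# the pipeline is obtained through PaddleX's ``create_pipeline`` helper and
# that the gallery/query paths exist locally; the paths are hypothetical.
#
#     from paddlex import create_pipeline
#
#     pipeline = create_pipeline(pipeline="PP-ShiTuV2")
#     index_data = pipeline.build_index(
#         gallery_imgs="gallery/",            # hypothetical image directory
#         gallery_label="gallery/label.txt",  # hypothetical label file
#     )
#     for res in pipeline.predict("query.jpg", index=index_data):
#         print(res)
# ---------------------------------------------------------------------------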