solve conflict between ultra-infer and paddle and modify hpi info (#2850)

* solve conflict between ultra-infer and paddle and modify hpi info

* create the pipeline using hpi_params in YAML configuration

* create the pipeline using hpi_params in YAML configuration [submodule]

* update models' result reference

* add NOTE

* merge

* fix

* fix
zhang-prog · 10 months ago · commit 6e101efedc
51 changed files with 259 additions and 297 deletions
  1. +1 -1    libs/paddlex-hpi/pyproject.toml
  2. +2 -2    libs/paddlex-hpi/src/paddlex_hpi/models/anomaly_detection.py
  3. +0 -2    libs/paddlex-hpi/src/paddlex_hpi/models/base.py
  4. +1 -1    libs/paddlex-hpi/src/paddlex_hpi/models/formula_recognition.py
  5. +2 -2    libs/paddlex-hpi/src/paddlex_hpi/models/general_recognition.py
  6. +1 -1    libs/paddlex-hpi/src/paddlex_hpi/models/image_classification.py
  7. +1 -1    libs/paddlex-hpi/src/paddlex_hpi/models/image_unwarping.py
  8. +1 -1    libs/paddlex-hpi/src/paddlex_hpi/models/instance_segmentation.py
  9. +3 -1    libs/paddlex-hpi/src/paddlex_hpi/models/multilabel_classification.py
  10. +1 -1   libs/paddlex-hpi/src/paddlex_hpi/models/object_detection.py
  11. +1 -1   libs/paddlex-hpi/src/paddlex_hpi/models/semantic_segmentation.py
  12. +3 -1   libs/paddlex-hpi/src/paddlex_hpi/models/table_recognition.py
  13. +1 -1   libs/paddlex-hpi/src/paddlex_hpi/models/text_detection.py
  14. +1 -1   libs/paddlex-hpi/src/paddlex_hpi/models/text_recognition.py
  15. +1 -1   libs/paddlex-hpi/src/paddlex_hpi/models/ts_ad.py
  16. +1 -1   libs/paddlex-hpi/src/paddlex_hpi/models/ts_cls.py
  17. +1 -1   libs/paddlex-hpi/src/paddlex_hpi/models/ts_fc.py
  18. +23 -7  paddlex/inference/models_new/common/tokenizer/tokenizer_utils.py
  19. +9 -7   paddlex/inference/models_new/multilingual_speech_recognition/predictor.py
  20. +116 -56  paddlex/inference/models_new/open_vocabulary_detection/processors/groundingdino_processors.py
  21. +45 -28   paddlex/inference/models_new/open_vocabulary_segmentation/processors/sam_processer.py
  22. +0 -3   paddlex/inference/pipelines_new/__init__.py
  23. +3 -9   paddlex/inference/pipelines_new/anomaly_detection/pipeline.py
  24. +1 -4   paddlex/inference/pipelines_new/attribute_recognition/pipeline.py
  25. +3 -5   paddlex/inference/pipelines_new/base.py
  26. +1 -5   paddlex/inference/pipelines_new/doc_preprocessor/pipeline.py
  27. +1 -5   paddlex/inference/pipelines_new/formula_recognition/pipeline.py
  28. +2 -6   paddlex/inference/pipelines_new/image_classification/pipeline.py
  29. +1 -5   paddlex/inference/pipelines_new/image_multilabel_classification/pipeline.py
  30. +1 -5   paddlex/inference/pipelines_new/instance_segmentation/pipeline.py
  31. +1 -5   paddlex/inference/pipelines_new/keypoint_detection/pipeline.py
  32. +1 -5   paddlex/inference/pipelines_new/layout_parsing/pipeline.py
  33. +2 -6   paddlex/inference/pipelines_new/multilingual_speech_recognition/pipeline.py
  34. +1 -5   paddlex/inference/pipelines_new/object_detection/pipeline.py
  35. +1 -5   paddlex/inference/pipelines_new/ocr/pipeline.py
  36. +1 -5   paddlex/inference/pipelines_new/pp_chatocr/pipeline_base.py
  37. +1 -5   paddlex/inference/pipelines_new/pp_chatocr/pipeline_v3.py
  38. +1 -5   paddlex/inference/pipelines_new/pp_chatocr/pipeline_v4.py
  39. +1 -4   paddlex/inference/pipelines_new/pp_shitu_v2/pipeline.py
  40. +1 -5   paddlex/inference/pipelines_new/rotated_object__detection/pipeline.py
  41. +1 -5   paddlex/inference/pipelines_new/seal_recognition/pipeline.py
  42. +1 -5   paddlex/inference/pipelines_new/semantic_segmentation/pipeline.py
  43. +1 -5   paddlex/inference/pipelines_new/small_object__detection/pipeline.py
  44. +1 -5   paddlex/inference/pipelines_new/table_recognition/pipeline.py
  45. +3 -9   paddlex/inference/pipelines_new/ts_anomaly_detection/pipeline.py
  46. +3 -9   paddlex/inference/pipelines_new/ts_classification/pipeline.py
  47. +3 -9   paddlex/inference/pipelines_new/ts_forecasting/pipeline.py
  48. +1 -5   paddlex/inference/pipelines_new/video_classification/pipeline.py
  49. +1 -5   paddlex/inference/pipelines_new/video_detection/pipeline.py
  50. +3 -25  paddlex/paddlex_cli.py
  51. +2 -0   paddlex/utils/lazy_loader.py

+ 1 - 1
libs/paddlex-hpi/pyproject.toml

@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "paddlex-hpi"
-version = "3.0.0.b2"
+version = "3.0.0.rc0"
 description = ""
 readme = "README.md"
 authors = []

+ 2 - 2
libs/paddlex-hpi/src/paddlex_hpi/models/anomaly_detection.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import SegResult
+from paddlex.inference.models_new.anomaly_detection.result import UadResult
 from paddlex.modules.anomaly_detection.model_list import MODELS
 
 from paddlex_hpi.models.base import CVPredictor
@@ -41,7 +41,7 @@ class UadPredictor(CVPredictor):
         return ImageBatchSampler()
 
     def _get_result_class(self) -> type:
-        return SegResult
+        return UadResult
 
     def process(self, batch_data: List[Any]) -> Dict[str, List[Any]]:
         batch_raw_imgs = self._data_reader(imgs=batch_data)

+ 0 - 2
libs/paddlex-hpi/src/paddlex_hpi/models/base.py

@@ -42,8 +42,6 @@ HPI_CONFIG_KEY: Final[str] = "Hpi"
 
 
 class HPIParams(TypedDict, total=False):
-    serial_number: Optional[str]
-    update_license: bool
     config: Dict[str, Any]
 
 

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/formula_recognition.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import FormulaRecResult
+from paddlex.inference.models_new.formula_recognition.result import FormulaRecResult
 from paddlex.modules.formula_recognition.model_list import MODELS
 
 from paddlex_hpi.models.base import CVPredictor

+ 2 - 2
libs/paddlex-hpi/src/paddlex_hpi/models/general_recognition.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import BaseResult
+from paddlex.inference.models_new.image_feature.result import IdentityResult
 from paddlex.modules.general_recognition.model_list import MODELS
 
 from paddlex_hpi.models.base import CVPredictor
@@ -41,7 +41,7 @@ class ShiTuRecPredictor(CVPredictor):
         return ImageBatchSampler()
 
     def _get_result_class(self) -> type:
-        return BaseResult
+        return IdentityResult
 
     def process(self, batch_data: List[Any]) -> Dict[str, List[Any]]:
         batch_raw_imgs = self._data_reader(imgs=batch_data)

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/image_classification.py

@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Union
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import TopkResult
+from paddlex.inference.models_new.image_classification.result import TopkResult
 from paddlex.modules.image_classification.model_list import MODELS
 from pydantic import BaseModel
 

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/image_unwarping.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import DocTrResult
+from paddlex.inference.models_new.image_unwarping.result import DocTrResult
 from paddlex.modules.image_unwarping.model_list import MODELS
 
 from paddlex_hpi.models.base import CVPredictor

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/instance_segmentation.py

@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Union
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import InstanceSegResult
+from paddlex.inference.models_new.instance_segmentation.result import InstanceSegResult
 from paddlex.modules.instance_segmentation.model_list import MODELS
 from pydantic import BaseModel
 

+ 3 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/multilabel_classification.py

@@ -21,7 +21,9 @@ from pathlib import Path
 import tempfile
 import yaml
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import MLClassResult
+from paddlex.inference.models_new.image_multilabel_classification.result import (
+    MLClassResult,
+)
 from paddlex.modules.multilabel_classification.model_list import MODELS
 
 from paddlex_hpi.models.base import CVPredictor, HPIParams

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/object_detection.py

@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Union
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import DetResult
+from paddlex.inference.models_new.object_detection.result import DetResult
 from paddlex.modules.object_detection.model_list import MODELS
 from pydantic import BaseModel
 

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/semantic_segmentation.py

@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Tuple, Union
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import SegResult
+from paddlex.inference.models_new.semantic_segmentation.result import SegResult
 from paddlex.modules.semantic_segmentation.model_list import MODELS
 
 from paddlex_hpi.models.base import CVPredictor, HPIParams

+ 3 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/table_recognition.py

@@ -18,7 +18,9 @@ from typing import Any, Dict, List
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import TableRecResult
+from paddlex.inference.models_new.table_structure_recognition.result import (
+    TableRecResult,
+)
 from paddlex.modules.table_recognition.model_list import MODELS
 
 from paddlex_hpi._utils.compat import get_compat_version

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/text_detection.py

@@ -18,7 +18,7 @@ from typing import Any, Dict, List, Optional, Union
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import TextDetResult
+from paddlex.inference.models_new.text_detection.result import TextDetResult
 from paddlex.modules.text_detection.model_list import CURVE_MODELS, MODELS
 
 from paddlex_hpi._utils.misc import parse_scale

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/text_recognition.py

@@ -18,7 +18,7 @@ from typing import Any, Dict, List
 import ultra_infer as ui
 import numpy as np
 from paddlex.inference.common.batch_sampler import ImageBatchSampler
-from paddlex.inference.results import TextRecResult
+from paddlex.inference.models_new.text_recognition.result import TextRecResult
 from paddlex.modules.text_recognition.model_list import MODELS
 
 from paddlex_hpi.models.base import CVPredictor

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/ts_ad.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List, Union
 import ultra_infer as ui
 import pandas as pd
 from paddlex.inference.common.batch_sampler import TSBatchSampler
-from paddlex.inference.results import TSAdResult
+from paddlex.inference.models_new.ts_anomaly_detection.result import TSAdResult
 from paddlex.modules.ts_anomaly_detection.model_list import MODELS
 
 from paddlex_hpi.models.base import TSPredictor

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/ts_cls.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List, Union
 import ultra_infer as ui
 import pandas as pd
 from paddlex.inference.common.batch_sampler import TSBatchSampler
-from paddlex.inference.results import TSClsResult
+from paddlex.inference.models_new.ts_classification.result import TSClsResult
 from paddlex.modules.ts_classification.model_list import MODELS
 
 from paddlex_hpi.models.base import TSPredictor

+ 1 - 1
libs/paddlex-hpi/src/paddlex_hpi/models/ts_fc.py

@@ -17,7 +17,7 @@ from typing import Any, Dict, List, Union
 import ultra_infer as ui
 import pandas as pd
 from paddlex.inference.common.batch_sampler import TSBatchSampler
-from paddlex.inference.results import TSFcResult
+from paddlex.inference.models_new.ts_forecasting.result import TSFcResult
 from paddlex.modules.ts_forecast.model_list import MODELS
 
 from paddlex_hpi.models.base import TSPredictor

+ 23 - 7
paddlex/inference/models_new/common/tokenizer/tokenizer_utils.py

@@ -267,7 +267,13 @@ def adapt_stale_fwd_patch(self, name, value):
     return value
 
 
-class InitTrackerMeta(type(paddle.nn.Layer)):
+# NOTE:
+# Modification:
+#   class InitTrackerMeta(type(paddle.nn.Layer)) -> class InitTrackerMeta(type)
+# Context:
+#   1. In paddle 3.0rc, type(paddle.nn.Layer) == type
+#   2. Solve the conflict between ultra-infer and paddle
+class InitTrackerMeta(type):
     """
     This metaclass wraps the `__init__` method of a class to add `init_config`
     attribute for instances of that class, and `init_config` use a dict to track
@@ -639,7 +645,7 @@ class ChatTemplateMixin:
         tokenize: bool = True,
         context_data: Dict[str, Any] = {},
         **tokenizer_kwargs,
-    ) -> str | dict[str, numpy.ndarray | paddle.Tensor]:
+    ) -> str | dict[str, Union["numpy.ndarray", "paddle.Tensor"]]:
         """apply chat_template rules to conversation which should not be batched data
 
         Args:
@@ -648,7 +654,7 @@ class ChatTemplateMixin:
             tokenize (bool, optional): whether do tokenization. Defaults to True.
 
         Returns:
-            str | dict[str, numpy.ndarray | paddle.Tensor]: return the result of applied data
+            str | dict[str, Union["numpy.ndarray", "paddle.Tensor"]]: return the result of applied data
         """
         if not self.chat_template:
             raise ValueError(
@@ -673,7 +679,7 @@ class ChatTemplateMixin:
         self,
         conversation: List[Dict[str, str]] | str,
         context_data: Dict[str, Any] = {},
-    ) -> str | dict[str, numpy.ndarray | paddle.Tensor]:
+    ) -> str | dict[str, Union["numpy.ndarray", "paddle.Tensor"]]:
         context_data = self.chat_template._init_context_data(context_data)
 
         if isinstance(conversation, str):
@@ -691,7 +697,7 @@ class ChatTemplateMixin:
         self,
         conversation: Union[Dict[str, str] | Dict[str, str]] | str,
         add_generation_prompt=True,
-    ) -> str | dict[str, numpy.ndarray | paddle.Tensor]:
+    ) -> str | dict[str, Union["numpy.ndarray", "paddle.Tensor"]]:
         if isinstance(conversation, str):
             conversations = [{"role": "user", "content": conversation}]
         elif isinstance(conversation, list):
@@ -2047,6 +2053,7 @@ class PretrainedTokenizer(ChatTemplateMixin, PretrainedTokenizerBase):
         else:
             return "", prefix_offset, read_offset
 
+
 def _is_control(char):
     """Checks whether `chars` is a control character."""
     # These are technically control characters but we count them as whitespace
@@ -2058,6 +2065,7 @@ def _is_control(char):
         return True
     return False
 
+
 def _is_punctuation(char):
     """Checks whether `chars` is a punctuation character."""
     cp = ord(char)
@@ -2065,13 +2073,19 @@ def _is_punctuation(char):
     # Characters such as "^", "$", and "`" are not in the Unicode
     # Punctuation class but we treat them as punctuation anyways, for
     # consistency.
-    if (cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126):
+    if (
+        (cp >= 33 and cp <= 47)
+        or (cp >= 58 and cp <= 64)
+        or (cp >= 91 and cp <= 96)
+        or (cp >= 123 and cp <= 126)
+    ):
         return True
     cat = unicodedata.category(char)
     if cat.startswith("P"):
         return True
     return False
 
+
 def _is_symbol(char):
     """Check whether CP is the codepoint of a Symbol character."""
     cp = ord(char)
@@ -2081,6 +2095,7 @@ def _is_symbol(char):
         return True
     return False
 
+
 def _is_whitespace(char):
     """
     Checks whether `chars` is a whitespace character.
@@ -2094,6 +2109,7 @@ def _is_whitespace(char):
         return True
     return False
 
+
 def convert_to_unicode(text):
     """
     Converts `text` to Unicode (if it's not already), assuming utf-8 input.
@@ -2109,6 +2125,7 @@ def convert_to_unicode(text):
     else:
         raise ValueError("Unsupported string type: %s" % (type(text)))
 
+
 def whitespace_tokenize(text):
     """
     Runs basic whitespace cleaning and splitting on a peice of text.
@@ -2122,4 +2139,3 @@ def whitespace_tokenize(text):
         return []
     tokens = text.split()
     return tokens
-    

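A note on the InitTrackerMeta change above: per the NOTE, type(paddle.nn.Layer) is simply type in Paddle 3.0rc, so deriving the metaclass from plain type preserves behaviour while dropping the module-level dependency on paddle, which is part of resolving the ultra-infer/paddle conflict. The class docstring says the metaclass wraps __init__ so each instance records an init_config of its constructor arguments; the following is only a minimal illustrative sketch of that idea, not the PaddleX implementation:

    import functools

    class InitTracker(type):
        """Toy metaclass: wrap __init__ so instances record their constructor arguments."""

        def __init__(cls, name, bases, attrs):
            super().__init__(name, bases, attrs)
            original_init = cls.__init__

            @functools.wraps(original_init)
            def wrapped_init(self, *args, **kwargs):
                # Track how the instance was constructed.
                self.init_config = {"args": args, "kwargs": kwargs}
                original_init(self, *args, **kwargs)

            cls.__init__ = wrapped_init

    class ToyTokenizer(metaclass=InitTracker):
        def __init__(self, vocab_size=100):
            self.vocab_size = vocab_size

    tok = ToyTokenizer(vocab_size=50)
    assert tok.init_config == {"args": (), "kwargs": {"vocab_size": 50}}
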
+ 9 - 7
paddlex/inference/models_new/multilingual_speech_recognition/predictor.py

@@ -20,13 +20,6 @@ from ...common.batch_sampler import AudioBatchSampler
 from ..base import BasicPredictor
 from .result import WhisperResult
 from ...utils.io import AudioReader
-from .processors import (
-    ModelDimensions,
-    log_mel_spectrogram,
-    Whisper,
-    LANGUAGES,
-    TO_LANGUAGE_CODE,
-)
 from ....modules.multilingual_speech_recognition.model_list import MODELS
 from ....utils.download import download_and_extract
 
@@ -70,6 +63,13 @@ class WhisperPredictor(BasicPredictor):
         Returns:
             AudioReader: An instance of AudioReader.
         """
+        from .processors import (
+            ModelDimensions,
+            Whisper,
+            LANGUAGES,
+            TO_LANGUAGE_CODE,
+        )
+
         # build model
         model_dict = paddle.load(self.config["model_file"])
         dims = ModelDimensions(**model_dict["dims"])
@@ -91,6 +91,8 @@ class WhisperPredictor(BasicPredictor):
         Returns:
             dict: A dictionary containing the input path and result. The result include 'text', 'segments' and 'language'.
         """
+        from .processors import log_mel_spectrogram
+
         # load mel_filters from resource_dir and extract feature for audio
         audio, sample_rate = self.audio_reader.read(batch_data[0])
         audio = paddle.to_tensor(audio)

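In the Whisper predictor above, the processor imports move from module level into the methods that first need them, so importing predictor.py no longer pulls in the processors (and whatever they depend on) at import time; judging from the commit title this is another measure against the ultra-infer/paddle import conflict, though the diff itself does not state the reason. A generic sketch of the deferred-import pattern, with json standing in for the heavy dependency:

    class ToyPredictor:
        def build_model(self, config_path: str):
            # Heavy dependency imported only when the model is actually built,
            # not when this module is imported.
            import json  # stand-in for `from .processors import ...`

            with open(config_path) as f:
                return json.load(f)
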
+ 116 - 56
paddlex/inference/models_new/open_vocabulary_detection/processors/groundingdino_processors.py

@@ -18,10 +18,14 @@ from typing import Dict, List, Optional, Union, Tuple
 import numpy as np
 import PIL
 
-import paddle
-import paddle.vision.transforms as T
-import paddle.nn.functional as F
 from ...common.tokenizer.bert_tokenizer import BertTokenizer
+from .....utils.lazy_loader import LazyLoader
+
+# NOTE: LazyLoader is used to avoid conflicts between ultra-infer and Paddle
+paddle = LazyLoader("lazy_paddle", globals(), "paddle")
+T = LazyLoader("T", globals(), "paddle.vision.transforms")
+F = LazyLoader("F", globals(), "paddle.nn.functional")
+
 
 def _max_by_axis(the_list):
     maxes = the_list[0]
@@ -30,6 +34,7 @@ def _max_by_axis(the_list):
             maxes[index] = max(maxes[index], item)
     return maxes
 
+
 def _text_pad_batch_data(
     insts,
     pad_idx=0,
@@ -45,7 +50,7 @@ def _text_pad_batch_data(
     extract=False,
 ):
     """Pad the instances to the max sequence length in batch, and generate the
-       corresponding position data and attention bias.
+    corresponding position data and attention bias.
     """
     return_list = []
     max_len = max(len(inst) for inst in insts) if max_seq_len is None else max_seq_len
@@ -54,16 +59,25 @@ def _text_pad_batch_data(
         boxes = np.array(boxes, dtype="int64")
         return boxes
 
-    inst_data = np.array([inst + list([pad_idx] * (max_len - len(inst))) for inst in insts])
+    inst_data = np.array(
+        [inst + list([pad_idx] * (max_len - len(inst))) for inst in insts]
+    )
     return_list += [inst_data.astype("int64").reshape([-1, max_len, 1])]
 
     if return_pos:
-        inst_pos = np.array([list(range(0, len(inst))) + [pad_idx] * (max_len - len(inst)) for inst in insts])
+        inst_pos = np.array(
+            [
+                list(range(0, len(inst))) + [pad_idx] * (max_len - len(inst))
+                for inst in insts
+            ]
+        )
 
         return_list += [inst_pos.astype("int64").reshape([-1, max_len, 1])]
 
     if return_input_mask:
-        input_mask_data = np.array([[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts])
+        input_mask_data = np.array(
+            [[1] * len(inst) + [0] * (max_len - len(inst)) for inst in insts]
+        )
         input_mask_data = np.expand_dims(input_mask_data, axis=-1)
         return_list += [input_mask_data.astype("float32")]
 
@@ -84,8 +98,8 @@ def _text_pad_batch_data(
 
 
 class GroundingDINOPostProcessor(object):
-    """PostProcessors for GroundingDINO
-    """
+    """PostProcessors for GroundingDINO"""
+
     def __init__(
         self,
         tokenizer,
@@ -103,10 +117,21 @@ class GroundingDINOPostProcessor(object):
         self.box_threshold = box_threshold
         self.text_threshold = text_threshold
 
-    def __call__(self, pred_boxes, pred_logits, prompt, src_images, box_threshold = None, text_threshold = None, **kwargs):
-        
+    def __call__(
+        self,
+        pred_boxes,
+        pred_logits,
+        prompt,
+        src_images,
+        box_threshold=None,
+        text_threshold=None,
+        **kwargs,
+    ):
+
         box_threshold = self.box_threshold if box_threshold is None else box_threshold
-        text_threshold = self.text_threshold if text_threshold is None else text_threshold
+        text_threshold = (
+            self.text_threshold if text_threshold is None else text_threshold
+        )
 
         if isinstance(pred_logits, np.ndarray):
             pred_logits = paddle.to_tensor(pred_logits)
@@ -119,16 +144,30 @@ class GroundingDINOPostProcessor(object):
         rst_boxes = []
         for pred_logit, pred_box, src_image in zip(pred_logits, pred_boxes, src_images):
             rst_boxes.append(
-                self.postprocess(pred_logit, pred_box, prompt, src_image, box_threshold, text_threshold)
+                self.postprocess(
+                    pred_logit,
+                    pred_box,
+                    prompt,
+                    src_image,
+                    box_threshold,
+                    text_threshold,
+                )
             )
 
         return rst_boxes
-        
-    def postprocess(self, pred_logits, pred_boxes, src_prompt, src_image, box_threshold, text_threshold):
-        """Post Process for prediction result of single image.
-        """
 
-        logits = F.sigmoid(pred_logits) 
+    def postprocess(
+        self,
+        pred_logits,
+        pred_boxes,
+        src_prompt,
+        src_image,
+        box_threshold,
+        text_threshold,
+    ):
+        """Post Process for prediction result of single image."""
+
+        logits = F.sigmoid(pred_logits)
         boxes = pred_boxes
 
         logits_filt = logits.clone()
@@ -150,7 +189,7 @@ class GroundingDINOPostProcessor(object):
                 {
                     "coordinate": box.detach().cpu().tolist(),
                     "label": pred_phrase,
-                    "score": pred_score
+                    "score": pred_score,
                 }
             )
 
@@ -168,8 +207,7 @@ class GroundingDINOPostProcessor(object):
 
 
 class GroundingDINOProcessor(object):
-    """Image and Text Processors for GroundingDINO
-    """
+    """Image and Text Processors for GroundingDINO"""
 
     def __init__(
         self,
@@ -192,9 +230,8 @@ class GroundingDINOProcessor(object):
             image_std=image_std,
             do_nested=image_do_nested,
         )
-        tokenizer_dir = os.path.join(model_dir, 'tokenizer')
-        assert os.path.isdir(tokenizer_dir), \
-            f'{tokenizer_dir} not exists.'
+        tokenizer_dir = os.path.join(model_dir, "tokenizer")
+        assert os.path.isdir(tokenizer_dir), f"{tokenizer_dir} not exists."
         self.tokenizer = BertTokenizer.from_pretrained(tokenizer_dir)
 
     def __call__(
@@ -203,23 +240,30 @@ class GroundingDINOProcessor(object):
         text: str,
         **kwargs,
     ):
-            
+
         self.prompt = self.text_processor.pre_caption(text)
-        input_ids = self.tokenizer([self.prompt]).input_ids 
-        special_tokens = self.tokenizer.convert_tokens_to_ids(["[CLS]", "[SEP]", ".", "?"])
+        input_ids = self.tokenizer([self.prompt]).input_ids
+        special_tokens = self.tokenizer.convert_tokens_to_ids(
+            ["[CLS]", "[SEP]", ".", "?"]
+        )
         tokenized_out = self.text_processor(input_ids, special_tokens)
 
         image_tensor, mask = self.image_processor(images)
 
         paddle_rst = [
-            tokenized_out['attention_mask'], tokenized_out['input_ids'], mask, tokenized_out['position_ids'], tokenized_out['text_self_attention_masks'], image_tensor
+            tokenized_out["attention_mask"],
+            tokenized_out["input_ids"],
+            mask,
+            tokenized_out["position_ids"],
+            tokenized_out["text_self_attention_masks"],
+            image_tensor,
         ]
         return [arr.numpy() for arr in paddle_rst]
 
 
 class GroundingDinoTextProcessor(object):
-    """Constructs a GroundingDino text processor.
-    """
+    """Constructs a GroundingDino text processor."""
+
     def __init__(
         self,
         max_words: int = 256,
@@ -231,39 +275,45 @@ class GroundingDinoTextProcessor(object):
         input_ids,
         special_tokens_list,
     ):
-        """Preprocess the text with tokenization.
-        """
+        """Preprocess the text with tokenization."""
         tokenized_out = {}
         input_ids = _text_pad_batch_data(input_ids)
         input_ids = paddle.to_tensor(input_ids, dtype=paddle.int64).squeeze(-1)
-        tokenized_out["input_ids"] = input_ids 
+        tokenized_out["input_ids"] = input_ids
         tokenized_out["attention_mask"] = paddle.cast(input_ids != 0, paddle.int64)
 
         (
             text_self_attention_masks,
             position_ids,
             cate_to_token_mask_list,
-        ) = self.generate_masks_with_special_tokens_and_transfer_map(tokenized_out, special_tokens_list)
+        ) = self.generate_masks_with_special_tokens_and_transfer_map(
+            tokenized_out, special_tokens_list
+        )
 
         if text_self_attention_masks.shape[1] > self.max_words:
-            text_self_attention_masks = text_self_attention_masks[:, : self.max_words, : self.max_words]
+            text_self_attention_masks = text_self_attention_masks[
+                :, : self.max_words, : self.max_words
+            ]
             position_ids = position_ids[:, : self.max_words]
             tokenized_out["input_ids"] = tokenized_out["input_ids"][:, : self.max_words]
-            tokenized_out["attention_mask"] = tokenized_out["attention_mask"][:, : self.max_words]
+            tokenized_out["attention_mask"] = tokenized_out["attention_mask"][
+                :, : self.max_words
+            ]
         tokenized_out["position_ids"] = position_ids
         tokenized_out["text_self_attention_masks"] = text_self_attention_masks
 
         return tokenized_out
 
     def pre_caption(self, caption: str) -> str:
-        """Preprocess the text before tokenization.
-        """
+        """Preprocess the text before tokenization."""
         caption = caption.strip()
         if not caption.endswith("."):
             caption = caption + "."
         return caption
 
-    def generate_masks_with_special_tokens_and_transfer_map(self, tokenized, special_tokens_list):
+    def generate_masks_with_special_tokens_and_transfer_map(
+        self, tokenized, special_tokens_list
+    ):
         """Generate attention mask between each pair of special tokens
         Args:
             input_ids (torch.Tensor): input ids. Shape: [bs, num_token]
@@ -279,19 +329,28 @@ class GroundingDinoTextProcessor(object):
 
         idxs = paddle.nonzero(special_tokens_mask)
 
-        attention_mask = paddle.eye(num_token, dtype=paddle.int32).cast(paddle.bool).unsqueeze(0).tile([bs, 1, 1])
+        attention_mask = (
+            paddle.eye(num_token, dtype=paddle.int32)
+            .cast(paddle.bool)
+            .unsqueeze(0)
+            .tile([bs, 1, 1])
+        )
         position_ids = paddle.zeros((bs, num_token), dtype=paddle.int64)
         cate_to_token_mask_list = [[] for _ in range(bs)]
         previous_col = 0
 
-        for i in range(idxs.shape[0]): 
-            row, col = idxs[i] 
+        for i in range(idxs.shape[0]):
+            row, col = idxs[i]
             if (col == 0) or (col == num_token - 1):
                 attention_mask[row, col, col] = True
                 position_ids[row, col] = 0
             else:
-                attention_mask[row, previous_col + 1 : col + 1, previous_col + 1 : col + 1] = True
-                position_ids[row, previous_col + 1 : col + 1] = paddle.arange(0, col - previous_col)
+                attention_mask[
+                    row, previous_col + 1 : col + 1, previous_col + 1 : col + 1
+                ] = True
+                position_ids[row, previous_col + 1 : col + 1] = paddle.arange(
+                    0, col - previous_col
+                )
                 c2t_maski = paddle.zeros(
                     [
                         num_token,
@@ -299,13 +358,14 @@ class GroundingDinoTextProcessor(object):
                 ).cast(paddle.bool)
                 c2t_maski[previous_col + 1 : col] = True
                 cate_to_token_mask_list[row].append(c2t_maski)
-            previous_col = col 
+            previous_col = col
 
         return attention_mask, position_ids.cast(paddle.int64), cate_to_token_mask_list
 
+
 class GroundingDinoImageProcessor(object):
-    """Constructs a GroundingDino image processor.
-    """
+    """Constructs a GroundingDino image processor."""
+
     def __init__(
         self,
         do_resize: bool = True,
@@ -323,7 +383,7 @@ class GroundingDinoImageProcessor(object):
 
         self.do_resize = do_resize
         self.do_normalize = do_normalize
-        self.image_mean = image_mean 
+        self.image_mean = image_mean
         self.image_std = image_std
         self.do_nested = do_nested
 
@@ -332,8 +392,8 @@ class GroundingDinoImageProcessor(object):
         return self.preprocess(images, **kwargs)
 
     def resize(self, image, size=None, max_size=1333):
-        """Officially aligned Image resize.
-        """
+        """Officially aligned Image resize."""
+
         def get_size_with_aspect_ratio(image_size, size, max_size=None):
             w, h = image_size
             if max_size is not None:
@@ -394,8 +454,7 @@ class GroundingDinoImageProcessor(object):
         do_nested: bool = None,
         **kwargs,
     ):
-        """Preprocess an image or batch of images.
-        """
+        """Preprocess an image or batch of images."""
         do_resize = do_resize if do_resize is not None else self.do_resize
         do_normalize = do_normalize if do_normalize is not None else self.do_normalize
         do_nested = do_nested if do_nested is not None else self.do_nested
@@ -407,14 +466,15 @@ class GroundingDinoImageProcessor(object):
         if not isinstance(images, (list, tuple)):
             images = [images]
         if isinstance(images[0], np.ndarray):
-            images = [
-                PIL.Image.fromarray(image) for image in images
-            ]
+            images = [PIL.Image.fromarray(image) for image in images]
 
         if do_resize:
             min_size = min(self.target_size)
             max_size = max(self.target_size)
-            images = [T.to_tensor(self.resize(image=image, size=min_size, max_size = max_size)) for image in images]
+            images = [
+                T.to_tensor(self.resize(image=image, size=min_size, max_size=max_size))
+                for image in images
+            ]
 
         if do_normalize:
             images = T.normalize(images, mean=image_mean, std=image_std)

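The GroundingDINO processors above (and the SAM processors below) replace module-level paddle imports with LazyLoader proxies, so paddle is imported only on first attribute access; per the NOTE, this avoids the import-time conflict between ultra-infer and Paddle. The two-line change to paddlex/utils/lazy_loader.py is not shown in this excerpt, so the following is only an illustrative sketch of how such a proxy commonly works, not the actual PaddleX implementation (numpy stands in for paddle so the snippet runs anywhere):

    import importlib
    import types

    class LazyLoader(types.ModuleType):
        """Defer importing `module_name` until an attribute is first accessed."""

        def __init__(self, local_name, parent_globals, module_name):
            super().__init__(local_name)
            self._local_name = local_name
            self._parent_globals = parent_globals
            self._module_name = module_name

        def _load(self):
            module = importlib.import_module(self._module_name)
            # Swap the proxy for the real module so later lookups bypass __getattr__.
            self._parent_globals[self._local_name] = module
            self.__dict__.update(module.__dict__)
            return module

        def __getattr__(self, name):
            return getattr(self._load(), name)

    # Mirrors the call pattern used above: nothing is imported until first use.
    np_lazy = LazyLoader("np_lazy", globals(), "numpy")
    print(np_lazy.zeros(3))
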
+ 45 - 28
paddlex/inference/models_new/open_vocabulary_segmentation/processors/sam_processer.py

@@ -19,19 +19,25 @@ import numpy as np
 import PIL
 from copy import deepcopy
 
-import paddle
-import paddle.vision.transforms as T
-import paddle.nn.functional as F
+from .....utils.lazy_loader import LazyLoader
 
-def _get_preprocess_shape(oldh: int, oldw: int, long_side_length: int) -> Tuple[int, int]:
-    """Compute the output size given input size and target long side length.
-    """
+# NOTE: LazyLoader is used to avoid conflicts between ultra-infer and Paddle
+paddle = LazyLoader("lazy_paddle", globals(), "paddle")
+T = LazyLoader("T", globals(), "paddle.vision.transforms")
+F = LazyLoader("F", globals(), "paddle.nn.functional")
+
+
+def _get_preprocess_shape(
+    oldh: int, oldw: int, long_side_length: int
+) -> Tuple[int, int]:
+    """Compute the output size given input size and target long side length."""
     scale = long_side_length * 1.0 / max(oldh, oldw)
     newh, neww = oldh * scale, oldw * scale
     neww = int(neww + 0.5)
     newh = int(newh + 0.5)
     return (newh, neww)
 
+
 class SAMProcessor(object):
 
     def __init__(
@@ -53,7 +59,9 @@ class SAMProcessor(object):
         self.image_mean = image_mean
         self.image_std = image_std
 
-        self.image_processor = SamImageProcessor(self.size, self.image_mean, self.image_std)
+        self.image_processor = SamImageProcessor(
+            self.size, self.image_mean, self.image_std
+        )
         self.prompt_processor = SamPromptProcessor(self.size)
 
     def preprocess(
@@ -74,8 +82,12 @@ class SAMProcessor(object):
                 "SAM must use either points or boxes as prompt, now both is None."
             )
 
-        point_prompt = np.array(point_prompt).reshape(-1, 2) if point_prompt is not None else None
-        box_prompt = np.array(box_prompt).reshape(-1, 4) if box_prompt is not None else None
+        point_prompt = (
+            np.array(point_prompt).reshape(-1, 2) if point_prompt is not None else None
+        )
+        box_prompt = (
+            np.array(box_prompt).reshape(-1, 4) if box_prompt is not None else None
+        )
 
         if point_prompt is not None and point_prompt.size > 2:
             raise ValueError(
@@ -94,11 +106,11 @@ class SAMProcessor(object):
         return image_seg, prompt
 
     def postprocess(self, low_res_masks, mask_threshold: float = 0.0):
-        
+
         if isinstance(low_res_masks, list):
             assert len(low_res_masks) == 1
             low_res_masks = low_res_masks[0]
-            
+
         masks = F.interpolate(
             paddle.to_tensor(low_res_masks),
             (self.size, self.size),
@@ -106,15 +118,16 @@ class SAMProcessor(object):
             align_corners=False,
         )
         masks = masks[..., : self.input_size[0], : self.input_size[1]]
-        masks = F.interpolate(masks, self.original_size, mode="bilinear", align_corners=False)
+        masks = F.interpolate(
+            masks, self.original_size, mode="bilinear", align_corners=False
+        )
         masks = (masks > mask_threshold).numpy().astype(np.int8)
 
         return [masks]
 
 
 class SamPromptProcessor(object):
-    """Constructs a Sam prompt processor.
-    """
+    """Constructs a Sam prompt processor."""
 
     def __init__(
         self,
@@ -122,20 +135,26 @@ class SamPromptProcessor(object):
     ):
         self.size = size
 
-    def apply_coords(self, coords: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
+    def apply_coords(
+        self, coords: np.ndarray, original_size: Tuple[int, ...]
+    ) -> np.ndarray:
         """Expects a numpy array of length 2 in the final dimension. Requires the
-           original image size in (H, W) format.
+        original image size in (H, W) format.
         """
         old_h, old_w = original_size
-        new_h, new_w = _get_preprocess_shape(original_size[0], original_size[1], self.size)
+        new_h, new_w = _get_preprocess_shape(
+            original_size[0], original_size[1], self.size
+        )
         coords = deepcopy(coords).astype(float)
         coords[..., 0] = coords[..., 0] * (new_w / old_w)
         coords[..., 1] = coords[..., 1] * (new_h / old_h)
         return coords
 
-    def apply_boxes(self, boxes: np.ndarray, original_size: Tuple[int, ...]) -> np.ndarray:
+    def apply_boxes(
+        self, boxes: np.ndarray, original_size: Tuple[int, ...]
+    ) -> np.ndarray:
         """Expects a numpy array shape Nx4. Requires the original image size
-           in (H, W) format.
+        in (H, W) format.
         """
         boxes = self.apply_coords(boxes.reshape([-1, 2, 2]), original_size)
         return boxes.reshape([-1, 4])
@@ -160,9 +179,9 @@ class SamPromptProcessor(object):
             box = self.apply_boxes(box, original_size)
             return box.astype(np.float32)
 
+
 class SamImageProcessor(object):
-    """Constructs a Sam image processor.
-    """
+    """Constructs a Sam image processor."""
 
     def __init__(
         self,
@@ -171,7 +190,7 @@ class SamImageProcessor(object):
         image_std: Union[float, List[float]] = [0.5, 0.5, 0.5],
         **kwargs,
     ) -> None:
-    
+
         size = size if size is not None else 1024
         self.size = size
 
@@ -187,8 +206,7 @@ class SamImageProcessor(object):
         self.input_size = None
 
     def apply_image(self, image: np.ndarray) -> np.ndarray:
-        """Expects a numpy array with shape HxWxC in uint8 format.
-        """
+        """Expects a numpy array with shape HxWxC in uint8 format."""
         target_size = _get_preprocess_shape(image.shape[0], image.shape[1], self.size)
         if isinstance(image, np.ndarray):
             image = PIL.Image.fromarray(image)
@@ -199,13 +217,12 @@ class SamImageProcessor(object):
         if not isinstance(images, (list, tuple)):
             images = [images]
         return self.preprocess(images)
-        
+
     def preprocess(
         self,
         images,
     ):
-        """Preprocess an image or a batch of images with a same shape.
-        """
+        """Preprocess an image or a batch of images with a same shape."""
 
         size = self.size
 
@@ -229,4 +246,4 @@ class SamImageProcessor(object):
         padw = self.size - w
         input_image = F.pad(input_image_paddle, (0, padw, 0, padh))
 
-        return input_image.numpy()
+        return input_image.numpy()

+ 0 - 3
paddlex/inference/pipelines_new/__init__.py

@@ -100,7 +100,6 @@ def create_pipeline(
     device: str = None,
     pp_option: PaddlePredictorOption = None,
     use_hpip: bool = False,
-    hpi_params: Optional[Dict[str, Any]] = None,
     *args,
     **kwargs,
 ) -> BasePipeline:
@@ -115,7 +114,6 @@ def create_pipeline(
         device (str, optional): The device to run the pipeline on. Defaults to None.
         pp_option (PaddlePredictorOption, optional): The options for the PaddlePredictor. Defaults to None.
         use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-        hpi_params (Optional[Dict[str, Any]], optional): Additional parameters for hpip. Defaults to None.
         *args: Additional positional arguments.
         **kwargs: Additional keyword arguments.
 
@@ -134,7 +132,6 @@ def create_pipeline(
         device=device,
         pp_option=pp_option,
         use_hpip=use_hpip,
-        hpi_params=hpi_params,
         *args,
         **kwargs,
     )

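With hpi_params removed from create_pipeline, high-performance inference is enabled with use_hpip alone, and any HPIP parameters are picked up from the pipeline's YAML configuration instead (see the base.py change below). A hedged usage sketch; the pipeline name, device, and input path are illustrative, and the leading pipeline= parameter is assumed from the surrounding PaddleX API rather than shown in this hunk:

    from paddlex import create_pipeline

    # hpi_params is no longer accepted here; HPIP settings live in the
    # pipeline's YAML configuration.
    pipeline = create_pipeline(
        pipeline="image_classification",  # illustrative pipeline name
        device="gpu:0",
        use_hpip=True,
    )
    for res in pipeline.predict("example.jpg"):
        print(res)
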
+ 3 - 9
paddlex/inference/pipelines_new/anomaly_detection/pipeline.py

@@ -1,4 +1,4 @@
-# copyright (c) 2025 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -33,7 +33,6 @@ class AnomalyDetectionPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the image anomaly detection pipeline.
 
@@ -42,17 +41,12 @@ class AnomalyDetectionPipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         anomaly_detetion_model_config = config["SubModules"]["AnomalyDetection"]
-        self.anomaly_detetion_model = self.create_model(
-            anomaly_detetion_model_config
-        )
+        self.anomaly_detetion_model = self.create_model(anomaly_detetion_model_config)
 
     def predict(
         self, input: str | list[str] | np.ndarray | list[np.ndarray], **kwargs

+ 1 - 4
paddlex/inference/pipelines_new/attribute_recognition/pipeline.py

@@ -35,11 +35,8 @@ class AttributeRecPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ):
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self.det_model = self.create_model(config["SubModules"]["Detection"])
         self.cls_model = self.create_model(config["SubModules"]["Classification"])

+ 3 - 5
paddlex/inference/pipelines_new/base.py

@@ -37,7 +37,6 @@ class BasePipeline(ABC, metaclass=AutoRegisterABCMetaClass):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
         *args,
         **kwargs,
     ) -> None:
@@ -48,13 +47,11 @@ class BasePipeline(ABC, metaclass=AutoRegisterABCMetaClass):
             device (str, optional): The device to use for prediction. Defaults to None.
             pp_option (PaddlePredictorOption, optional): The options for PaddlePredictor. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Dict[str, Any], optional): Additional parameters for hpip. Defaults to None.
         """
         super().__init__()
         self.device = device
         self.pp_option = pp_option
         self.use_hpip = use_hpip
-        self.hpi_params = hpi_params
 
     @abstractmethod
     def predict(self, input, **kwargs):
@@ -82,6 +79,8 @@ class BasePipeline(ABC, metaclass=AutoRegisterABCMetaClass):
             raise ValueError(config["model_config_error"])
 
         model_dir = config.get("model_dir", None)
+        hpi_params = config.get("hpi_params", None)
+
         from .. import create_predictor
 
         model = create_predictor(
@@ -90,7 +89,7 @@ class BasePipeline(ABC, metaclass=AutoRegisterABCMetaClass):
             device=self.device,
             pp_option=self.pp_option,
             use_hpip=self.use_hpip,
-            hpi_params=self.hpi_params,
+            hpi_params=hpi_params,
             **kwargs,
         )
         return model
@@ -117,7 +116,6 @@ class BasePipeline(ABC, metaclass=AutoRegisterABCMetaClass):
             device=self.device,
             pp_option=self.pp_option,
             use_hpip=self.use_hpip,
-            hpi_params=self.hpi_params,
         )
         return pipeline
 

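BasePipeline.create_model now reads hpi_params from each sub-module's own config block via config.get("hpi_params", None) instead of taking it from the pipeline constructor, which is what the commit message means by creating the pipeline using hpi_params in the YAML configuration. A hedged sketch of such a block loaded as a Python dict; only keys that appear in the diffs are used, and the inner config contents are left empty because they are backend-specific and not specified here:

    # Equivalent of a sub-module entry from a pipeline YAML, as a dict
    # (real entries also name the model; that key is omitted here):
    sub_module_config = {
        "model_dir": None,
        # Shape follows HPIParams in paddlex_hpi/models/base.py: only `config` remains.
        "hpi_params": {"config": {}},
    }

    model_dir = sub_module_config.get("model_dir", None)    # as in create_model above
    hpi_params = sub_module_config.get("hpi_params", None)
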
+ 1 - 5
paddlex/inference/pipelines_new/doc_preprocessor/pipeline.py

@@ -34,7 +34,6 @@ class DocPreprocessorPipeline(BasePipeline):
         device: Optional[str] = None,
         pp_option: Optional[PaddlePredictorOption] = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the doc preprocessor pipeline.
 
@@ -43,12 +42,9 @@ class DocPreprocessorPipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self.use_doc_orientation_classify = config.get(
             "use_doc_orientation_classify", True

+ 1 - 5
paddlex/inference/pipelines_new/formula_recognition/pipeline.py

@@ -45,7 +45,6 @@ class FormulaRecognitionPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the formula recognition pipeline.
 
@@ -54,12 +53,9 @@ class FormulaRecognitionPipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self.use_doc_preprocessor = config.get("use_doc_preprocessor", True)
         if self.use_doc_preprocessor:

+ 2 - 6
paddlex/inference/pipelines_new/image_classification/pipeline.py

@@ -34,7 +34,6 @@ class ImageClassificationPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -44,11 +43,8 @@ class ImageClassificationPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         image_classification_model_config = config["SubModules"]["ImageClassification"]
         model_kwargs = {}
@@ -71,6 +67,6 @@ class ImageClassificationPipeline(BasePipeline):
         Returns:
             TopkResult: The predicted top k results.
         """
-      
+
         topk = kwargs.pop("topk", self.topk)
         yield from self.image_classification_model(input, topk=topk)

+ 1 - 5
paddlex/inference/pipelines_new/image_multilabel_classification/pipeline.py

@@ -34,7 +34,6 @@ class ImageMultiLabelClassificationPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -44,11 +43,8 @@ class ImageMultiLabelClassificationPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self.threshold = config["SubModules"]["ImageMultiLabelClassification"].get(
             "threshold", None

+ 1 - 5
paddlex/inference/pipelines_new/instance_segmentation/pipeline.py

@@ -32,7 +32,6 @@ class InstanceSegmentationPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -42,11 +41,8 @@ class InstanceSegmentationPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         instance_segmentation_model_config = config["SubModules"][
             "InstanceSegmentation"

+ 1 - 5
paddlex/inference/pipelines_new/keypoint_detection/pipeline.py

@@ -34,7 +34,6 @@ class KeypointDetectionPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -44,11 +43,8 @@ class KeypointDetectionPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         # create object detection model
         model_cfg = config["SubModules"]["ObjectDetection"]

+ 1 - 5
paddlex/inference/pipelines_new/layout_parsing/pipeline.py

@@ -41,7 +41,6 @@ class LayoutParsingPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the layout parsing pipeline.
 
@@ -50,12 +49,9 @@ class LayoutParsingPipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self.inintial_predictor(config)
 

+ 2 - 6
paddlex/inference/pipelines_new/multilingual_speech_recognition/pipeline.py

@@ -1,4 +1,4 @@
-# copyright (c) 2025 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -31,7 +31,6 @@ class MultilingualSpeechRecognitionPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -41,11 +40,8 @@ class MultilingualSpeechRecognitionPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         multilingual_speech_recognition_model_config = config["SubModules"][
             "MultilingualSpeechRecognition"

+ 1 - 5
paddlex/inference/pipelines_new/object_detection/pipeline.py

@@ -33,7 +33,6 @@ class ObjectDetectionPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -43,11 +42,8 @@ class ObjectDetectionPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
         model_cfg = config["SubModules"]["ObjectDetection"]
         model_kwargs = {}
         if "threshold" in model_cfg:

+ 1 - 5
paddlex/inference/pipelines_new/ocr/pipeline.py

@@ -41,7 +41,6 @@ class OCRPipeline(BasePipeline):
         device: Optional[str] = None,
         pp_option: Optional[PaddlePredictorOption] = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -51,11 +50,8 @@ class OCRPipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self.use_doc_preprocessor = config.get("use_doc_preprocessor", True)
         if self.use_doc_preprocessor:

+ 1 - 5
paddlex/inference/pipelines_new/pp_chatocr/pipeline_base.py

@@ -26,7 +26,6 @@ class PP_ChatOCR_Pipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the pp-chatocrv3-doc pipeline.
 
@@ -35,12 +34,9 @@ class PP_ChatOCR_Pipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
     def visual_predict(self):
         """

+ 1 - 5
paddlex/inference/pipelines_new/pp_chatocr/pipeline_v3.py

@@ -37,7 +37,6 @@ class PP_ChatOCRv3_Pipeline(PP_ChatOCR_Pipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the pp-chatocrv3-doc pipeline.
 
@@ -46,13 +45,10 @@ class PP_ChatOCRv3_Pipeline(PP_ChatOCR_Pipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
             use_layout_parsing (bool, optional): Whether to use layout parsing. Defaults to True.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self.pipeline_name = config["pipeline_name"]
 

+ 1 - 5
paddlex/inference/pipelines_new/pp_chatocr/pipeline_v4.py

@@ -39,7 +39,6 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the pp-chatocrv3-doc pipeline.
 
@@ -48,13 +47,10 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
             use_layout_parsing (bool, optional): Whether to use layout parsing. Defaults to True.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self.pipeline_name = config["pipeline_name"]
 

+ 1 - 4
paddlex/inference/pipelines_new/pp_shitu_v2/pipeline.py

@@ -37,11 +37,8 @@ class ShiTuV2Pipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ):
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self._topk, self._rec_threshold, self._hamming_radius, self._det_threshold = (
             config.get("topk", 5),

+ 1 - 5
paddlex/inference/pipelines_new/rotated_object__detection/pipeline.py

@@ -32,7 +32,6 @@ class RotatedObjectDetectionPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -42,11 +41,8 @@ class RotatedObjectDetectionPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         rotated_object_detection_model_config = config["SubModules"][
             "RotatedObjectDetection"

+ 1 - 5
paddlex/inference/pipelines_new/seal_recognition/pipeline.py

@@ -40,7 +40,6 @@ class SealRecognitionPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the seal recognition pipeline.
 
@@ -49,12 +48,9 @@ class SealRecognitionPipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self.use_doc_preprocessor = config.get("use_doc_preprocessor", True)
         if self.use_doc_preprocessor:

+ 1 - 5
paddlex/inference/pipelines_new/semantic_segmentation/pipeline.py

@@ -32,7 +32,6 @@ class SemanticSegmentationPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -42,11 +41,8 @@ class SemanticSegmentationPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         semantic_segmentation_model_config = config["SubModules"][
             "SemanticSegmentation"

+ 1 - 5
paddlex/inference/pipelines_new/small_object__detection/pipeline.py

@@ -32,7 +32,6 @@ class SmallObjectDetectionPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -42,11 +41,8 @@ class SmallObjectDetectionPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         small_object_detection_model_config = config["SubModules"][
             "SmallObjectDetection"

+ 1 - 5
paddlex/inference/pipelines_new/table_recognition/pipeline.py

@@ -43,7 +43,6 @@ class TableRecognitionPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the layout parsing pipeline.
 
@@ -52,12 +51,9 @@ class TableRecognitionPipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         self.use_doc_preprocessor = config.get("use_doc_preprocessor", True)
         if self.use_doc_preprocessor:

+ 3 - 9
paddlex/inference/pipelines_new/ts_anomaly_detection/pipeline.py

@@ -1,4 +1,4 @@
-# copyright (c) 2025 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -33,7 +33,6 @@ class TSAnomalyDetPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the Time Series ad pipeline.
 
@@ -42,17 +41,12 @@ class TSAnomalyDetPipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         ts_ad_model_config = config["SubModules"]["TSAnomalyDetection"]
-        self.ts_ad_model = self.create_model(
-            ts_ad_model_config
-        )
+        self.ts_ad_model = self.create_model(ts_ad_model_config)
 
     def predict(
         self, input: str | list[str] | pd.DataFrame | list[pd.DataFrame], **kwargs

+ 3 - 9
paddlex/inference/pipelines_new/ts_classification/pipeline.py

@@ -1,4 +1,4 @@
-# copyright (c) 2025 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -33,7 +33,6 @@ class TSClsPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the Time Series classification pipeline.
 
@@ -42,17 +41,12 @@ class TSClsPipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         ts_classification_model_config = config["SubModules"]["TSClassification"]
-        self.ts_classification_model = self.create_model(
-            ts_classification_model_config
-        )
+        self.ts_classification_model = self.create_model(ts_classification_model_config)
 
     def predict(
         self, input: str | list[str] | pd.DataFrame | list[pd.DataFrame], **kwargs

+ 3 - 9
paddlex/inference/pipelines_new/ts_forecasting/pipeline.py

@@ -1,4 +1,4 @@
-# copyright (c) 2025 PaddlePaddle Authors. All Rights Reserve.
+# copyright (c) 2024 PaddlePaddle Authors. All Rights Reserve.
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
@@ -33,7 +33,6 @@ class TSFcPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Initializes the Time Series Forecast pipeline.
 
@@ -42,17 +41,12 @@ class TSFcPipeline(BasePipeline):
             device (str, optional): Device to run the predictions on. Defaults to None.
             pp_option (PaddlePredictorOption, optional): PaddlePredictor options. Defaults to None.
             use_hpip (bool, optional): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]], optional): HPIP parameters. Defaults to None.
         """
 
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         ts_forecast_model_config = config["SubModules"]["TSForecast"]
-        self.ts_forecast_model = self.create_model(
-            ts_forecast_model_config
-        )
+        self.ts_forecast_model = self.create_model(ts_forecast_model_config)
 
     def predict(
         self, input: str | list[str] | pd.DataFrame | list[pd.DataFrame], **kwargs

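The three TS pipelines (anomaly detection, classification, forecasting) share a `predict` that accepts file paths or pandas DataFrames. A hedged usage sketch; the pipeline name, import path, and CSV file are assumptions, only the `predict` signature and the result `print()` call come from this diff:

```python
# Usage sketch only: predict() accepting str | list[str] | pd.DataFrame |
# list[pd.DataFrame] comes from the hunks above; names not in the diff are placeholders.
import pandas as pd

from paddlex import create_pipeline  # assumed top-level import

pipeline = create_pipeline("ts_forecast", device="cpu")  # hypothetical pipeline name
df = pd.read_csv("my_series.csv")                        # placeholder input

for res in pipeline.predict(df):
    res.print()  # same result API used in paddlex_cli.py below
```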
+ 1 - 5
paddlex/inference/pipelines_new/video_classification/pipeline.py

@@ -32,7 +32,6 @@ class VideoClassificationPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -42,11 +41,8 @@ class VideoClassificationPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         video_classification_model_config = config["SubModules"]["VideoClassification"]
         self.video_classification_model = self.create_model(

+ 1 - 5
paddlex/inference/pipelines_new/video_detection/pipeline.py

@@ -32,7 +32,6 @@ class VideoDetectionPipeline(BasePipeline):
         device: str = None,
         pp_option: PaddlePredictorOption = None,
         use_hpip: bool = False,
-        hpi_params: Optional[Dict[str, Any]] = None,
     ) -> None:
         """
         Initializes the class with given configurations and options.
@@ -42,11 +41,8 @@ class VideoDetectionPipeline(BasePipeline):
             device (str): The device to run the prediction on. Default is None.
             pp_option (PaddlePredictorOption): Options for PaddlePaddle predictor. Default is None.
             use_hpip (bool): Whether to use high-performance inference (hpip) for prediction. Defaults to False.
-            hpi_params (Optional[Dict[str, Any]]): HPIP specific parameters. Default is None.
         """
-        super().__init__(
-            device=device, pp_option=pp_option, use_hpip=use_hpip, hpi_params=hpi_params
-        )
+        super().__init__(device=device, pp_option=pp_option, use_hpip=use_hpip)
 
         video_detection_model_config = config["SubModules"]["VideoDetection"]
         self.video_detection_model = self.create_model(video_detection_model_config)

+ 3 - 25
paddlex/paddlex_cli.py

@@ -123,14 +123,6 @@ def args_cfg():
         "--use_hpip", action="store_true", help="Enable HPIP acceleration if available."
     )
     pipeline_group.add_argument(
-        "--serial_number", type=str, help="Serial number for device identification."
-    )
-    pipeline_group.add_argument(
-        "--update_license",
-        action="store_true",
-        help="Update the software license information.",
-    )
-    pipeline_group.add_argument(
         "--get_pipeline_config",
         type=str,
         default=None,
@@ -214,25 +206,16 @@ def install(args):
     return
 
 
-def _get_hpi_params(serial_number, update_license):
-    return {"serial_number": serial_number, "update_license": update_license}
-
-
 def pipeline_predict(
     pipeline,
     input,
     device,
     save_path,
     use_hpip,
-    serial_number,
-    update_license,
     **pipeline_args,
 ):
     """pipeline predict"""
-    hpi_params = _get_hpi_params(serial_number, update_license)
-    pipeline = create_pipeline(
-        pipeline, device=device, use_hpip=use_hpip, hpi_params=hpi_params
-    )
+    pipeline = create_pipeline(pipeline, device=device, use_hpip=use_hpip)
     result = pipeline.predict(input, **pipeline_args)
     for res in result:
         res.print()
@@ -240,13 +223,12 @@ def pipeline_predict(
             res.save_all(save_path=save_path)
 
 
-def serve(pipeline, *, device, use_hpip, serial_number, update_license, host, port):
+def serve(pipeline, *, device, use_hpip, host, port):
     from .inference.pipelines.serving import create_pipeline_app, run_server
 
-    hpi_params = _get_hpi_params(serial_number, update_license)
     pipeline_config = load_pipeline_config(pipeline)
     pipeline = create_pipeline_from_config(
-        pipeline_config, device=device, use_hpip=use_hpip, hpi_params=hpi_params
+        pipeline_config, device=device, use_hpip=use_hpip
     )
     app = create_pipeline_app(pipeline, pipeline_config)
     run_server(app, host=host, port=port, debug=False)
@@ -270,8 +252,6 @@ def main():
             args.pipeline,
             device=args.device,
             use_hpip=args.use_hpip,
-            serial_number=args.serial_number,
-            update_license=args.update_license,
             host=args.host,
             port=args.port,
         )
@@ -294,7 +274,5 @@ def main():
                 args.device,
                 args.save_path,
                 use_hpip=args.use_hpip,
-                serial_number=args.serial_number,
-                update_license=args.update_license,
                 **pipeline_args_dict,
             )

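NOTE: with `--serial_number` and `--update_license` gone, `--use_hpip` is the only HPIP-related switch left on the CLI, and both `create_pipeline` and `create_pipeline_from_config` are now called without `hpi_params`. A hedged sketch of the simplified call path; the "OCR" pipeline name, the sample paths, and the `--pipeline`/`--input` flags are assumptions for illustration:

```python
# Python-API sketch mirroring the updated pipeline_predict(); only device and
# use_hpip are forwarded now.
#
# Assumed CLI equivalent:
#   paddlex --pipeline OCR --input sample.png --device gpu:0 --use_hpip
from paddlex import create_pipeline  # assumed top-level import

pipeline = create_pipeline("OCR", device="gpu:0", use_hpip=True)
for res in pipeline.predict("sample.png"):
    res.print()
    res.save_all(save_path="./output")  # same branch as in the CLI code above
```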
+ 2 - 0
paddlex/utils/lazy_loader.py

@@ -16,6 +16,7 @@
 import importlib
 import types
 import os
+import inspect
 
 from . import logging
 from .flags import FLAGS_json_format_model
@@ -52,6 +53,7 @@ class LazyLoader(types.ModuleType):
         self._module = module
 
     def __getattr__(self, item):
+        logging.debug("lazy load in : %s", inspect.currentframe().f_back)
         if not self.loaded:
             # HACK: For circumventing shared library symbol conflicts when
             # importing paddlex_hpi