
update hpi config (#2304)

zhangyubo0722, 1 year ago
parent
commit
e35f2f1566
100 changed files with 20 additions and 2896 deletions
  1. + 2 - 4  paddlex/repo_apis/PaddleClas_api/cls/model.py
  2. + 0 - 81  paddlex/repo_apis/PaddleClas_api/cls/register.py
  3. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-L.yaml
  4. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-L_layout_17cls.yaml
  5. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-L_layout_3cls.yaml
  6. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-M.yaml
  7. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-S.yaml
  8. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-S_layout_17cls.yaml
  9. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-S_layout_3cls.yaml
  10. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-XS.yaml
  11. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/configs/PicoDet_LCNet_x2_5_face.yaml
  12. + 1 - 1  paddlex/repo_apis/PaddleDetection_api/configs/PicoDet_layout_1x.yaml
  13. + 2 - 8  paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py
  14. + 0 - 3  paddlex/repo_apis/PaddleDetection_api/instance_seg/register.py
  15. + 2 - 8  paddlex/repo_apis/PaddleDetection_api/object_det/model.py
  16. + 0 - 22  paddlex/repo_apis/PaddleDetection_api/object_det/register.py
  17. + 0 - 3  paddlex/repo_apis/PaddleOCR_api/table_rec/register.py
  18. + 0 - 5  paddlex/repo_apis/PaddleOCR_api/text_det/register.py
  19. + 2 - 4  paddlex/repo_apis/PaddleOCR_api/text_rec/model.py
  20. + 0 - 6  paddlex/repo_apis/PaddleOCR_api/text_rec/register.py
  21. + 2 - 8  paddlex/repo_apis/PaddleSeg_api/seg/model.py
  22. + 0 - 19  paddlex/repo_apis/PaddleSeg_api/seg/register.py
  23. + 0 - 37  paddlex/utils/hpi_configs/CLIP_vit_base_patch16_224.yaml
  24. + 0 - 37  paddlex/utils/hpi_configs/CLIP_vit_large_patch14_224.yaml
  25. + 0 - 37  paddlex/utils/hpi_configs/ConvNeXt_base_224.yaml
  26. + 0 - 37  paddlex/utils/hpi_configs/ConvNeXt_base_384.yaml
  27. + 0 - 37  paddlex/utils/hpi_configs/ConvNeXt_large_224.yaml
  28. + 0 - 37  paddlex/utils/hpi_configs/ConvNeXt_large_384.yaml
  29. + 0 - 37  paddlex/utils/hpi_configs/ConvNeXt_small.yaml
  30. + 0 - 37  paddlex/utils/hpi_configs/ConvNeXt_tiny.yaml
  31. + 0 - 34  paddlex/utils/hpi_configs/Deeplabv3-R101.yaml
  32. + 0 - 34  paddlex/utils/hpi_configs/Deeplabv3-R50.yaml
  33. + 0 - 34  paddlex/utils/hpi_configs/Deeplabv3_Plus-R101.yaml
  34. + 0 - 34  paddlex/utils/hpi_configs/Deeplabv3_Plus-R50.yaml
  35. + 0 - 13  paddlex/utils/hpi_configs/Mask-RT-DETR-H.yaml
  36. + 0 - 13  paddlex/utils/hpi_configs/Mask-RT-DETR-L.yaml
  37. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV1_x0_25.yaml
  38. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV1_x0_5.yaml
  39. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV1_x0_75.yaml
  40. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV1_x1_0.yaml
  41. + 0 - 25  paddlex/utils/hpi_configs/MobileNetV2_x0_25.yaml
  42. + 0 - 25  paddlex/utils/hpi_configs/MobileNetV2_x0_5.yaml
  43. + 0 - 25  paddlex/utils/hpi_configs/MobileNetV2_x1_0.yaml
  44. + 0 - 25  paddlex/utils/hpi_configs/MobileNetV2_x1_5.yaml
  45. + 0 - 25  paddlex/utils/hpi_configs/MobileNetV2_x2_0.yaml
  46. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV3_large_x0_35.yaml
  47. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV3_large_x0_5.yaml
  48. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV3_large_x0_75.yaml
  49. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV3_large_x1_0.yaml
  50. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV3_large_x1_25.yaml
  51. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV3_small_x0_35.yaml
  52. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV3_small_x0_5.yaml
  53. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV3_small_x0_75.yaml
  54. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV3_small_x1_0.yaml
  55. + 0 - 37  paddlex/utils/hpi_configs/MobileNetV3_small_x1_25.yaml
  56. + 0 - 33  paddlex/utils/hpi_configs/OCRNet_HRNet-W18.yaml
  57. + 0 - 33  paddlex/utils/hpi_configs/OCRNet_HRNet-W48.yaml
  58. + 0 - 37  paddlex/utils/hpi_configs/PP-HGNetV2-B0.yaml
  59. + 0 - 37  paddlex/utils/hpi_configs/PP-HGNetV2-B1.yaml
  60. + 0 - 37  paddlex/utils/hpi_configs/PP-HGNetV2-B2.yaml
  61. + 0 - 37  paddlex/utils/hpi_configs/PP-HGNetV2-B3.yaml
  62. + 0 - 37  paddlex/utils/hpi_configs/PP-HGNetV2-B4.yaml
  63. + 0 - 37  paddlex/utils/hpi_configs/PP-HGNetV2-B5.yaml
  64. + 0 - 37  paddlex/utils/hpi_configs/PP-HGNetV2-B6.yaml
  65. + 0 - 37  paddlex/utils/hpi_configs/PP-HGNet_base.yaml
  66. + 0 - 37  paddlex/utils/hpi_configs/PP-HGNet_small.yaml
  67. + 0 - 37  paddlex/utils/hpi_configs/PP-HGNet_tiny.yaml
  68. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNetV2_base.yaml
  69. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNetV2_large.yaml
  70. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNetV2_small.yaml
  71. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNet_x0_25.yaml
  72. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNet_x0_35.yaml
  73. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNet_x0_5.yaml
  74. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNet_x0_75.yaml
  75. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNet_x1_0.yaml
  76. + 0 - 13  paddlex/utils/hpi_configs/PP-LCNet_x1_0_doc_ori.yaml
  77. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNet_x1_5.yaml
  78. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNet_x2_0.yaml
  79. + 0 - 37  paddlex/utils/hpi_configs/PP-LCNet_x2_5.yaml
  80. + 0 - 34  paddlex/utils/hpi_configs/PP-LiteSeg-B.yaml
  81. + 0 - 34  paddlex/utils/hpi_configs/PP-LiteSeg-T.yaml
  82. + 0 - 37  paddlex/utils/hpi_configs/PP-OCRv4_mobile_det.yaml
  83. + 0 - 25  paddlex/utils/hpi_configs/PP-OCRv4_mobile_rec.yaml
  84. + 0 - 13  paddlex/utils/hpi_configs/PP-OCRv4_mobile_seal_det.yaml
  85. + 0 - 37  paddlex/utils/hpi_configs/PP-OCRv4_server_det.yaml
  86. + 0 - 25  paddlex/utils/hpi_configs/PP-OCRv4_server_rec.yaml
  87. + 0 - 13  paddlex/utils/hpi_configs/PP-OCRv4_server_seal_det.yaml
  88. + 0 - 39  paddlex/utils/hpi_configs/PP-YOLOE_plus-L.yaml
  89. + 0 - 39  paddlex/utils/hpi_configs/PP-YOLOE_plus-M.yaml
  90. + 0 - 39  paddlex/utils/hpi_configs/PP-YOLOE_plus-S.yaml
  91. + 0 - 39  paddlex/utils/hpi_configs/PP-YOLOE_plus-X.yaml
  92. + 0 - 65  paddlex/utils/hpi_configs/PicoDet-L.yaml
  93. + 0 - 65  paddlex/utils/hpi_configs/PicoDet-S.yaml
  94. + 0 - 37  paddlex/utils/hpi_configs/PicoDet_layout_1x.yaml
  95. + 0 - 36  paddlex/utils/hpi_configs/RT-DETR-H.yaml
  96. + 0 - 36  paddlex/utils/hpi_configs/RT-DETR-L.yaml
  97. + 0 - 36  paddlex/utils/hpi_configs/RT-DETR-R18.yaml
  98. + 0 - 36  paddlex/utils/hpi_configs/RT-DETR-R50.yaml
  99. + 0 - 36  paddlex/utils/hpi_configs/RT-DETR-X.yaml
  100. + 0 - 37  paddlex/utils/hpi_configs/ResNet101.yaml

+ 2 - 4
paddlex/repo_apis/PaddleClas_api/cls/model.py

@@ -117,8 +117,6 @@ class ClsModel(BaseModel):
             uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
             config.update([f"Global.uniform_output_enabled={uniform_output_enabled}"])
             config.update([f"Global.pdx_model_name={self.name}"])
-            hpi_config_path = self.model_info.get("hpi_config_path", None)
-            config.update([f"Global.hpi_config_path={hpi_config_path}"])
 
             config.dump(config_path)
             self._assert_empty_kwargs(kwargs)
@@ -232,9 +230,9 @@ class ClsModel(BaseModel):
             if device:
                 config.update_device(device)
             # PDX related settings
+            uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
+            config.update([f"Global.uniform_output_enabled={uniform_output_enabled}"])
             config.update([f"Global.pdx_model_name={self.name}"])
-            hpi_config_path = self.model_info.get("hpi_config_path", None)
-            config.update([f"Global.hpi_config_path={hpi_config_path}"])
 
             config.dump(config_path)
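
 Note: in both hunks above, the per-model Global.hpi_config_path entry is dropped and Global.uniform_output_enabled is written instead. The following is only a minimal sketch of that resulting pattern, assuming a PaddleClas-style config wrapper with an update(list_of_strings) method; apply_pdx_settings is a hypothetical name used for illustration and is not part of the PaddleX API.

    def apply_pdx_settings(config, model_name, **kwargs):
        # Pop the flag (default True) so the caller can still assert the remaining kwargs are empty.
        uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
        config.update([f"Global.uniform_output_enabled={uniform_output_enabled}"])
        config.update([f"Global.pdx_model_name={model_name}"])
        # Global.hpi_config_path is no longer read from model_info or written to the config.
        return kwargs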
 

+ 0 - 81
paddlex/repo_apis/PaddleClas_api/cls/register.py

@@ -23,7 +23,6 @@ from .config import ClsConfig
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLECLAS_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
-HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -45,8 +44,6 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": HPI_CONFIG_DIR
-        / "SwinTransformer_tiny_patch4_window7_224.yaml",
     }
 )
 
@@ -59,8 +56,6 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": HPI_CONFIG_DIR
-        / "SwinTransformer_small_patch4_window7_224.yaml",
     }
 )
 
@@ -72,8 +67,6 @@ register_model_info(
             PDX_CONFIG_DIR, "SwinTransformer_base_patch4_window7_224.yaml"
         ),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR
-        / "SwinTransformer_base_patch4_window7_224.yaml",
     }
 )
 
@@ -86,8 +79,6 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": HPI_CONFIG_DIR
-        / "SwinTransformer_base_patch4_window12_384.yaml",
     }
 )
 
@@ -100,8 +91,6 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": HPI_CONFIG_DIR
-        / "SwinTransformer_large_patch4_window7_224.yaml",
     }
 )
 
@@ -114,8 +103,6 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": HPI_CONFIG_DIR
-        / "SwinTransformer_large_patch4_window12_384.yaml",
     }
 )
 
@@ -125,7 +112,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x0_25.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x0_25.yaml",
     }
 )
 
@@ -135,7 +121,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x0_35.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x0_35.yaml",
     }
 )
 
@@ -145,7 +130,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x0_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x0_5.yaml",
     }
 )
 
@@ -155,7 +139,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x0_75.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x0_75.yaml",
     }
 )
 
@@ -165,7 +148,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x1_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x1_0.yaml",
     }
 )
 
@@ -175,7 +157,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x1_0_doc_ori.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x1_0_doc_ori.yaml",
     }
 )
 
@@ -185,7 +166,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x1_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x1_5.yaml",
     }
 )
 
@@ -195,7 +175,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x2_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x2_0.yaml",
     }
 )
 
@@ -205,7 +184,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x2_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x2_5.yaml",
     }
 )
 
@@ -215,7 +193,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNetV2_small.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNetV2_small.yaml",
     }
 )
 
@@ -225,7 +202,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNetV2_base.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNetV2_base.yaml",
     }
 )
 
@@ -235,7 +211,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNetV2_large.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNetV2_large.yaml",
     }
 )
 
@@ -245,7 +220,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "CLIP_vit_base_patch16_224.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "CLIP_vit_base_patch16_224.yaml",
     }
 )
 
@@ -255,7 +229,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "CLIP_vit_large_patch14_224.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "CLIP_vit_large_patch14_224.yaml",
     }
 )
 
@@ -265,7 +238,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNet_tiny.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNet_tiny.yaml",
     }
 )
 
@@ -275,7 +247,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNet_small.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNet_small.yaml",
     }
 )
 
@@ -285,7 +256,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNet_base.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNet_base.yaml",
     }
 )
 
@@ -295,7 +265,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B0.yaml",
     }
 )
 
@@ -305,7 +274,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B1.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B1.yaml",
     }
 )
 
@@ -315,7 +283,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B2.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B2.yaml",
     }
 )
 
@@ -325,7 +292,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B3.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B3.yaml",
     }
 )
 
@@ -335,7 +301,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B4.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B4.yaml",
     }
 )
 
@@ -345,7 +310,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B5.yaml",
     }
 )
 
@@ -355,7 +319,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B6.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B6.yaml",
     }
 )
 
@@ -365,7 +328,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet18.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet18.yaml",
     }
 )
 
@@ -375,7 +337,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet18_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet18_vd.yaml",
     }
 )
 
@@ -385,7 +346,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet34.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet34.yaml",
     }
 )
 
@@ -395,7 +355,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet34_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet34_vd.yaml",
     }
 )
 
@@ -405,7 +364,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet50.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet50.yaml",
     }
 )
 
@@ -415,7 +373,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet50_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet50_vd.yaml",
     }
 )
 
@@ -425,7 +382,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet101.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet101.yaml",
     }
 )
 
@@ -435,7 +391,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet101_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet101_vd.yaml",
     }
 )
 
@@ -445,7 +400,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet152.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet152.yaml",
     }
 )
 
@@ -455,7 +409,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet152_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet152_vd.yaml",
     }
 )
 
@@ -465,7 +418,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet200_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ResNet200_vd.yaml",
     }
 )
 
@@ -476,7 +428,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV1_x0_25.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV1_x0_25.yaml",
     }
 )
 
@@ -487,7 +438,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV1_x0_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV1_x0_5.yaml",
     }
 )
 
@@ -498,7 +448,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV1_x0_75.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV1_x0_75.yaml",
     }
 )
 
@@ -509,7 +458,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV1_x1_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV1_x1_0.yaml",
     }
 )
 
@@ -519,7 +467,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV2_x0_25.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV2_x0_25.yaml",
     }
 )
 
@@ -529,7 +476,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV2_x0_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV2_x0_5.yaml",
     }
 )
 
@@ -539,7 +485,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV2_x1_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV2_x1_0.yaml",
     }
 )
 
@@ -549,7 +494,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV2_x1_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV2_x1_5.yaml",
     }
 )
 
@@ -559,7 +503,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV2_x2_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV2_x2_0.yaml",
     }
 )
 
@@ -569,7 +512,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_large_x0_35.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x0_35.yaml",
     }
 )
 
@@ -579,7 +521,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_large_x0_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x0_5.yaml",
     }
 )
 
@@ -589,7 +530,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_large_x0_75.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x0_75.yaml",
     }
 )
 
@@ -599,7 +539,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_large_x1_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x1_0.yaml",
     }
 )
 
@@ -609,7 +548,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_large_x1_25.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x1_25.yaml",
     }
 )
 
@@ -619,7 +557,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_small_x0_35.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x1_25.yaml",
     }
 )
 
@@ -629,7 +566,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_small_x0_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_small_x0_5.yaml",
     }
 )
 
@@ -639,7 +575,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_small_x0_75.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_small_x0_75.yaml",
     }
 )
 
@@ -649,7 +584,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_small_x1_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_small_x1_0.yaml",
     }
 )
 
@@ -659,7 +593,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_small_x1_25.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_small_x1_25.yaml",
     }
 )
 
@@ -714,7 +647,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_tiny.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_tiny.yaml",
     }
 )
 
@@ -724,7 +656,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_small.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_small.yaml",
     }
 )
 
@@ -734,7 +665,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_base_224.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_base_224.yaml",
     }
 )
 
@@ -744,7 +674,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_base_384.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_base_384.yaml",
     }
 )
 
@@ -754,7 +683,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_large_224.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_large_224.yaml",
     }
 )
 
@@ -764,7 +692,6 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_large_384.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_large_384.yaml",
     }
 )
 
@@ -865,7 +792,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x1_0_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": None,
     }
 )
 
@@ -876,7 +802,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet50_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": None,
     }
 )
 
@@ -887,7 +812,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B0_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": None,
     }
 )
 
@@ -898,7 +822,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B4_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": None,
     }
 )
 
@@ -909,7 +832,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B6_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": None,
     }
 )
 
@@ -920,7 +842,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "CLIP_vit_base_patch16_448_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": None,
     }
 )
 
@@ -933,7 +854,6 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": None,
     }
 )
 
@@ -944,7 +864,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x1_0_vehicle_attribute.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
-        "hpi_config_path": None,
     }
 )
 

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-L.yaml

@@ -68,7 +68,7 @@ EvalReader:
 
 TestReader:
   inputs_def:
-    image_shape: [1, 3, *eval_height, *eval_width]
+    image_shape: [3, *eval_height, *eval_width]
   sample_transforms:
   - Decode: {}
   - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-L_layout_17cls.yaml

@@ -68,7 +68,7 @@ EvalReader:
 
 TestReader:
   inputs_def:
-    image_shape: [1, 3, *eval_height, *eval_width]
+    image_shape: [3, *eval_height, *eval_width]
   sample_transforms:
   - Decode: {}
   - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-L_layout_3cls.yaml

@@ -68,7 +68,7 @@ EvalReader:
 
 TestReader:
   inputs_def:
-    image_shape: [1, 3, *eval_height, *eval_width]
+    image_shape: [3, *eval_height, *eval_width]
   sample_transforms:
   - Decode: {}
   - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-M.yaml

@@ -71,7 +71,7 @@ EvalReader:
 
 TestReader:
   inputs_def:
-    image_shape: [1, 3, *eval_height, *eval_width]
+    image_shape: [3, *eval_height, *eval_width]
   sample_transforms:
   - Decode: {}
   - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-S.yaml

@@ -68,7 +68,7 @@ EvalReader:
 
 TestReader:
   inputs_def:
-    image_shape: [1, 3, *eval_height, *eval_width]
+    image_shape: [3, *eval_height, *eval_width]
   sample_transforms:
   - Decode: {}
   - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-S_layout_17cls.yaml

@@ -68,7 +68,7 @@ EvalReader:
 
 TestReader:
   inputs_def:
-    image_shape: [1, 3, *eval_height, *eval_width]
+    image_shape: [3, *eval_height, *eval_width]
   sample_transforms:
   - Decode: {}
   - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-S_layout_3cls.yaml

@@ -68,7 +68,7 @@ EvalReader:
 
 TestReader:
   inputs_def:
-    image_shape: [1, 3, *eval_height, *eval_width]
+    image_shape: [3, *eval_height, *eval_width]
   sample_transforms:
   - Decode: {}
   - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/configs/PicoDet-XS.yaml

@@ -71,7 +71,7 @@ EvalReader:
 
 TestReader:
   inputs_def:
-    image_shape: [1, 3, *eval_height, *eval_width]
+    image_shape: [3, *eval_height, *eval_width]
   sample_transforms:
   - Decode: {}
   - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/configs/PicoDet_LCNet_x2_5_face.yaml

@@ -127,7 +127,7 @@ EvalReader:
 
 TestReader:
   inputs_def:
-    image_shape: [1, 3, *eval_height, *eval_width]
+    image_shape: [3, *eval_height, *eval_width]
   sample_transforms:
   - Decode: {}
   - Resize: {interp: 2, target_size: *eval_size, keep_ratio: False}

+ 1 - 1
paddlex/repo_apis/PaddleDetection_api/configs/PicoDet_layout_1x.yaml

@@ -66,7 +66,7 @@ EvalReader:
 
 TestReader:
   inputs_def:
-    image_shape: [1, 3, 800, 608]
+    image_shape: [3, 800, 608]
   sample_transforms:
   - Decode: {}
   - Resize: {interp: 2, target_size: [800, 608], keep_ratio: False}

+ 2 - 8
paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py

@@ -129,10 +129,6 @@ class InstanceSegModel(BaseModel):
         uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
         config.update({"uniform_output_enabled": uniform_output_enabled})
         config.update({"pdx_model_name": self.name})
-        hpi_config_path = self.model_info.get("hpi_config_path", None)
-        if hpi_config_path:
-            hpi_config_path = hpi_config_path.as_posix()
-        config.update({"hpi_config_path": hpi_config_path})
 
         self._assert_empty_kwargs(kwargs)
 
@@ -281,11 +277,9 @@ class InstanceSegModel(BaseModel):
             cli_args.append(CLIArgument("-o", f"exclude_nms={bool(exclude_nms)}"))
 
         # PDX related settings
+        uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
+        config.update({"uniform_output_enabled": uniform_output_enabled})
         config.update({"pdx_model_name": self.name})
-        hpi_config_path = self.model_info.get("hpi_config_path", None)
-        if hpi_config_path:
-            hpi_config_path = hpi_config_path.as_posix()
-        config.update({"hpi_config_path": hpi_config_path})
 
         self._assert_empty_kwargs(kwargs)
 

+ 0 - 3
paddlex/repo_apis/PaddleDetection_api/instance_seg/register.py

@@ -23,7 +23,6 @@ from .runner import InstanceSegRunner
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEDETECTION_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
-HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -78,7 +77,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "Mask-RT-DETR-L.yaml",
     }
 )
 
@@ -109,7 +107,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "Mask-RT-DETR-H.yaml",
     }
 )
 

+ 2 - 8
paddlex/repo_apis/PaddleDetection_api/object_det/model.py

@@ -133,10 +133,6 @@ class DetModel(BaseModel):
         uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
         config.update({"uniform_output_enabled": uniform_output_enabled})
         config.update({"pdx_model_name": self.name})
-        hpi_config_path = self.model_info.get("hpi_config_path", None)
-        if hpi_config_path:
-            hpi_config_path = hpi_config_path.as_posix()
-        config.update({"hpi_config_path": hpi_config_path})
 
         self._assert_empty_kwargs(kwargs)
 
@@ -285,11 +281,9 @@ class DetModel(BaseModel):
             cli_args.append(CLIArgument("-o", f"exclude_nms={bool(exclude_nms)}"))
 
         # PDX related settings
+        uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
+        config.update({"uniform_output_enabled": uniform_output_enabled})
         config.update({"pdx_model_name": self.name})
-        hpi_config_path = self.model_info.get("hpi_config_path", None)
-        if hpi_config_path:
-            hpi_config_path = hpi_config_path.as_posix()
-        config.update({"hpi_config_path": hpi_config_path})
 
         if self.name in official_categories.keys():
             anno_val_file = abspath(

+ 0 - 22
paddlex/repo_apis/PaddleDetection_api/object_det/register.py

@@ -23,7 +23,6 @@ from .runner import DetRunner
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEDETECTION_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
-HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -46,7 +45,6 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "compression"],
         "supported_dataset_types": ["COCODetDataset"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PicoDet-S.yaml",
     }
 )
 
@@ -60,7 +58,6 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "compression"],
         "supported_dataset_types": ["COCODetDataset"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PicoDet-L.yaml",
     }
 )
 
@@ -79,7 +76,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["O1", "O2"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-YOLOE_plus-S.yaml",
     }
 )
 
@@ -98,7 +94,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["O1", "O2"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-YOLOE_plus-M.yaml",
     }
 )
 
@@ -114,7 +109,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["O1", "O2"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-YOLOE_plus-L.yaml",
     }
 )
 
@@ -130,7 +124,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["O1", "O2"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-YOLOE_plus-X.yaml",
     }
 )
 
@@ -146,7 +139,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "RT-DETR-L.yaml",
     }
 )
 
@@ -162,7 +154,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "RT-DETR-H.yaml",
     }
 )
 
@@ -178,7 +169,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "RT-DETR-X.yaml",
     }
 )
 
@@ -194,7 +184,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "RT-DETR-R18.yaml",
     }
 )
 
@@ -210,7 +199,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "RT-DETR-R50.yaml",
     }
 )
 
@@ -226,7 +214,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "PicoDet_layout_1x.yaml",
     }
 )
 
@@ -242,7 +229,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "YOLOv3-DarkNet53.yaml",
     }
 )
 
@@ -258,7 +244,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "YOLOv3-MobileNetV3.yaml",
     }
 )
 
@@ -274,7 +259,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "YOLOv3-ResNet50_vd_DCN.yaml",
     }
 )
 
@@ -290,7 +274,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-L.yaml",
     }
 )
 
@@ -306,7 +289,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-M.yaml",
     }
 )
 
@@ -322,7 +304,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-N.yaml",
     }
 )
 
@@ -338,7 +319,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-S.yaml",
     }
 )
 
@@ -354,7 +334,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-T.yaml",
     }
 )
 
@@ -370,7 +349,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
-        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-X.yaml",
     }
 )
 

+ 0 - 3
paddlex/repo_apis/PaddleOCR_api/table_rec/register.py

@@ -23,7 +23,6 @@ from .config import TableRecConfig
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEOCR_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
-HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -41,7 +40,6 @@ register_model_info(
         "suite": "TableRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "SLANet.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SLANet.yaml",
     }
 )
 
@@ -51,6 +49,5 @@ register_model_info(
         "suite": "TableRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "SLANet_plus.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SLANet_plus.yaml",
     }
 )

+ 0 - 5
paddlex/repo_apis/PaddleOCR_api/text_det/register.py

@@ -23,7 +23,6 @@ from .config import TextDetConfig
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEOCR_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
-HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -42,7 +41,6 @@ register_model_info(
         "suite": "TextDet",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-OCRv4_mobile_det.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-OCRv4_mobile_det.yaml",
     }
 )
 
@@ -52,7 +50,6 @@ register_model_info(
         "suite": "TextDet",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-OCRv4_server_det.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-OCRv4_server_det.yaml",
     }
 )
 
@@ -62,7 +59,6 @@ register_model_info(
         "suite": "TextDet",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-OCRv4_server_seal_det.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-OCRv4_server_seal_det.yaml",
     }
 )
 
@@ -72,6 +68,5 @@ register_model_info(
         "suite": "TextDet",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-OCRv4_mobile_seal_det.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-OCRv4_mobile_seal_det.yaml",
     }
 )

+ 2 - 4
paddlex/repo_apis/PaddleOCR_api/text_rec/model.py

@@ -137,8 +137,6 @@ class TextRecModel(BaseModel):
         uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
         config.update({"Global.uniform_output_enabled": uniform_output_enabled})
         config.update({"Global.pdx_model_name": self.name})
-        hpi_config_path = self.model_info.get("hpi_config_path", None)
-        config.update({"Global.hpi_config_path": hpi_config_path})
 
         self._assert_empty_kwargs(kwargs)
 
@@ -270,9 +268,9 @@ class TextRecModel(BaseModel):
             config.update_class_path(class_path)
 
         # PDX related settings
+        uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
+        config.update({"Global.uniform_output_enabled": uniform_output_enabled})
         config.update({"Global.pdx_model_name": self.name})
-        hpi_config_path = self.model_info.get("hpi_config_path", None)
-        config.update({"Global.hpi_config_path": hpi_config_path})
 
         self._assert_empty_kwargs(kwargs)
 

+ 0 - 6
paddlex/repo_apis/PaddleOCR_api/text_rec/register.py

@@ -23,7 +23,6 @@ from .config import TextRecConfig
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEOCR_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
-HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -41,7 +40,6 @@ register_model_info(
         "suite": "TextRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-OCRv4_mobile_rec.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-OCRv4_mobile_rec.yaml",
     }
 )
 
@@ -51,7 +49,6 @@ register_model_info(
         "suite": "TextRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-OCRv4_server_rec.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-OCRv4_server_rec.yaml",
     }
 )
 
@@ -61,7 +58,6 @@ register_model_info(
         "suite": "TextRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "ch_SVTRv2_rec.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ch_SVTRv2_rec.yaml",
     }
 )
 
@@ -71,7 +67,6 @@ register_model_info(
         "suite": "TextRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "ch_RepSVTR_rec.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
-        "hpi_config_path": HPI_CONFIG_DIR / "ch_RepSVTR_rec.yaml",
     }
 )
 
@@ -81,6 +76,5 @@ register_model_info(
         "suite": "TextRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "LaTeX_OCR_rec.yml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
-        "hpi_config_path": None,
     }
 )

+ 2 - 8
paddlex/repo_apis/PaddleSeg_api/seg/model.py

@@ -169,10 +169,6 @@ class SegModel(BaseModel):
         uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
         config.set_val("uniform_output_enabled", uniform_output_enabled)
         config.set_val("pdx_model_name", self.name)
-        hpi_config_path = self.model_info.get("hpi_config_path", None)
-        if hpi_config_path:
-            hpi_config_path = hpi_config_path.as_posix()
-        config.set_val("hpi_config_path", hpi_config_path)
 
         self._assert_empty_kwargs(kwargs)
 
@@ -355,11 +351,9 @@ class SegModel(BaseModel):
             cli_args.append(CLIArgument("--output_op", output_op))
 
         # PDX related settings
+        uniform_output_enabled = kwargs.pop("uniform_output_enabled", True)
+        config.set_val("uniform_output_enabled", uniform_output_enabled)
         config.set_val("pdx_model_name", self.name)
-        hpi_config_path = self.model_info.get("hpi_config_path", None)
-        if hpi_config_path:
-            hpi_config_path = hpi_config_path.as_posix()
-        config.set_val("hpi_config_path", hpi_config_path)
 
         self._assert_empty_kwargs(kwargs)
 

+ 0 - 19
paddlex/repo_apis/PaddleSeg_api/seg/register.py

@@ -23,7 +23,6 @@ from .config import SegConfig
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLESEG_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
-HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -43,7 +42,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "OCRNet_HRNet-W48.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "OCRNet_HRNet-W48.yaml",
     }
 )
 
@@ -53,7 +51,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "OCRNet_HRNet-W18.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "OCRNet_HRNet-W18.yaml",
     }
 )
 
@@ -76,7 +73,6 @@ register_model_info(
         "supported_predict_opts": {"device": ["cpu", "gpu", "xpu", "npu", "mlu"]},
         "supported_infer_opts": {"device": ["cpu", "gpu", "xpu", "npu", "mlu"]},
         "supported_dataset_types": [],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LiteSeg-T.yaml",
     }
 )
 
@@ -99,7 +95,6 @@ register_model_info(
         "supported_predict_opts": {"device": ["cpu", "gpu", "xpu", "npu", "mlu"]},
         "supported_infer_opts": {"device": ["cpu", "gpu", "xpu", "npu", "mlu"]},
         "supported_dataset_types": [],
-        "hpi_config_path": HPI_CONFIG_DIR / "PP-LiteSeg-B.yaml",
     }
 )
 
@@ -110,7 +105,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SeaFormer_base.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SeaFormer_base.yaml",
     }
 )
 
@@ -120,7 +114,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SeaFormer_tiny.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SeaFormer_tiny.yaml",
     }
 )
 
@@ -130,7 +123,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SeaFormer_small.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SeaFormer_small.yaml",
     }
 )
 
@@ -140,7 +132,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SeaFormer_large.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SeaFormer_large.yaml",
     }
 )
 
@@ -151,7 +142,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B0.yaml",
     }
 )
 
@@ -161,7 +151,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B1.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B1.yaml",
     }
 )
 
@@ -171,7 +160,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B2.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B2.yaml",
     }
 )
 
@@ -181,7 +169,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B3.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B3.yaml",
     }
 )
 
@@ -191,7 +178,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B4.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B4.yaml",
     }
 )
 
@@ -201,7 +187,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B5.yaml",
     }
 )
 
@@ -212,7 +197,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "Deeplabv3-R50.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "Deeplabv3-R50.yaml",
     }
 )
 
@@ -222,7 +206,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "Deeplabv3-R101.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "Deeplabv3-R101.yaml",
     }
 )
 
@@ -232,7 +215,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "Deeplabv3_Plus-R50.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "Deeplabv3_Plus-R50.yaml",
     }
 )
 
@@ -242,7 +224,6 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "Deeplabv3_Plus-R101.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
-        "hpi_config_path": HPI_CONFIG_DIR / "Deeplabv3_Plus-R101.yaml",
     }
 )
 

+ 0 - 37
paddlex/utils/hpi_configs/CLIP_vit_base_patch16_224.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/CLIP_vit_large_patch14_224.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/ConvNeXt_base_224.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/ConvNeXt_base_384.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/ConvNeXt_large_224.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/ConvNeXt_large_384.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/ConvNeXt_small.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/ConvNeXt_tiny.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 34
paddlex/utils/hpi_configs/Deeplabv3-R101.yaml

@@ -1,34 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 34
paddlex/utils/hpi_configs/Deeplabv3-R50.yaml

@@ -1,34 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 34
paddlex/utils/hpi_configs/Deeplabv3_Plus-R101.yaml

@@ -1,34 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 34
paddlex/utils/hpi_configs/Deeplabv3_Plus-R50.yaml

@@ -1,34 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 13
paddlex/utils/hpi_configs/Mask-RT-DETR-H.yaml

@@ -1,13 +0,0 @@
-Hpi:
-  backend_config:
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-  selected_backends:
-    cpu: paddle_infer
-    gpu: paddle_infer
-  supported_backends:
-    cpu:
-    - paddle_infer
-    gpu:
-    - paddle_infer

+ 0 - 13
paddlex/utils/hpi_configs/Mask-RT-DETR-L.yaml

@@ -1,13 +0,0 @@
-Hpi:
-  backend_config:
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-  selected_backends:
-    cpu: paddle_infer
-    gpu: paddle_infer
-  supported_backends:
-    cpu:
-    - paddle_infer
-    gpu:
-    - paddle_infer

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV1_x0_25.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV1_x0_5.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV1_x0_75.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV1_x1_0.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 25
paddlex/utils/hpi_configs/MobileNetV2_x0_25.yaml

@@ -1,25 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 25
paddlex/utils/hpi_configs/MobileNetV2_x0_5.yaml

@@ -1,25 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 25
paddlex/utils/hpi_configs/MobileNetV2_x1_0.yaml

@@ -1,25 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 25
paddlex/utils/hpi_configs/MobileNetV2_x1_5.yaml

@@ -1,25 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 25
paddlex/utils/hpi_configs/MobileNetV2_x2_0.yaml

@@ -1,25 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV3_large_x0_35.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV3_large_x0_5.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV3_large_x0_75.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV3_large_x1_0.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV3_large_x1_25.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV3_small_x0_35.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV3_small_x0_5.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV3_small_x0_75.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV3_small_x1_0.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/MobileNetV3_small_x1_25.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 33
paddlex/utils/hpi_configs/OCRNet_HRNet-W18.yaml

@@ -1,33 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - tensorrt

+ 0 - 33
paddlex/utils/hpi_configs/OCRNet_HRNet-W48.yaml

@@ -1,33 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-HGNetV2-B0.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-HGNetV2-B1.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-HGNetV2-B2.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-HGNetV2-B3.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-HGNetV2-B4.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-HGNetV2-B5.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-HGNetV2-B6.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-HGNet_base.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-HGNet_small.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-HGNet_tiny.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNetV2_base.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNetV2_large.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNetV2_small.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNet_x0_25.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNet_x0_35.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNet_x0_5.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNet_x0_75.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNet_x1_0.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 13
paddlex/utils/hpi_configs/PP-LCNet_x1_0_doc_ori.yaml

@@ -1,13 +0,0 @@
-Hpi:
-  backend_config:
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-  selected_backends:
-    cpu: paddle_infer
-    gpu: paddle_infer
-  supported_backends:
-    cpu:
-    - paddle_infer
-    gpu:
-    - paddle_infer

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNet_x1_5.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNet_x2_0.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-LCNet_x2_5.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 34
paddlex/utils/hpi_configs/PP-LiteSeg-B.yaml

@@ -1,34 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 34
paddlex/utils/hpi_configs/PP-LiteSeg-T.yaml

@@ -1,34 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PP-OCRv4_mobile_det.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 25
paddlex/utils/hpi_configs/PP-OCRv4_mobile_rec.yaml

@@ -1,25 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 13
paddlex/utils/hpi_configs/PP-OCRv4_mobile_seal_det.yaml

@@ -1,13 +0,0 @@
-Hpi:
-  backend_config:
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-  selected_backends:
-    cpu: paddle_infer
-    gpu: paddle_infer
-  supported_backends:
-    cpu:
-    - paddle_infer
-    gpu:
-    - paddle_infer

+ 0 - 37
paddlex/utils/hpi_configs/PP-OCRv4_server_det.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 25
paddlex/utils/hpi_configs/PP-OCRv4_server_rec.yaml

@@ -1,25 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 13
paddlex/utils/hpi_configs/PP-OCRv4_server_seal_det.yaml

@@ -1,13 +0,0 @@
-Hpi:
-  backend_config:
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-  selected_backends:
-    cpu: paddle_infer
-    gpu: paddle_infer
-  supported_backends:
-    cpu:
-    - paddle_infer
-    gpu:
-    - paddle_infer

+ 0 - 39
paddlex/utils/hpi_configs/PP-YOLOE_plus-L.yaml

@@ -1,39 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 39
paddlex/utils/hpi_configs/PP-YOLOE_plus-M.yaml

@@ -1,39 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 39
paddlex/utils/hpi_configs/PP-YOLOE_plus-S.yaml

@@ -1,39 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 39
paddlex/utils/hpi_configs/PP-YOLOE_plus-X.yaml

@@ -1,39 +0,0 @@
-Hpi:
-  backend_config:
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: openvino
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 65
paddlex/utils/hpi_configs/PicoDet-L.yaml

@@ -1,65 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 65
paddlex/utils/hpi_configs/PicoDet-S.yaml

@@ -1,65 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/PicoDet_layout_1x.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        image:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        image:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

+ 0 - 36
paddlex/utils/hpi_configs/RT-DETR-H.yaml

@@ -1,36 +0,0 @@
-Hpi:
-  backend_config:
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: paddle_infer
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 36
paddlex/utils/hpi_configs/RT-DETR-L.yaml

@@ -1,36 +0,0 @@
-Hpi:
-  backend_config:
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: paddle_infer
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 36
paddlex/utils/hpi_configs/RT-DETR-R18.yaml

@@ -1,36 +0,0 @@
-Hpi:
-  backend_config:
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: paddle_infer
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 36
paddlex/utils/hpi_configs/RT-DETR-R50.yaml

@@ -1,36 +0,0 @@
-Hpi:
-  backend_config:
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: paddle_infer
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 36
paddlex/utils/hpi_configs/RT-DETR-X.yaml

@@ -1,36 +0,0 @@
-Hpi:
-  backend_config:
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        im_shape:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-        image:
-        - []
-        - []
-        - []
-        scale_factor:
-        - - 1
-          - 2
-        - - 1
-          - 2
-        - - 1
-          - 2
-      enable_log_info: false
-      max_batch_size: null
-  selected_backends:
-    cpu: paddle_infer
-    gpu: paddle_tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt

+ 0 - 37
paddlex/utils/hpi_configs/ResNet101.yaml

@@ -1,37 +0,0 @@
-Hpi:
-  backend_config:
-    onnx_runtime:
-      cpu_num_threads: 8
-    openvino:
-      cpu_num_threads: 8
-    paddle_infer:
-      cpu_num_threads: 8
-      enable_log_info: false
-    paddle_tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      enable_log_info: false
-      max_batch_size: null
-    tensorrt:
-      dynamic_shapes:
-        x:
-        - []
-        - []
-        - []
-      max_batch_size: null
-  selected_backends:
-    cpu: onnx_runtime
-    gpu: tensorrt
-  supported_backends:
-    cpu:
-    - paddle_infer
-    - openvino
-    - onnx_runtime
-    gpu:
-    - paddle_infer
-    - paddle_tensorrt
-    - onnx_runtime
-    - tensorrt

Some files were not shown because the diff is too large