gaotingquan 1 year ago
parent
commit
1efe3dcd38
100 changed files with 2969 additions and 1 deletion
  1. paddlex/repo_apis/PaddleClas_api/cls/model.py (+11 -0)
  2. paddlex/repo_apis/PaddleClas_api/cls/register.py (+79 -0)
  3. paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py (+14 -0)
  4. paddlex/repo_apis/PaddleDetection_api/instance_seg/register.py (+4 -0)
  5. paddlex/repo_apis/PaddleDetection_api/object_det/model.py (+15 -1)
  6. paddlex/repo_apis/PaddleDetection_api/object_det/register.py (+23 -0)
  7. paddlex/repo_apis/PaddleOCR_api/table_rec/register.py (+3 -0)
  8. paddlex/repo_apis/PaddleOCR_api/text_det/register.py (+4 -0)
  9. paddlex/repo_apis/PaddleOCR_api/text_rec/model.py (+10 -0)
  10. paddlex/repo_apis/PaddleOCR_api/text_rec/register.py (+7 -0)
  11. paddlex/repo_apis/PaddleSeg_api/seg/model.py (+14 -0)
  12. paddlex/repo_apis/PaddleSeg_api/seg/register.py (+19 -0)
  13. paddlex/utils/hpi_configs/CLIP_vit_base_patch16_224.yaml (+35 -0)
  14. paddlex/utils/hpi_configs/CLIP_vit_large_patch14_224.yaml (+35 -0)
  15. paddlex/utils/hpi_configs/ConvNeXt_base_224.yaml (+35 -0)
  16. paddlex/utils/hpi_configs/ConvNeXt_base_384.yaml (+35 -0)
  17. paddlex/utils/hpi_configs/ConvNeXt_large_224.yaml (+35 -0)
  18. paddlex/utils/hpi_configs/ConvNeXt_large_384.yaml (+35 -0)
  19. paddlex/utils/hpi_configs/ConvNeXt_small.yaml (+35 -0)
  20. paddlex/utils/hpi_configs/ConvNeXt_tiny.yaml (+35 -0)
  21. paddlex/utils/hpi_configs/Deeplabv3-R101.yaml (+32 -0)
  22. paddlex/utils/hpi_configs/Deeplabv3-R50.yaml (+32 -0)
  23. paddlex/utils/hpi_configs/Deeplabv3_Plus-R101.yaml (+32 -0)
  24. paddlex/utils/hpi_configs/Deeplabv3_Plus-R50.yaml (+32 -0)
  25. paddlex/utils/hpi_configs/Mask-RT-DETR-H.yaml (+13 -0)
  26. paddlex/utils/hpi_configs/Mask-RT-DETR-L.yaml (+13 -0)
  27. paddlex/utils/hpi_configs/MobileNetV1_x0_25.yaml (+35 -0)
  28. paddlex/utils/hpi_configs/MobileNetV1_x0_5.yaml (+35 -0)
  29. paddlex/utils/hpi_configs/MobileNetV1_x0_75.yaml (+35 -0)
  30. paddlex/utils/hpi_configs/MobileNetV1_x1_0.yaml (+35 -0)
  31. paddlex/utils/hpi_configs/MobileNetV2_x0_25.yaml (+24 -0)
  32. paddlex/utils/hpi_configs/MobileNetV2_x0_5.yaml (+24 -0)
  33. paddlex/utils/hpi_configs/MobileNetV2_x1_0.yaml (+24 -0)
  34. paddlex/utils/hpi_configs/MobileNetV2_x1_5.yaml (+24 -0)
  35. paddlex/utils/hpi_configs/MobileNetV2_x2_0.yaml (+24 -0)
  36. paddlex/utils/hpi_configs/MobileNetV3_large_x0_35.yaml (+35 -0)
  37. paddlex/utils/hpi_configs/MobileNetV3_large_x0_5.yaml (+35 -0)
  38. paddlex/utils/hpi_configs/MobileNetV3_large_x0_75.yaml (+35 -0)
  39. paddlex/utils/hpi_configs/MobileNetV3_large_x1_0.yaml (+35 -0)
  40. paddlex/utils/hpi_configs/MobileNetV3_large_x1_25.yaml (+35 -0)
  41. paddlex/utils/hpi_configs/MobileNetV3_small_x0_35.yaml (+35 -0)
  42. paddlex/utils/hpi_configs/MobileNetV3_small_x0_5.yaml (+35 -0)
  43. paddlex/utils/hpi_configs/MobileNetV3_small_x0_75.yaml (+35 -0)
  44. paddlex/utils/hpi_configs/MobileNetV3_small_x1_0.yaml (+35 -0)
  45. paddlex/utils/hpi_configs/MobileNetV3_small_x1_25.yaml (+35 -0)
  46. paddlex/utils/hpi_configs/OCRNet_HRNet-W18.yaml (+31 -0)
  47. paddlex/utils/hpi_configs/OCRNet_HRNet-W48.yaml (+31 -0)
  48. paddlex/utils/hpi_configs/PP-HGNetV2-B0.yaml (+35 -0)
  49. paddlex/utils/hpi_configs/PP-HGNetV2-B1.yaml (+35 -0)
  50. paddlex/utils/hpi_configs/PP-HGNetV2-B2.yaml (+35 -0)
  51. paddlex/utils/hpi_configs/PP-HGNetV2-B3.yaml (+35 -0)
  52. paddlex/utils/hpi_configs/PP-HGNetV2-B4.yaml (+35 -0)
  53. paddlex/utils/hpi_configs/PP-HGNetV2-B5.yaml (+35 -0)
  54. paddlex/utils/hpi_configs/PP-HGNetV2-B6.yaml (+35 -0)
  55. paddlex/utils/hpi_configs/PP-HGNet_base.yaml (+35 -0)
  56. paddlex/utils/hpi_configs/PP-HGNet_small.yaml (+35 -0)
  57. paddlex/utils/hpi_configs/PP-HGNet_tiny.yaml (+35 -0)
  58. paddlex/utils/hpi_configs/PP-LCNetV2_base.yaml (+35 -0)
  59. paddlex/utils/hpi_configs/PP-LCNetV2_large.yaml (+35 -0)
  60. paddlex/utils/hpi_configs/PP-LCNetV2_small.yaml (+35 -0)
  61. paddlex/utils/hpi_configs/PP-LCNet_x0_25.yaml (+35 -0)
  62. paddlex/utils/hpi_configs/PP-LCNet_x0_35.yaml (+35 -0)
  63. paddlex/utils/hpi_configs/PP-LCNet_x0_5.yaml (+35 -0)
  64. paddlex/utils/hpi_configs/PP-LCNet_x0_75.yaml (+35 -0)
  65. paddlex/utils/hpi_configs/PP-LCNet_x1_0.yaml (+35 -0)
  66. paddlex/utils/hpi_configs/PP-LCNet_x1_5.yaml (+35 -0)
  67. paddlex/utils/hpi_configs/PP-LCNet_x2_0.yaml (+35 -0)
  68. paddlex/utils/hpi_configs/PP-LCNet_x2_5.yaml (+35 -0)
  69. paddlex/utils/hpi_configs/PP-LiteSeg-T.yaml (+32 -0)
  70. paddlex/utils/hpi_configs/PP-OCRv4_mobile_det.yaml (+35 -0)
  71. paddlex/utils/hpi_configs/PP-OCRv4_mobile_rec.yaml (+24 -0)
  72. paddlex/utils/hpi_configs/PP-OCRv4_server_det.yaml (+35 -0)
  73. paddlex/utils/hpi_configs/PP-OCRv4_server_rec.yaml (+24 -0)
  74. paddlex/utils/hpi_configs/PP-YOLOE_plus-L.yaml (+24 -0)
  75. paddlex/utils/hpi_configs/PP-YOLOE_plus-M.yaml (+24 -0)
  76. paddlex/utils/hpi_configs/PP-YOLOE_plus-S.yaml (+24 -0)
  77. paddlex/utils/hpi_configs/PP-YOLOE_plus-X.yaml (+24 -0)
  78. paddlex/utils/hpi_configs/PicoDet-L.yaml (+35 -0)
  79. paddlex/utils/hpi_configs/PicoDet-S.yaml (+35 -0)
  80. paddlex/utils/hpi_configs/PicoDet_layout_1x.yaml (+35 -0)
  81. paddlex/utils/hpi_configs/RT-DETR-H.yaml (+21 -0)
  82. paddlex/utils/hpi_configs/RT-DETR-L.yaml (+21 -0)
  83. paddlex/utils/hpi_configs/RT-DETR-R18.yaml (+21 -0)
  84. paddlex/utils/hpi_configs/RT-DETR-R50.yaml (+21 -0)
  85. paddlex/utils/hpi_configs/RT-DETR-X.yaml (+21 -0)
  86. paddlex/utils/hpi_configs/ResNet101.yaml (+35 -0)
  87. paddlex/utils/hpi_configs/ResNet101_vd.yaml (+35 -0)
  88. paddlex/utils/hpi_configs/ResNet152.yaml (+35 -0)
  89. paddlex/utils/hpi_configs/ResNet152_vd.yaml (+35 -0)
  90. paddlex/utils/hpi_configs/ResNet18.yaml (+35 -0)
  91. paddlex/utils/hpi_configs/ResNet18_vd.yaml (+35 -0)
  92. paddlex/utils/hpi_configs/ResNet200_vd.yaml (+35 -0)
  93. paddlex/utils/hpi_configs/ResNet34.yaml (+35 -0)
  94. paddlex/utils/hpi_configs/ResNet34_vd.yaml (+35 -0)
  95. paddlex/utils/hpi_configs/ResNet50.yaml (+35 -0)
  96. paddlex/utils/hpi_configs/ResNet50_vd.yaml (+35 -0)
  97. paddlex/utils/hpi_configs/SLANet.yaml (+21 -0)
  98. paddlex/utils/hpi_configs/SeaFormer_base.yaml (+21 -0)
  99. paddlex/utils/hpi_configs/SeaFormer_large.yaml (+21 -0)
  100. paddlex/utils/hpi_configs/SeaFormer_small.yaml (+21 -0)

+ 11 - 0
paddlex/repo_apis/PaddleClas_api/cls/model.py

@@ -113,6 +113,11 @@ class ClsModel(BaseModel):
             else:
                 config._update_amp(amp)
 
+            # PDX related settings
+            config.update([f"Global.pdx_model_name={self.name}"])
+            hpi_config_path = self.model_info.get("hpi_config_path", None)
+            config.update([f"Global.hpi_config_path={hpi_config_path}"])
+
             config.dump(config_path)
             self._assert_empty_kwargs(kwargs)
 
@@ -222,6 +227,12 @@ class ClsModel(BaseModel):
             config = self.config.copy()
             config.update_pretrained_weights(weight_path)
             config._update_save_inference_dir(save_dir)
+
+            # PDX related settings
+            config.update([f"Global.pdx_model_name={self.name}"])
+            hpi_config_path = self.model_info.get("hpi_config_path", None)
+            config.update([f"Global.hpi_config_path={hpi_config_path}"])
+
             config.dump(config_path)
 
             self._assert_empty_kwargs(kwargs)
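
The same pattern repeats across the repo_apis changes below: each model registration gains an hpi_config_path entry, and the model's train/export code reads it back from self.model_info and writes it into the dumped config. As a rough, self-contained sketch of that flow only (hypothetical stand-ins for the registry and Config, not the actual PaddleX base classes):

    from pathlib import Path

    # Hypothetical minimal registry and config, to illustrate the data flow.
    _MODEL_INFOS = {}

    def register_model_info(info):
        _MODEL_INFOS[info["model_name"]] = info

    class Config:
        """Stand-in for the suite config: stores "Section.key=value" overrides."""
        def __init__(self):
            self.overrides = {}

        def update(self, opts):
            for opt in opts:
                key, value = opt.split("=", 1)
                self.overrides[key] = value

    HPI_CONFIG_DIR = Path("paddlex/utils/hpi_configs")
    register_model_info({
        "model_name": "PP-LCNet_x1_0",
        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x1_0.yaml",
    })

    name = "PP-LCNet_x1_0"
    config = Config()
    # Mirrors the "# PDX related settings" hunks above.
    config.update([f"Global.pdx_model_name={name}"])
    hpi_config_path = _MODEL_INFOS[name].get("hpi_config_path", None)
    config.update([f"Global.hpi_config_path={hpi_config_path}"])
    print(config.overrides["Global.hpi_config_path"])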

+ 79 - 0
paddlex/repo_apis/PaddleClas_api/cls/register.py

@@ -14,6 +14,7 @@
 
 import os
 import os.path as osp
+from pathlib import Path
 
 from ...base.register import register_model_info, register_suite_info
 from .model import ClsModel
@@ -22,6 +23,7 @@ from .config import ClsConfig
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLECLAS_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
+HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -43,6 +45,8 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": HPI_CONFIG_DIR
+        / "SwinTransformer_tiny_patch4_window7_224.yaml",
     }
 )
 
@@ -55,6 +59,8 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": HPI_CONFIG_DIR
+        / "SwinTransformer_small_patch4_window7_224.yaml",
     }
 )
 
@@ -66,6 +72,8 @@ register_model_info(
             PDX_CONFIG_DIR, "SwinTransformer_base_patch4_window7_224.yaml"
         ),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR
+        / "SwinTransformer_base_patch4_window7_224.yaml",
     }
 )
 
@@ -78,6 +86,8 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": HPI_CONFIG_DIR
+        / "SwinTransformer_base_patch4_window12_384.yaml",
     }
 )
 
@@ -90,6 +100,8 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": HPI_CONFIG_DIR
+        / "SwinTransformer_large_patch4_window7_224.yaml",
     }
 )
 
@@ -102,6 +114,8 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": HPI_CONFIG_DIR
+        / "SwinTransformer_large_patch4_window12_384.yaml",
     }
 )
 
@@ -111,6 +125,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x0_25.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x0_25.yaml",
     }
 )
 
@@ -120,6 +135,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x0_35.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x0_35.yaml",
     }
 )
 
@@ -129,6 +145,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x0_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x0_5.yaml",
     }
 )
 
@@ -138,6 +155,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x0_75.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x0_75.yaml",
     }
 )
 
@@ -147,6 +165,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x1_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x1_0.yaml",
     }
 )
 
@@ -156,6 +175,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x1_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x1_5.yaml",
     }
 )
 
@@ -165,6 +185,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x2_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x2_0.yaml",
     }
 )
 
@@ -174,6 +195,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x2_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNet_x2_5.yaml",
     }
 )
 
@@ -183,6 +205,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNetV2_small.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNetV2_small.yaml",
     }
 )
 
@@ -192,6 +215,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNetV2_base.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNetV2_base.yaml",
     }
 )
 
@@ -201,6 +225,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNetV2_large.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LCNetV2_large.yaml",
     }
 )
 
@@ -210,6 +235,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "CLIP_vit_base_patch16_224.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "CLIP_vit_base_patch16_224.yaml",
     }
 )
 
@@ -219,6 +245,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "CLIP_vit_large_patch14_224.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "CLIP_vit_large_patch14_224.yaml",
     }
 )
 
@@ -228,6 +255,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNet_tiny.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNet_tiny.yaml",
     }
 )
 
@@ -237,6 +265,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNet_small.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNet_small.yaml",
     }
 )
 
@@ -246,6 +275,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNet_base.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNet_base.yaml",
     }
 )
 
@@ -255,6 +285,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B0.yaml",
     }
 )
 
@@ -264,6 +295,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B1.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B1.yaml",
     }
 )
 
@@ -273,6 +305,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B2.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B2.yaml",
     }
 )
 
@@ -282,6 +315,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B3.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B3.yaml",
     }
 )
 
@@ -291,6 +325,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B4.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B4.yaml",
     }
 )
 
@@ -300,6 +335,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B5.yaml",
     }
 )
 
@@ -309,6 +345,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B6.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-HGNetV2-B6.yaml",
     }
 )
 
@@ -318,6 +355,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet18.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet18.yaml",
     }
 )
 
@@ -327,6 +365,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet18_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet18_vd.yaml",
     }
 )
 
@@ -336,6 +375,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet34.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet34.yaml",
     }
 )
 
@@ -345,6 +385,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet34_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet34_vd.yaml",
     }
 )
 
@@ -354,6 +395,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet50.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet50.yaml",
     }
 )
 
@@ -363,6 +405,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet50_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet50_vd.yaml",
     }
 )
 
@@ -372,6 +415,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet101.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet101.yaml",
     }
 )
 
@@ -381,6 +425,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet101_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet101_vd.yaml",
     }
 )
 
@@ -390,6 +435,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet152.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet152.yaml",
     }
 )
 
@@ -399,6 +445,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet152_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet152_vd.yaml",
     }
 )
 
@@ -408,6 +455,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet200_vd.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ResNet200_vd.yaml",
     }
 )
 
@@ -418,6 +466,7 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV1_x0_25.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV1_x0_25.yaml",
     }
 )
 
@@ -428,6 +477,7 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV1_x0_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": HPI_CONFIG_DIR / ".yaml",
     }
 )
 
@@ -438,6 +488,7 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV1_x0_75.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV1_x0_5.yaml",
     }
 )
 
@@ -448,6 +499,7 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV1_x1_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV1_x1_0.yaml",
     }
 )
 
@@ -457,6 +509,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV2_x0_25.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV2_x0_25.yaml",
     }
 )
 
@@ -466,6 +519,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV2_x0_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV2_x0_5.yaml",
     }
 )
 
@@ -475,6 +529,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV2_x1_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV2_x1_0.yaml",
     }
 )
 
@@ -484,6 +539,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV2_x1_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV2_x1_5.yaml",
     }
 )
 
@@ -493,6 +549,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV2_x2_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV2_x2_0.yaml",
     }
 )
 
@@ -502,6 +559,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_large_x0_35.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x0_35.yaml",
     }
 )
 
@@ -511,6 +569,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_large_x0_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x0_5.yaml",
     }
 )
 
@@ -520,6 +579,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_large_x0_75.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x0_75.yaml",
     }
 )
 
@@ -529,6 +589,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_large_x1_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x1_0.yaml",
     }
 )
 
@@ -538,6 +599,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_large_x1_25.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / ".yaml",
     }
 )
 
@@ -547,6 +609,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_small_x0_35.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_large_x1_25.yaml",
     }
 )
 
@@ -556,6 +619,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_small_x0_5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_small_x0_5.yaml",
     }
 )
 
@@ -565,6 +629,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_small_x0_75.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_small_x0_75.yaml",
     }
 )
 
@@ -574,6 +639,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_small_x1_0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_small_x1_0.yaml",
     }
 )
 
@@ -583,6 +649,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "MobileNetV3_small_x1_25.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "MobileNetV3_small_x1_25.yaml",
     }
 )
 
@@ -592,6 +659,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_tiny.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_tiny.yaml",
     }
 )
 
@@ -601,6 +669,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_small.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_small.yaml",
     }
 )
 
@@ -610,6 +679,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_base_224.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_base_224.yaml",
     }
 )
 
@@ -619,6 +689,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_base_384.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_base_384.yaml",
     }
 )
 
@@ -628,6 +699,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_large_224.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_large_224.yaml",
     }
 )
 
@@ -637,6 +709,7 @@ register_model_info(
         "suite": "Cls",
         "config_path": osp.join(PDX_CONFIG_DIR, "ConvNeXt_large_384.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ConvNeXt_large_384.yaml",
     }
 )
 
@@ -647,6 +720,7 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-LCNet_x1_0_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": None,
     }
 )
 
@@ -657,6 +731,7 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "ResNet50_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": None,
     }
 )
 
@@ -667,6 +742,7 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B0_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": None,
     }
 )
 
@@ -677,6 +753,7 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B4_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": None,
     }
 )
 
@@ -687,6 +764,7 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-HGNetV2-B6_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": None,
     }
 )
 
@@ -697,5 +775,6 @@ register_model_info(
         "config_path": osp.join(PDX_CONFIG_DIR, "CLIP_vit_base_patch16_448_ML.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
         "infer_config": "deploy/configs/inference_cls.yaml",
+        "hpi_config_path": None,
     }
 )

+ 14 - 0
paddlex/repo_apis/PaddleDetection_api/instance_seg/model.py

@@ -124,6 +124,13 @@ class InstanceSegModel(BaseModel):
             if enable_ce is not None:
                 cli_args.append(CLIArgument("--enable_ce", enable_ce))
 
+        # PDX related settings
+        config.update({"pdx_model_name": self.name})
+        hpi_config_path = self.model_info.get("hpi_config_path", None)
+        if hpi_config_path:
+            hpi_config_path = hpi_config_path.as_posix()
+        config.update({"hpi_config_path": hpi_config_path})
+
         self._assert_empty_kwargs(kwargs)
 
         with self._create_new_config_file() as config_path:
@@ -265,6 +272,13 @@ class InstanceSegModel(BaseModel):
         if exclude_nms is not None:
             cli_args.append(CLIArgument("-o", f"exclude_nms={bool(exclude_nms)}"))
 
+        # PDX related settings
+        config.update({"pdx_model_name": self.name})
+        hpi_config_path = self.model_info.get("hpi_config_path", None)
+        if hpi_config_path:
+            hpi_config_path = hpi_config_path.as_posix()
+        config.update({"hpi_config_path": hpi_config_path})
+
         self._assert_empty_kwargs(kwargs)
 
         with self._create_new_config_file() as config_path:

+ 4 - 0
paddlex/repo_apis/PaddleDetection_api/instance_seg/register.py

@@ -14,6 +14,7 @@
 
 import os
 import os.path as osp
+from pathlib import Path
 
 from ...base.register import register_model_info, register_suite_info
 from .model import InstanceSegModel
@@ -22,6 +23,7 @@ from .runner import InstanceSegRunner
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEDETECTION_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
+HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -76,6 +78,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "Mask-RT-DETR-L.yaml",
     }
 )
 
@@ -106,6 +109,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "Mask-RT-DETR-H.yaml",
     }
 )
 

+ 15 - 1
paddlex/repo_apis/PaddleDetection_api/object_det/model.py

@@ -125,7 +125,14 @@ class DetModel(BaseModel):
             if enable_ce is not None:
                 cli_args.append(CLIArgument("--enable_ce", enable_ce))
 
-            self._assert_empty_kwargs(kwargs)
+        # PDX related settings
+        config.update({"pdx_model_name": self.name})
+        hpi_config_path = self.model_info.get("hpi_config_path", None)
+        if hpi_config_path:
+            hpi_config_path = hpi_config_path.as_posix()
+        config.update({"hpi_config_path": hpi_config_path})
+
+        self._assert_empty_kwargs(kwargs)
 
         with self._create_new_config_file() as config_path:
             config.dump(config_path)
@@ -266,6 +273,13 @@ class DetModel(BaseModel):
         if exclude_nms is not None:
             cli_args.append(CLIArgument("-o", f"exclude_nms={bool(exclude_nms)}"))
 
+        # PDX related settings
+        config.update({"pdx_model_name": self.name})
+        hpi_config_path = self.model_info.get("hpi_config_path", None)
+        if hpi_config_path:
+            hpi_config_path = hpi_config_path.as_posix()
+        config.update({"hpi_config_path": hpi_config_path})
+
         self._assert_empty_kwargs(kwargs)
 
         with self._create_new_config_file() as config_path:

+ 23 - 0
paddlex/repo_apis/PaddleDetection_api/object_det/register.py

@@ -14,6 +14,7 @@
 
 import os
 import os.path as osp
+from pathlib import Path
 
 from ...base.register import register_model_info, register_suite_info
 from .model import DetModel
@@ -22,6 +23,7 @@ from .runner import DetRunner
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEDETECTION_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
+HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -44,6 +46,7 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "compression"],
         "supported_dataset_types": ["COCODetDataset"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PicoDet-S.yaml",
     }
 )
 
@@ -57,6 +60,7 @@ register_model_info(
         ),
         "supported_apis": ["train", "evaluate", "predict", "export", "compression"],
         "supported_dataset_types": ["COCODetDataset"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PicoDet-L.yaml",
     }
 )
 
@@ -75,6 +79,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["O1", "O2"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-YOLOE_plus-S.yaml",
     }
 )
 
@@ -93,6 +98,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["O1", "O2"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-YOLOE_plus-M.yaml",
     }
 )
 
@@ -108,6 +114,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["O1", "O2"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-YOLOE_plus-L.yaml",
     }
 )
 
@@ -123,6 +130,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["O1", "O2"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-YOLOE_plus-X.yaml",
     }
 )
 
@@ -138,6 +146,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "RT-DETR-L.yaml",
     }
 )
 
@@ -153,6 +162,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "RT-DETR-H.yaml",
     }
 )
 
@@ -168,6 +178,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "RT-DETR-X.yaml",
     }
 )
 
@@ -183,6 +194,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "RT-DETR-R18.yaml",
     }
 )
 
@@ -198,6 +210,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "RT-DETR-R50.yaml",
     }
 )
 
@@ -213,6 +226,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "PicoDet_layout_1x.yaml",
     }
 )
 
@@ -228,6 +242,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "YOLOv3-DarkNet53.yaml",
     }
 )
 
@@ -243,6 +258,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "YOLOv3-MobileNetV3.yaml",
     }
 )
 
@@ -258,6 +274,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "YOLOv3-ResNet50_vd_DCN.yaml",
     }
 )
 
@@ -273,6 +290,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-L.yaml",
     }
 )
 
@@ -288,6 +306,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-M.yaml",
     }
 )
 
@@ -303,6 +322,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-N.yaml",
     }
 )
 
@@ -318,6 +338,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-S.yaml",
     }
 )
 
@@ -333,6 +354,7 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-T.yaml",
     }
 )
 
@@ -348,5 +370,6 @@ register_model_info(
             "dy2st": False,
             "amp": ["OFF"],
         },
+        "hpi_config_path": HPI_CONFIG_DIR / "YOLOX-X.yaml",
     }
 )

+ 3 - 0
paddlex/repo_apis/PaddleOCR_api/table_rec/register.py

@@ -14,6 +14,7 @@
 
 import os
 import os.path as osp
+from pathlib import Path
 
 from ...base.register import register_model_info, register_suite_info
 from .model import TableRecModel
@@ -22,6 +23,7 @@ from .config import TableRecConfig
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEOCR_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
+HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -39,5 +41,6 @@ register_model_info(
         "suite": "TableRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "SLANet.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SLANet.yaml",
     }
 )

+ 4 - 0
paddlex/repo_apis/PaddleOCR_api/text_det/register.py

@@ -14,6 +14,7 @@
 
 import os
 import os.path as osp
+from pathlib import Path
 
 from ...base.register import register_model_info, register_suite_info
 from .model import TextDetModel
@@ -22,6 +23,7 @@ from .config import TextDetConfig
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEOCR_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
+HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -40,6 +42,7 @@ register_model_info(
         "suite": "TextDet",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-OCRv4_mobile_det.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-OCRv4_mobile_det.yaml",
     }
 )
 
@@ -49,5 +52,6 @@ register_model_info(
         "suite": "TextDet",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-OCRv4_server_det.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-OCRv4_server_det.yaml",
     }
 )

+ 10 - 0
paddlex/repo_apis/PaddleOCR_api/text_rec/model.py

@@ -131,6 +131,11 @@ class TextRecModel(BaseModel):
                 for env_name, env_value in envs.items():
                     os.environ[env_name] = str(env_value)
 
+        # PDX related settings
+        config.update({"Global.pdx_model_name": self.name})
+        hpi_config_path = self.model_info.get("hpi_config_path", None)
+        config.update({"Global.hpi_config_path": hpi_config_path})
+
         self._assert_empty_kwargs(kwargs)
 
         with self._create_new_config_file() as config_path:
@@ -256,6 +261,11 @@ class TextRecModel(BaseModel):
         if class_path is not None:
             config.update_class_path(class_path)
 
+        # PDX related settings
+        config.update({"Global.pdx_model_name": self.name})
+        hpi_config_path = self.model_info.get("hpi_config_path", None)
+        config.update({"Global.hpi_config_path": hpi_config_path})
+
         self._assert_empty_kwargs(kwargs)
 
         with self._create_new_config_file() as config_path:

+ 7 - 0
paddlex/repo_apis/PaddleOCR_api/text_rec/register.py

@@ -14,6 +14,7 @@
 
 import os
 import os.path as osp
+from pathlib import Path
 
 from ...base.register import register_model_info, register_suite_info
 from .model import TextRecModel
@@ -22,6 +23,7 @@ from .config import TextRecConfig
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLEOCR_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
+HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -39,6 +41,7 @@ register_model_info(
         "suite": "TextRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-OCRv4_mobile_rec.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-OCRv4_mobile_rec.yaml",
     }
 )
 
@@ -48,6 +51,7 @@ register_model_info(
         "suite": "TextRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "PP-OCRv4_server_rec.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-OCRv4_server_rec.yaml",
     }
 )
 
@@ -57,6 +61,7 @@ register_model_info(
         "suite": "TextRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "ch_SVTRv2_rec.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ch_SVTRv2_rec.yaml",
     }
 )
 
@@ -66,6 +71,7 @@ register_model_info(
         "suite": "TextRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "ch_RepSVTR_rec.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
+        "hpi_config_path": HPI_CONFIG_DIR / "ch_RepSVTR_rec.yaml",
     }
 )
 
@@ -75,5 +81,6 @@ register_model_info(
         "suite": "TextRec",
         "config_path": osp.join(PDX_CONFIG_DIR, "LaTeX_OCR_rec.yml"),
         "supported_apis": ["train", "evaluate", "predict", "export", "infer"],
+        "hpi_config_path": None,
     }
 )

+ 14 - 0
paddlex/repo_apis/PaddleSeg_api/seg/model.py

@@ -164,6 +164,13 @@ class SegModel(BaseModel):
             if seed is not None:
                 cli_args.append(CLIArgument("--seed", seed))
 
+        # PDX related settings
+        config.set_val("pdx_model_name", self.name)
+        hpi_config_path = self.model_info.get("hpi_config_path", None)
+        if hpi_config_path:
+            hpi_config_path = hpi_config_path.as_posix()
+        config.set_val("hpi_config_path", hpi_config_path)
+
         self._assert_empty_kwargs(kwargs)
 
         with self._create_new_config_file() as config_path:
@@ -340,6 +347,13 @@ class SegModel(BaseModel):
             ], "`output_op` must be 'softmax' or 'argmax'."
             cli_args.append(CLIArgument("--output_op", output_op))
 
+        # PDX related settings
+        config.set_val("pdx_model_name", self.name)
+        hpi_config_path = self.model_info.get("hpi_config_path", None)
+        if hpi_config_path:
+            hpi_config_path = hpi_config_path.as_posix()
+        config.set_val("hpi_config_path", hpi_config_path)
+
         self._assert_empty_kwargs(kwargs)
 
         with self._create_new_config_file() as config_path:

+ 19 - 0
paddlex/repo_apis/PaddleSeg_api/seg/register.py

@@ -14,6 +14,7 @@
 
 import os
 import os.path as osp
+from pathlib import Path
 
 from ...base.register import register_model_info, register_suite_info
 from .model import SegModel
@@ -22,6 +23,7 @@ from .config import SegConfig
 
 REPO_ROOT_PATH = os.environ.get("PADDLE_PDX_PADDLESEG_PATH")
 PDX_CONFIG_DIR = osp.abspath(osp.join(osp.dirname(__file__), "..", "configs"))
+HPI_CONFIG_DIR = Path(__file__).parent.parent.parent.parent / "utils" / "hpi_configs"
 
 register_suite_info(
     {
@@ -41,6 +43,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "OCRNet_HRNet-W48.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "OCRNet_HRNet-W48.yaml",
     }
 )
 
@@ -50,6 +53,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "OCRNet_HRNet-W18.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "OCRNet_HRNet-W18.yaml",
     }
 )
 
@@ -72,6 +76,7 @@ register_model_info(
         "supported_predict_opts": {"device": ["cpu", "gpu", "xpu", "npu", "mlu"]},
         "supported_infer_opts": {"device": ["cpu", "gpu", "xpu", "npu", "mlu"]},
         "supported_dataset_types": [],
+        "hpi_config_path": HPI_CONFIG_DIR / "PP-LiteSeg-T.yaml",
     }
 )
 
@@ -82,6 +87,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SeaFormer_base.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SeaFormer_base.yaml",
     }
 )
 
@@ -91,6 +97,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SeaFormer_tiny.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SeaFormer_tiny.yaml",
     }
 )
 
@@ -100,6 +107,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SeaFormer_small.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SeaFormer_small.yaml",
     }
 )
 
@@ -109,6 +117,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SeaFormer_large.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SeaFormer_large.yaml",
     }
 )
 
@@ -119,6 +128,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B0.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B0.yaml",
     }
 )
 
@@ -128,6 +138,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B1.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B1.yaml",
     }
 )
 
@@ -137,6 +148,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B2.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B2.yaml",
     }
 )
 
@@ -146,6 +158,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B3.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B3.yaml",
     }
 )
 
@@ -155,6 +168,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B4.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B4.yaml",
     }
 )
 
@@ -164,6 +178,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "SegFormer-B5.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "SegFormer-B5.yaml",
     }
 )
 
@@ -174,6 +189,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "Deeplabv3-R50.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "Deeplabv3-R50.yaml",
     }
 )
 
@@ -183,6 +199,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "Deeplabv3-R101.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "Deeplabv3-R101.yaml",
     }
 )
 
@@ -192,6 +209,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "Deeplabv3_Plus-R50.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "Deeplabv3_Plus-R50.yaml",
     }
 )
 
@@ -201,6 +219,7 @@ register_model_info(
         "suite": "Seg",
         "config_path": osp.join(PDX_CONFIG_DIR, "Deeplabv3_Plus-R101.yaml"),
         "supported_apis": ["train", "evaluate", "predict", "export"],
+        "hpi_config_path": HPI_CONFIG_DIR / "Deeplabv3_Plus-R101.yaml",
     }
 )
 

+ 35 - 0
paddlex/utils/hpi_configs/CLIP_vit_base_patch16_224.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt
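
Every file added under paddlex/utils/hpi_configs/ follows the same Hpi layout shown above: per-backend options under backend_config, a selected_backends default per device, and the device-to-backend lists in supported_backends. A minimal sketch of reading one of these files and resolving a backend for a device (hypothetical helper assuming PyYAML is available, not PaddleX's actual HPI loader):

    import yaml  # assumes PyYAML

    def load_hpi(path):
        """Return the Hpi section of an hpi_configs/*.yaml file."""
        with open(path, "r", encoding="utf-8") as f:
            return yaml.safe_load(f)["Hpi"]

    def pick_backend(hpi, device):
        """Default backend for the device, falling back to the first supported one."""
        supported = hpi["supported_backends"][device]
        selected = hpi["selected_backends"].get(device)
        return selected if selected in supported else supported[0]

    hpi = load_hpi("paddlex/utils/hpi_configs/CLIP_vit_base_patch16_224.yaml")
    print(pick_backend(hpi, "gpu"))   # paddle_tensorrt for this file
    print(hpi["backend_config"]["onnx_runtime"]["cpu_num_threads"])  # 8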

+ 35 - 0
paddlex/utils/hpi_configs/CLIP_vit_large_patch14_224.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ConvNeXt_base_224.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ConvNeXt_base_384.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ConvNeXt_large_224.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ConvNeXt_large_384.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ConvNeXt_small.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ConvNeXt_tiny.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 32 - 0
paddlex/utils/hpi_configs/Deeplabv3-R101.yaml

@@ -0,0 +1,32 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 32 - 0
paddlex/utils/hpi_configs/Deeplabv3-R50.yaml

@@ -0,0 +1,32 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 32 - 0
paddlex/utils/hpi_configs/Deeplabv3_Plus-R101.yaml

@@ -0,0 +1,32 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 32 - 0
paddlex/utils/hpi_configs/Deeplabv3_Plus-R50.yaml

@@ -0,0 +1,32 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 13 - 0
paddlex/utils/hpi_configs/Mask-RT-DETR-H.yaml

@@ -0,0 +1,13 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_infer
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer

+ 13 - 0
paddlex/utils/hpi_configs/Mask-RT-DETR-L.yaml

@@ -0,0 +1,13 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_infer
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV1_x0_25.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV1_x0_5.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV1_x0_75.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV1_x1_0.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/MobileNetV2_x0_25.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/MobileNetV2_x0_5.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/MobileNetV2_x1_0.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/MobileNetV2_x1_5.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/MobileNetV2_x2_0.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV3_large_x0_35.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV3_large_x0_5.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV3_large_x0_75.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV3_large_x1_0.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV3_large_x1_25.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV3_small_x0_35.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV3_small_x0_5.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV3_small_x0_75.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV3_small_x1_0.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/MobileNetV3_small_x1_25.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 31 - 0
paddlex/utils/hpi_configs/OCRNet_HRNet-W18.yaml

@@ -0,0 +1,31 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - tensorrt

+ 31 - 0
paddlex/utils/hpi_configs/OCRNet_HRNet-W48.yaml

@@ -0,0 +1,31 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-HGNetV2-B0.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-HGNetV2-B1.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-HGNetV2-B2.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-HGNetV2-B3.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-HGNetV2-B4.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-HGNetV2-B5.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-HGNetV2-B6.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-HGNet_base.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-HGNet_small.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-HGNet_tiny.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNetV2_base.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNetV2_large.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNetV2_small.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNet_x0_25.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNet_x0_35.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNet_x0_5.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNet_x0_75.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNet_x1_0.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNet_x1_5.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNet_x2_0.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-LCNet_x2_5.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 32 - 0
paddlex/utils/hpi_configs/PP-LiteSeg-T.yaml

@@ -0,0 +1,32 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-OCRv4_mobile_det.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/PP-OCRv4_mobile_rec.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PP-OCRv4_server_det.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/PP-OCRv4_server_rec.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/PP-YOLOE_plus-L.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/PP-YOLOE_plus-M.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/PP-YOLOE_plus-S.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 24 - 0
paddlex/utils/hpi_configs/PP-YOLOE_plus-X.yaml

@@ -0,0 +1,24 @@
+Hpi:
+  backend_config:
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: openvino
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PicoDet-L.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PicoDet-S.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/PicoDet_layout_1x.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 21 - 0
paddlex/utils/hpi_configs/RT-DETR-H.yaml

@@ -0,0 +1,21 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 21 - 0
paddlex/utils/hpi_configs/RT-DETR-L.yaml

@@ -0,0 +1,21 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 21 - 0
paddlex/utils/hpi_configs/RT-DETR-R18.yaml

@@ -0,0 +1,21 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 21 - 0
paddlex/utils/hpi_configs/RT-DETR-R50.yaml

@@ -0,0 +1,21 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 21 - 0
paddlex/utils/hpi_configs/RT-DETR-X.yaml

@@ -0,0 +1,21 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        image:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet101.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet101_vd.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet152.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet152_vd.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet18.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet18_vd.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet200_vd.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet34.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet34_vd.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: paddle_tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet50.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 35 - 0
paddlex/utils/hpi_configs/ResNet50_vd.yaml

@@ -0,0 +1,35 @@
+Hpi:
+  backend_config:
+    onnx_runtime:
+      cpu_num_threads: 8
+    openvino:
+      cpu_num_threads: 8
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+    tensorrt:
+      dynamic_shapes:
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: onnx_runtime
+    gpu: tensorrt
+  supported_backends:
+    cpu:
+    - paddle_infer
+    - openvino
+    - onnx_runtime
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt
+    - onnx_runtime
+    - tensorrt

+ 21 - 0
paddlex/utils/hpi_configs/SLANet.yaml

@@ -0,0 +1,21 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_infer
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 21 - 0
paddlex/utils/hpi_configs/SeaFormer_base.yaml

@@ -0,0 +1,21 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_infer
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 21 - 0
paddlex/utils/hpi_configs/SeaFormer_large.yaml

@@ -0,0 +1,21 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_infer
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

+ 21 - 0
paddlex/utils/hpi_configs/SeaFormer_small.yaml

@@ -0,0 +1,21 @@
+Hpi:
+  backend_config:
+    paddle_infer:
+      cpu_num_threads: 8
+      enable_log_info: false
+    paddle_tensorrt:
+      dynamic_shapes:
+        enable_log_info: false
+        x:
+        - []
+        - []
+        - []
+  selected_backends:
+    cpu: paddle_infer
+    gpu: paddle_infer
+  supported_backends:
+    cpu:
+    - paddle_infer
+    gpu:
+    - paddle_infer
+    - paddle_tensorrt

Some files were not shown because too many files changed in this diff
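
For reference, the Hpi blocks added above all share the same shape: backend_config holds per-backend options (cpu_num_threads, enable_log_info, empty dynamic_shapes placeholders), selected_backends names the default backend per device, and supported_backends lists what each device may use. The snippet below is only a minimal sketch of how such a file could be consumed with PyYAML; it is not the PaddleX loader, and the helper name and usage are illustrative assumptions.

# Minimal sketch (not the PaddleX loader): read one of the Hpi YAML files
# added in this commit and pick an inference backend for a device.
import yaml  # PyYAML

def pick_backend(config_path, device):
    with open(config_path, "r", encoding="utf-8") as f:
        hpi = yaml.safe_load(f)["Hpi"]
    # selected_backends names the preferred backend per device ("cpu"/"gpu") ...
    backend = hpi["selected_backends"][device]
    # ... and that backend is expected to appear in supported_backends for the device.
    assert backend in hpi["supported_backends"][device]
    # backend_config holds per-backend options such as cpu_num_threads and the
    # (currently empty) dynamic_shapes placeholders; not every backend has an entry.
    options = hpi.get("backend_config", {}).get(backend, {})
    return backend, options

# Example: ResNet50.yaml selects onnx_runtime on CPU and tensorrt on GPU.
print(pick_backend("paddlex/utils/hpi_configs/ResNet50.yaml", "gpu"))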