소스 검색

update pp option

1. support option.enable_new_ir
2. rename
3. update args
gaotingquan 1 년 전
부모
커밋
ecfd5c2454

+ 1 - 1
paddlex/inference/__init__.py

@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .components.paddle_predictor.option import PaddlePredictorOption
+from .utils.pp_option import PaddlePredictorOption

+ 0 - 1
paddlex/inference/components/paddle_predictor/__init__.py

@@ -12,5 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from .option import PaddlePredictorOption
 from .predictor import ImagePredictor

+ 4 - 1
paddlex/inference/components/paddle_predictor/predictor.py

@@ -57,7 +57,7 @@ class BasePaddlePredictor(BaseComponent):
             if paddle.is_compiled_with_rocm():
                 os.environ["FLAGS_conv_workspace_size_limit"] = "2000"
             elif hasattr(config, "enable_new_ir"):
-                config.enable_new_ir(True)
+                config.enable_new_ir(option.enable_new_ir)
         elif option.device == "npu":
             config.enable_custom_device("npu")
             os.environ["FLAGS_npu_jit_compile"] = "0"
@@ -78,6 +78,8 @@ class BasePaddlePredictor(BaseComponent):
         else:
             assert option.device == "cpu"
             config.disable_gpu()
+            config.enable_new_ir(option.enable_new_ir)
+            config.enable_new_executor(True)
             if "mkldnn" in option.run_mode:
                 try:
                     config.enable_mkldnn()
@@ -132,6 +134,7 @@ No need to generate again."
 
         # Get input and output handlers
         input_names = predictor.get_input_names()
+        input_names.sort()
         input_handlers = []
         output_handlers = []
         for input_name in input_names:

+ 1 - 1
paddlex/inference/predictors/base.py

@@ -23,7 +23,7 @@ from ...utils.subclass_register import AutoRegisterABCMetaClass
 from ..utils.device import constr_device
 from ...utils import logging
 from ..components.base import BaseComponent, ComponentsEngine
-from ..components.paddle_predictor.option import PaddlePredictorOption
+from ..utils.pp_option import PaddlePredictorOption
 from ..utils.process_hook import generatorable_method
 
 

+ 9 - 11
paddlex/inference/components/paddle_predictor/option.py → paddlex/inference/utils/pp_option.py

@@ -12,9 +12,9 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from ...utils.device import parse_device
-from ....utils.func_register import FuncRegister
-from ....utils import logging
+from .device import parse_device
+from ...utils.func_register import FuncRegister
+from ...utils import logging
 
 
 class PaddlePredictorOption(object):
@@ -53,7 +53,6 @@ class PaddlePredictorOption(object):
         """get default config"""
         return {
             "run_mode": "paddle",
-            "batch_size": 1,
             "device": "gpu",
             "device_id": 0,
             "min_subgraph_size": 3,
@@ -62,6 +61,7 @@ class PaddlePredictorOption(object):
             "cpu_threads": 1,
             "trt_use_static": False,
             "delete_pass": [],
+            "enable_new_ir": True,
         }
 
     @register("run_mode")
@@ -74,13 +74,6 @@ class PaddlePredictorOption(object):
             )
         self._cfg["run_mode"] = run_mode
 
-    @register("batch_size")
-    def set_batch_size(self, batch_size: int):
-        """set batch size"""
-        if not isinstance(batch_size, int) or batch_size < 1:
-            raise Exception()
-        self._cfg["batch_size"] = batch_size
-
     @register("device")
     def set_device(self, device: str):
         """set device"""
@@ -128,6 +121,11 @@ class PaddlePredictorOption(object):
     def set_delete_pass(self, delete_pass):
         self._cfg["delete_pass"] = delete_pass
 
+    @register("enable_new_ir")
+    def set_enable_new_ir(self, enable_new_ir: bool):
+        """set enable_new_ir"""
+        self._cfg["enable_new_ir"] = enable_new_ir
+
     def get_support_run_mode(self):
         """get supported run mode"""
         return self.SUPPORT_RUN_MODE

+ 6 - 0
paddlex/modules/base/predictor/kernel_option.py

@@ -77,6 +77,7 @@ class PaddleInferenceOption(object):
             "trt_calib_mode": False,
             "cpu_threads": 1,
             "trt_use_static": False,
+            "enable_new_ir": True,
         }
 
     @register2self("run_mode")
@@ -145,6 +146,11 @@ class PaddleInferenceOption(object):
         """set trt use static"""
         self._cfg["trt_use_static"] = trt_use_static
 
+    @register2self("enable_new_ir")
+    def set_enable_new_ir(self, enable_new_ir: bool):
+        """set enable_new_ir"""
+        self._cfg["enable_new_ir"] = enable_new_ir
+
     def get_support_run_mode(self):
         """get supported run mode"""
         return self.SUPPORT_RUN_MODE

+ 2 - 2
paddlex/modules/base/predictor/utils/paddle_inference_predictor.py

@@ -47,7 +47,7 @@ class _PaddleInferencePredictor(object):
             if paddle.is_compiled_with_rocm():
                 os.environ["FLAGS_conv_workspace_size_limit"] = "2000"
             else:
-                config.enable_new_ir(True)
+                config.enable_new_ir(option.enable_new_ir)
         elif option.device == "npu":
             config.enable_custom_device("npu")
             os.environ["FLAGS_npu_jit_compile"] = "0"
@@ -68,7 +68,7 @@ class _PaddleInferencePredictor(object):
         else:
             assert option.device == "cpu"
             config.disable_gpu()
-            config.enable_new_ir(True)
+            config.enable_new_ir(option.enable_new_ir)
             config.enable_new_executor(True)
             if "mkldnn" in option.run_mode:
                 try: