@@ -151,10 +151,11 @@ class BaseAPI:
         is_use_cache_file = True
         if cache_dir is None:
             is_use_cache_file = False
+        quant_prog = self.test_prog.clone(for_test=True)
         post_training_quantization = PaddleXPostTrainingQuantization(
             executor=self.exe,
             dataset=dataset,
-            program=self.test_prog,
+            program=quant_prog,
             inputs=self.test_inputs,
             outputs=self.test_outputs,
             batch_size=batch_size,
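
The hunk above relies on `Program.clone(for_test=True)` returning an independent, inference-only copy of the graph: the quantization pass rewrites `quant_prog` and leaves `self.test_prog` untouched for later evaluation and export. A minimal sketch of that behavior, assuming the PaddlePaddle 1.x `fluid` API (the toy network below is illustrative, not PaddleX code):

    import paddle.fluid as fluid

    prog = fluid.Program()
    with fluid.program_guard(prog):
        x = fluid.layers.data(name='x', shape=[4], dtype='float32')
        y = fluid.layers.fc(input=x, size=2)

    # clone(for_test=True) prunes training-only ops and returns a new
    # Program object, so graph rewrites on the clone never touch `prog`.
    test_clone = prog.clone(for_test=True)
    assert test_clone is not prog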
@@ -366,6 +367,7 @@ class BaseAPI:
     def export_inference_model(self, save_dir):
         test_input_names = [var.name for var in list(self.test_inputs.values())]
         test_outputs = list(self.test_outputs.values())
+        save_prog = self.test_prog.clone(for_test=True)
         with fluid.scope_guard(self.scope):
             fluid.io.save_inference_model(
                 dirname=save_dir,
@@ -373,7 +375,7 @@ class BaseAPI:
                 params_filename='__params__',
                 feeded_var_names=test_input_names,
                 target_vars=test_outputs,
-                main_program=self.test_prog)
+                main_program=save_prog)
         model_info = self.get_model_info()
         model_info['status'] = 'Infer'
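
For context on the second change, `fluid.io.save_inference_model` prunes the program it is given down to the fetch targets, so exporting a clone keeps the original `test_prog` reusable afterwards. A self-contained sketch of the same pattern, again assuming the PaddlePaddle 1.x `fluid` API (network, path, and variable names are illustrative, not from PaddleX):

    import paddle.fluid as fluid

    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        x = fluid.layers.data(name='x', shape=[13], dtype='float32')
        y = fluid.layers.fc(input=x, size=1)

    exe = fluid.Executor(fluid.CPUPlace())
    exe.run(startup_prog)

    # Export a pruned clone; `main_prog` stays intact for further use,
    # mirroring how the diff passes `save_prog` instead of `test_prog`.
    save_prog = main_prog.clone(for_test=True)
    fluid.io.save_inference_model(
        dirname='./inference_model',
        feeded_var_names=['x'],
        target_vars=[y],
        executor=exe,
        main_program=save_prog,
        params_filename='__params__')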