Sfoglia il codice sorgente

Disable QAT (quantization-aware training) for exported inference models

will-jl944 4 anni fa
parent
commit
652591b3b8
1 ha cambiato i file con 4 aggiunte e 0 eliminazioni
  1. 4 0
      paddlex/cv/models/base.py

+ 4 - 0
paddlex/cv/models/base.py

@@ -505,6 +505,10 @@ class BaseModel:
             logging.info("Pruned model is saved at {}".format(save_dir))
 
     def _prepare_qat(self, quant_config):
+        if self.status == 'Infer':
+            logging.error(
+                "Exported inference model does not support quantization aware training.",
+                exit=True)
         if quant_config is None:
             # default quantization configuration
             quant_config = {