Fix typos in multiple files (#3994)

Signed-off-by: co63oc <co63oc@users.noreply.github.com>
co63oc · 6 months ago · commit 0f66f4546f
33 changed files with 45 additions and 45 deletions
  1. +1 -1  libs/ultra-infer/python/ultra_infer/pipeline/pptinypose/__init__.py
  2. +1 -1  libs/ultra-infer/ultra_infer/pipeline/pptinypose/pipeline.h
  3. +1 -1  libs/ultra-infer/ultra_infer/vision/classification/contrib/resnet.h
  4. +1 -1  libs/ultra-infer/ultra_infer/vision/classification/ppcls/preprocessor.cc
  5. +1 -1  libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec_preprocessor.cc
  6. +1 -1  libs/ultra-infer/ultra_infer/vision/detection/ppdet/preprocessor.cc
  7. +1 -1  libs/ultra-infer/ultra_infer/vision/facedet/ppdet/blazeface/preprocessor.cc
  8. +3 -3  libs/ultra-infer/ultra_infer/vision/generation/contrib/preprocessor.cc
  9. +1 -1  libs/ultra-infer/ultra_infer/vision/perception/paddle3d/caddn/preprocessor.cc
  10. +1 -1  libs/ultra-infer/ultra_infer/vision/perception/paddle3d/petr/preprocessor.cc
  11. +1 -1  libs/ultra-infer/ultra_infer/vision/perception/paddle3d/smoke/preprocessor.cc
  12. +1 -1  libs/ultra-infer/ultra_infer/vision/tracking/pptracking/lapjv.cc
  13. +1 -1  libs/ultra-infer/ultra_infer/vision/tracking/pptracking/lapjv.h
  14. +1 -1  libs/ultra-infer/ultra_infer/vision/tracking/pptracking/tracker.cc
  15. +1 -1  libs/ultra-infer/ultra_infer/vision/tracking/pptracking/tracker.h
  16. +1 -1  libs/ultra-infer/ultra_infer/vision/tracking/pptracking/trajectory.cc
  17. +1 -1  libs/ultra-infer/ultra_infer/vision/tracking/pptracking/trajectory.h
  18. +1 -1  paddlex/__init__.py
  19. +2 -2  paddlex/inference/models/common/tokenizer/tokenizer_utils.py
  20. +1 -1  paddlex/inference/models/common/tokenizer/tokenizer_utils_base.py
  21. +2 -2  paddlex/inference/models/common/tokenizer/vocab.py
  22. +2 -2  paddlex/inference/models/common/vlm/transformers/configuration_utils.py
  23. +4 -4  paddlex/inference/pipelines/table_recognition/pipeline_v2.py
  24. +1 -1  paddlex/inference/utils/io/readers.py
  25. +2 -2  paddlex/model.py
  26. +1 -1  paddlex/modules/__init__.py
  27. +1 -1  paddlex/modules/base/__init__.py
  28. +3 -3  paddlex/modules/base/evaluator.py
  29. +2 -2  paddlex/ops/__init__.py
  30. +1 -1  paddlex/repo_apis/PaddleVideo_api/configs/PP-TSM-R50_8frames_uniform.yaml
  31. +1 -1  paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_16frames_uniform.yaml
  32. +1 -1  paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_8frames_uniform.yaml
  33. +1 -1  paddlex/repo_apis/PaddleVideo_api/configs/YOWO.yaml

+ 1 - 1
libs/ultra-infer/python/ultra_infer/pipeline/pptinypose/__init__.py

@@ -40,7 +40,7 @@ class PPTinyPose(object):
 
     @property
     def detection_model_score_threshold(self):
-        """Attribute of PPTinyPose pipeline model. Stating the score threshold for detectin model to filter bbox before inputting pptinypose model
+        """Attribute of PPTinyPose pipeline model. Stating the score threshold for detecting model to filter bbox before inputting pptinypose model
 
         :return: value of detection_model_score_threshold(float)
         """

+ 1 - 1
libs/ultra-infer/ultra_infer/pipeline/pptinypose/pipeline.h

@@ -48,7 +48,7 @@ public:
   virtual bool Predict(cv::Mat *img,
                        ultra_infer::vision::KeyPointDetectionResult *result);
 
-  /* \brief The score threshold for detectin model to filter bbox before
+  /* \brief The score threshold for detecting model to filter bbox before
   * inputting pptinypose model
   */
  float detection_model_score_threshold = 0;

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/contrib/resnet.h

@@ -17,7 +17,7 @@
 #include "ultra_infer/vision/common/processors/transform.h"
 #include "ultra_infer/vision/common/result.h"
 
-// The namespace shoulde be
+// The namespace should be
 // ultra_infer::vision::classification (ultra_infer::vision::${task})
 namespace ultra_infer {
 namespace vision {

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/ppcls/preprocessor.cc

@@ -137,7 +137,7 @@ bool PaddleClasPreprocessor::Apply(FDMatBatch *image_batch,
      image_batch->proc_lib = ProcLib::OPENCV;
    }
    if (!(*(processors_[j].get()))(image_batch)) {
-      FDERROR << "Failed to processs image in " << processors_[j]->Name() << "."
+      FDERROR << "Failed to process image in " << processors_[j]->Name() << "."
              << std::endl;
      return false;
    }

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/classification/ppshitu/ppshituv2_rec_preprocessor.cc

@@ -141,7 +141,7 @@ bool PPShiTuV2RecognizerPreprocessor::Apply(FDMatBatch *image_batch,
      image_batch->proc_lib = ProcLib::OPENCV;
    }
    if (!(*(processors_[j].get()))(image_batch)) {
-      FDERROR << "Failed to processs image in " << processors_[j]->Name() << "."
+      FDERROR << "Failed to process image in " << processors_[j]->Name() << "."
              << std::endl;
      return false;
    }

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/detection/ppdet/preprocessor.cc

@@ -167,7 +167,7 @@ bool PaddleDetPreprocessor::Apply(FDMatBatch *image_batch,
    scale_factor_ptr[2 * i + 1] = 1.0;
    for (size_t j = 0; j < processors_.size(); ++j) {
      if (!(*(processors_[j].get()))(mat)) {
-        FDERROR << "Failed to processs image:" << i << " in "
+        FDERROR << "Failed to process image:" << i << " in "
                << processors_[j]->Name() << "." << std::endl;
        return false;
      }

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/facedet/ppdet/blazeface/preprocessor.cc

@@ -66,7 +66,7 @@ bool BlazeFacePreprocessor::Run(
 
    for (size_t j = 0; j < processors_.size(); ++j) {
      if (!(*(processors_[j].get()))(&((*images)[i]))) {
-        FDERROR << "Failed to processs image:" << i << " in "
+        FDERROR << "Failed to process image:" << i << " in "
                << processors_[i]->Name() << "." << std::endl;
        return false;
      }

+ 3 - 3
libs/ultra-infer/ultra_infer/vision/generation/contrib/preprocessor.cc

@@ -25,14 +25,14 @@ bool AnimeGANPreprocessor::Run(std::vector<Mat> &images,
  for (size_t i = 0; i < images.size(); ++i) {
    auto ret = BGR2RGB::Run(&images[i]);
    if (!ret) {
-      FDERROR << "Failed to processs image:" << i << " in "
+      FDERROR << "Failed to process image:" << i << " in "
              << "BGR2RGB"
              << "." << std::endl;
      return false;
    }
    ret = Cast::Run(&images[i], "float");
    if (!ret) {
-      FDERROR << "Failed to processs image:" << i << " in "
+      FDERROR << "Failed to process image:" << i << " in "
              << "Cast"
              << "." << std::endl;
      return false;
@@ -41,7 +41,7 @@ bool AnimeGANPreprocessor::Run(std::vector<Mat> &images,
    std::vector<float> std{-1.f, -1.f, -1.f};
    ret = Convert::Run(&images[i], mean, std);
    if (!ret) {
-      FDERROR << "Failed to processs image:" << i << " in "
+      FDERROR << "Failed to process image:" << i << " in "
              << "Cast"
              << "." << std::endl;
      return false;

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/perception/paddle3d/caddn/preprocessor.cc

@@ -77,7 +77,7 @@ bool CaddnPreprocessor::Apply(FDMatBatch *image_batch,
    FDMat *mat = &(image_batch->mats->at(i));
    for (size_t j = 0; j < processors_.size(); ++j) {
      if (!(*(processors_[j].get()))(mat)) {
-        FDERROR << "Failed to processs image:" << i << " in "
+        FDERROR << "Failed to process image:" << i << " in "
                << processors_[j]->Name() << "." << std::endl;
        return false;
      }

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/perception/paddle3d/petr/preprocessor.cc

@@ -83,7 +83,7 @@ bool PetrPreprocessor::Apply(FDMatBatch *image_batch,
    FDMat *mat = &(image_batch->mats->at(i));
    for (size_t j = 0; j < processors_.size(); ++j) {
      if (!(*(processors_[j].get()))(mat)) {
-        FDERROR << "Failed to processs image:" << i << " in "
+        FDERROR << "Failed to process image:" << i << " in "
                << processors_[j]->Name() << "." << std::endl;
        return false;
      }

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/perception/paddle3d/smoke/preprocessor.cc

@@ -139,7 +139,7 @@ bool SmokePreprocessor::Apply(FDMatBatch *image_batch,
    FDMat *mat = &(image_batch->mats->at(i));
    for (size_t j = 0; j < processors_.size(); ++j) {
      if (!(*(processors_[j].get()))(mat)) {
-        FDERROR << "Failed to processs image:" << i << " in "
+        FDERROR << "Failed to process image:" << i << " in "
                << processors_[j]->Name() << "." << std::endl;
        return false;
      }

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/tracking/pptracking/lapjv.cc

@@ -14,7 +14,7 @@
 
 // The code is based on:
 // https://github.com/gatagat/lap/blob/master/lap/lapjv.cpp
-// Ths copyright of gatagat/lap is as follows:
+// The copyright of gatagat/lap is as follows:
 // MIT License
 
 #include <stdio.h>

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/tracking/pptracking/lapjv.h

@@ -14,7 +14,7 @@
 
 // The code is based on:
 // https://github.com/gatagat/lap/blob/master/lap/lapjv.h
-// Ths copyright of gatagat/lap is as follows:
+// The copyright of gatagat/lap is as follows:
 // MIT License
 
 #pragma once

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/tracking/pptracking/tracker.cc

@@ -14,7 +14,7 @@
 
 // The code is based on:
 // https://github.com/CnybTseng/JDE/blob/master/platforms/common/jdetracker.cpp
-// Ths copyright of CnybTseng/JDE is as follows:
+// The copyright of CnybTseng/JDE is as follows:
 // MIT License
 
 #include <algorithm>

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/tracking/pptracking/tracker.h

@@ -14,7 +14,7 @@
 
 // The code is based on:
 // https://github.com/CnybTseng/JDE/blob/master/platforms/common/jdetracker.h
-// Ths copyright of CnybTseng/JDE is as follows:
+// The copyright of CnybTseng/JDE is as follows:
 // MIT License
 
 #pragma once

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/tracking/pptracking/trajectory.cc

@@ -14,7 +14,7 @@
 
 // The code is based on:
 // https://github.com/CnybTseng/JDE/blob/master/platforms/common/trajectory.cpp
-// Ths copyright of CnybTseng/JDE is as follows:
+// The copyright of CnybTseng/JDE is as follows:
 // MIT License
 
 #include "ultra_infer/vision/tracking/pptracking/trajectory.h"

+ 1 - 1
libs/ultra-infer/ultra_infer/vision/tracking/pptracking/trajectory.h

@@ -14,7 +14,7 @@
 
 // The code is based on:
 // https://github.com/CnybTseng/JDE/blob/master/platforms/common/trajectory.h
-// Ths copyright of CnybTseng/JDE is as follows:
+// The copyright of CnybTseng/JDE is as follows:
 // MIT License
 
 #pragma once

+ 1 - 1
paddlex/__init__.py

@@ -24,7 +24,7 @@ for mod in _SPECIAL_MODS:
 from . import version
 from .inference import create_pipeline, create_predictor
 from .model import create_model
-from .modules import build_dataset_checker, build_evaluater, build_trainer
+from .modules import build_dataset_checker, build_evaluator, build_trainer
 
 
 def _initialize():

+ 2 - 2
paddlex/inference/models/common/tokenizer/tokenizer_utils.py

@@ -688,7 +688,7 @@ class ChatTemplateMixin:
            conversation = [[conversation]]
        elif isinstance(conversation, list) and isinstance(conversation[0], str):
            raise ValueError(
-                "apply_chat_template do not support appling batch conversations, "
+                "apply_chat_template do not support applying batch conversations, "
                "so you should apply the conversation one by one."
            )
 
@@ -710,7 +710,7 @@ class ChatTemplateMixin:
                conversations = conversation
            else:
                raise ValueError(
-                    "apply_chat_template do not support appling batch conversations, "
+                    "apply_chat_template do not support applying batch conversations, "
                    "so you should apply the conversation one by one."
                )
        query = self.chat_template.render(

+ 1 - 1
paddlex/inference/models/common/tokenizer/tokenizer_utils_base.py

@@ -3531,7 +3531,7 @@ class PretrainedTokenizerBase(SpecialTokensMixin):
        prefix_offset: int = 0,
        read_offset: int = 0,
    ) -> Tuple[str, int, int]:
-        """tokenizer decoding for the streaming generation use case. This method can be overrided for tokenizer that doesn't follow this API"""
+        """tokenizer decoding for the streaming generation use case. This method can be overridden for tokenizer that doesn't follow this API"""
        prefix_text = self.decode(
            all_input_ids[prefix_offset:read_offset],
            skip_special_tokens=False,

+ 2 - 2
paddlex/inference/models/common/tokenizer/vocab.py

@@ -27,7 +27,7 @@ class Vocab(object):
    store/load functions.
 
    Args:
-        counter (collections.Counter, optional): A Counter intance describes
+        counter (collections.Counter, optional): A Counter instance describes
            the tokens and their frequencies. Its keys will be indexed according
            to the order of frequency sorting to construct mapping relationship.
            If None, `token_to_idx` must be provided as the mapping relationship.
@@ -480,7 +480,7 @@ class Vocab(object):
        **kwargs
    ):
        """
-        Builds the :class:`Vocab` accoring to given iterator and other
+        Builds the :class:`Vocab` according to given iterator and other
        information. Firstly, iterate over the `iterator` to construct a
        :class:`collections.Counter` and used to init the as  :class:`Vocab`.
 

+ 2 - 2
paddlex/inference/models/common/vlm/transformers/configuration_utils.py

@@ -496,7 +496,7 @@ class PretrainedConfig:
            if num_labels is not None and len(self.id2label) != num_labels:
                logging.warning(
                    f"You passed along `num_labels={num_labels}` with an incompatible id to label map: "
-                    f"{self.id2label}. The number of labels wil be overwritten to {self.num_labels}."
+                    f"{self.id2label}. The number of labels will be overwritten to {self.num_labels}."
                )
            self.id2label = dict(
                (int(key), value) for key, value in self.id2label.items()
@@ -939,7 +939,7 @@ class PretrainedConfig:
 
            output[key] = value
 
-        # Fix for rewrited from_pretrained method, hasattr
+        # Fix for rewrote from_pretrained method, hasattr
        if saving_file and hasattr(self, "_unsavable_keys"):
            for key in list(output.keys()):
                if key in self._unsavable_keys:

+ 4 - 4
paddlex/inference/pipelines/table_recognition/pipeline_v2.py

@@ -969,8 +969,8 @@ class _TableRecognitionPipelineV2(BasePipeline):
            table_box (list): The table box coordinates.
            use_e2e_wired_table_rec_model (bool): Whether to use end-to-end wired table recognition model.
            use_e2e_wireless_table_rec_model (bool): Whether to use end-to-end wireless table recognition model.
-            use_wired_table_cells_trans_to_html (bool): Whether to use wired tabel cells trans to HTML.
-            use_wireless_table_cells_trans_to_html (bool): Whether to use wireless tabel cells trans to HTML.
+            use_wired_table_cells_trans_to_html (bool): Whether to use wired table cells trans to HTML.
+            use_wireless_table_cells_trans_to_html (bool): Whether to use wireless table cells trans to HTML.
            use_ocr_results_with_table_cells (bool): Whether to use OCR results processed by table cells.
            flag_find_nei_text (bool): Whether to find neighboring text.
        Returns:
@@ -1134,8 +1134,8 @@ class _TableRecognitionPipelineV2(BasePipeline):
                It will be used if it is not None and use_layout_detection is False.
            use_e2e_wired_table_rec_model (bool): Whether to use end-to-end wired table recognition model.
            use_e2e_wireless_table_rec_model (bool): Whether to use end-to-end wireless table recognition model.
-            use_wired_table_cells_trans_to_html (bool): Whether to use wired tabel cells trans to HTML.
-            use_wireless_table_cells_trans_to_html (bool): Whether to use wireless tabel cells trans to HTML.
+            use_wired_table_cells_trans_to_html (bool): Whether to use wired table cells trans to HTML.
+            use_wireless_table_cells_trans_to_html (bool): Whether to use wireless table cells trans to HTML.
            use_table_orientation_classify (bool): Whether to use table orientation classification.
            use_ocr_results_with_table_cells (bool): Whether to use OCR results processed by table cells.
            **kwargs: Additional keyword arguments.

+ 1 - 1
paddlex/inference/utils/io/readers.py

@@ -361,7 +361,7 @@ class DecordVideoReaderBackend(_VideoReaderBackend):
        self.valid_mode = True
        self._fps = 0
 
-        # XXX(gaotingquan): There is a confict with `paddle` when import `decord` globally.
+        # XXX(gaotingquan): There is a conflict with `paddle` when import `decord` globally.
        try:
            import decord
 

+ 2 - 2
paddlex/model.py

@@ -17,7 +17,7 @@ from copy import deepcopy
 from .inference import PaddlePredictorOption, create_predictor
 from .modules import (
     build_dataset_checker,
-    build_evaluater,
+    build_evaluator,
     build_exportor,
     build_trainer,
 )
@@ -119,7 +119,7 @@ class _ModelBasedConfig(_BaseModel):
        trainer.train()
 
    def evaluate(self):
-        evaluator = build_evaluater(self._config)
+        evaluator = build_evaluator(self._config)
        return evaluator.evaluate()
 
    def export(self):

+ 1 - 1
paddlex/modules/__init__.py

@@ -14,7 +14,7 @@
 from importlib import import_module
 
 from .anomaly_detection import UadDatasetChecker, UadEvaluator, UadExportor, UadTrainer
-from .base import build_dataset_checker, build_evaluater, build_exportor, build_trainer
+from .base import build_dataset_checker, build_evaluator, build_exportor, build_trainer
 from .face_recognition import (
     FaceRecDatasetChecker,
     FaceRecEvaluator,

+ 1 - 1
paddlex/modules/base/__init__.py

@@ -13,6 +13,6 @@
 # limitations under the License.
 
 from .dataset_checker import BaseDatasetChecker, build_dataset_checker
-from .evaluator import BaseEvaluator, build_evaluater
+from .evaluator import BaseEvaluator, build_evaluator
 from .exportor import BaseExportor, build_exportor
 from .trainer import BaseTrainer, build_trainer

+ 3 - 3
paddlex/modules/base/evaluator.py

@@ -27,14 +27,14 @@ from ...utils.misc import AutoRegisterABCMetaClass
 from .build_model import build_model
 
 
-def build_evaluater(config: AttrDict) -> "BaseEvaluator":
-    """build model evaluater
+def build_evaluator(config: AttrDict) -> "BaseEvaluator":
+    """build model evaluator
 
     Args:
         config (AttrDict): PaddleX pipeline config, which is loaded from pipeline yaml file.
 
     Returns:
-        BaseEvaluator: the evaluater, which is subclass of BaseEvaluator.
+        BaseEvaluator: the evaluator, which is subclass of BaseEvaluator.
     """
     model_name = config.Global.model
     try:

+ 2 - 2
paddlex/ops/__init__.py

@@ -114,7 +114,7 @@ class PaddleXCustomOperatorModule(ModuleType):
            with filelock.FileLock(lockfile):
                return paddle_jit_load(name=self.modulename, sources=sources, **args)
        except:
-            logging.error("{} builded fail!".format(self.modulename))
+            logging.error("{} built fail!".format(self.modulename))
            raise
 
    def _load_module(self):
@@ -126,7 +126,7 @@ class PaddleXCustomOperatorModule(ModuleType):
                    "No custom op {} found, try JIT build".format(self.modulename)
                )
                self.module = self.jit_build()
-                logging.info("{} builded success!".format(self.modulename))
+                logging.info("{} built success!".format(self.modulename))
 
            # refresh
            sys.modules[self.fullname] = self.module

+ 1 - 1
paddlex/repo_apis/PaddleVideo_api/configs/PP-TSM-R50_8frames_uniform.yaml

@@ -151,6 +151,6 @@ Infer:
        class_id_map_file: data/k400/Kinetics-400_label_list.txt
 
 model_name: "ppTSM"
-log_interval: 10 #Optional, the interal of logger, default:10
+log_interval: 10 #Optional, the interval of logger, default:10
 epochs: 80 #Mandatory, total epoch
 log_level: "INFO" #Optional, the logger level. default: "INFO"

+ 1 - 1
paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_16frames_uniform.yaml

@@ -146,6 +146,6 @@ Infer:
        class_id_map_file: data/k400/Kinetics-400_label_list.txt
 
 model_name: "ppTSMv2"
-log_interval: 10 #Optional, the interal of logger, default:10
+log_interval: 10 #Optional, the interval of logger, default:10
 epochs: 120  #Mandatory, total epoch
 log_level: "INFO" #Optional, the logger level. default: "INFO"

+ 1 - 1
paddlex/repo_apis/PaddleVideo_api/configs/PP-TSMv2-LCNetV2_8frames_uniform.yaml

@@ -143,6 +143,6 @@ Infer:
        class_id_map_file: data/k400/Kinetics-400_label_list.txt
 
 model_name: "ppTSMv2"
-log_interval: 10 #Optional, the interal of logger, default:10
+log_interval: 10 #Optional, the interval of logger, default:10
 epochs: 120  #Mandatory, total epoch
 log_level: "INFO" #Optional, the logger level. default: "INFO"

+ 1 - 1
paddlex/repo_apis/PaddleVideo_api/configs/YOWO.yaml

@@ -136,7 +136,7 @@ label_list:
    - WalkingWithDog
 
 model_name: "YOWO"
-log_interval: 20 #Optional, the interal of logger, default:10
+log_interval: 20 #Optional, the interval of logger, default:10
 save_interval: 1
 epochs: 5 #Mandatory, total epoch
 log_level: "INFO" #Optional, the logger level. default: "INFO"