@@ -249,6 +249,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
         input: Union[str, List[str], np.ndarray, List[np.ndarray]],
         use_doc_orientation_classify: Optional[bool] = None,
         use_doc_unwarping: Optional[bool] = None,
+        use_textline_orientation: Optional[bool] = None,
         use_seal_recognition: Optional[bool] = None,
         use_table_recognition: Optional[bool] = None,
         layout_threshold: Optional[Union[float, dict]] = None,
@@ -279,6 +280,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
                 numpy array of an image, or list of numpy arrays.
             use_doc_orientation_classify (bool): Flag to use document orientation classification.
             use_doc_unwarping (bool): Flag to use document unwarping.
+            use_textline_orientation (Optional[bool]): Whether to use textline orientation prediction.
             use_seal_recognition (bool): Flag to use seal recognition.
             use_table_recognition (bool): Flag to use table recognition.
             layout_threshold (Optional[float]): The threshold value to filter out low-confidence predictions. Default is None.
@@ -320,6 +322,7 @@ class PP_ChatOCRv4_Pipeline(PP_ChatOCR_Pipeline):
             input,
            use_doc_orientation_classify=use_doc_orientation_classify,
            use_doc_unwarping=use_doc_unwarping,
+           use_textline_orientation=use_textline_orientation,
            use_seal_recognition=use_seal_recognition,
            use_table_recognition=use_table_recognition,
            layout_threshold=layout_threshold,
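
The diff above threads a new use_textline_orientation flag from the pipeline's public method signature, through its docstring, down to the inner call. A minimal caller-side sketch follows, assuming the edited method is the pipeline's visual_predict and the pipeline is obtained via paddlex.create_pipeline with the "PP-ChatOCRv4-doc" name; the input path is a placeholder, and names should be adjusted if the actual entry point differs.

    # Usage sketch (assumptions noted above; not taken verbatim from this diff).
    from paddlex import create_pipeline

    pipeline = create_pipeline(pipeline="PP-ChatOCRv4-doc")

    # None leaves each step at the pipeline's configured default;
    # True/False forces the behavior for this call.
    visual_results = pipeline.visual_predict(
        "doc_page.png",                       # placeholder input image
        use_doc_orientation_classify=True,
        use_doc_unwarping=False,
        use_textline_orientation=True,        # flag added by this diff
        use_seal_recognition=True,
        use_table_recognition=True,
    )

    for res in visual_results:
        print(res)  # inspect the per-page visual result structure

Keeping the default at None preserves existing behavior for callers that do not pass the new flag, which is why the parameter is added as Optional[bool] rather than a plain bool.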