Sfoglia il codice sorgente

add attribute recognition docs (#2410)

zhangyubo0722 1 anno fa
parent
commit
fa4377a78e

+ 9 - 9
README.md

@@ -244,7 +244,7 @@ PaddleX的各个产线均支持本地**快速推理**,部分模型支持在[AI
         <td>🚧</td>
     </tr>
     <tr>
-        <td><a href="./docs/pipeline_usage/tutorials/cv_pipelines/pedestrian_attribute.md">行人属性识别</a></td>
+        <td><a href="./docs/pipeline_usage/tutorials/cv_pipelines/pedestrian_attribute_recognition.md">行人属性识别</a></td>
         <td>🚧</td>
         <td>✅</td>
         <td>🚧</td>
@@ -254,7 +254,7 @@ PaddleX的各个产线均支持本地**快速推理**,部分模型支持在[AI
         <td>🚧</td>
     </tr>
     <tr>
-        <td><a href="./docs/pipeline_usage/tutorials/cv_pipelines/vehicle_attribute.md">车辆属性识别</a></td>
+        <td><a href="./docs/pipeline_usage/tutorials/cv_pipelines/vehicle_attribute_recognition.md">车辆属性识别</a></td>
         <td>🚧</td>
         <td>✅</td>
         <td>🚧</td>
@@ -445,8 +445,8 @@ paddlex --pipeline OCR --input https://paddle-model-ecology.bj.bcebos.com/paddle
 | 图像多标签分类 | `paddlex --pipeline multi_label_image_classification --input https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_image_classification_001.jpg --device gpu:0`        |
 | 小目标检测         | `paddlex --pipeline small_object_detection --input https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/small_object_detection.jpg --device gpu:0`                            |
 | 图像异常检测       | `paddlex --pipeline anomaly_detection --input https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/uad_grid.png --device gpu:0`                                              |
-| 行人属性识别       | `paddlex --pipeline pedestrian_attribute --input https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_002.jpg --device gpu:0`                                              |
-| 车辆属性识别       | `paddlex --pipeline vehicle_attribute --input https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_002.jpg --device gpu:0`                                              |
+| 行人属性识别       | `paddlex --pipeline pedestrian_attribute_recognition --input https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_002.jpg --device gpu:0`                                              |
+| 车辆属性识别       | `paddlex --pipeline vehicle_attribute_recognition --input https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_002.jpg --device gpu:0`                                              |
 | 通用OCR            | `paddlex --pipeline OCR --input https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/general_ocr_002.png --device gpu:0`                                                      |
 | 通用表格识别       | `paddlex --pipeline table_recognition --input https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/table_recognition.jpg --device gpu:0`                                      |
 | 通用版面解析       | `paddlex --pipeline layout_parsing --input https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/demo_paper.png --device gpu:0`                                      |
@@ -494,8 +494,8 @@ for res in output:
 | 图像异常检测       | `anomaly_detection`                | [图像异常检测产线Python脚本使用说明](./docs/pipeline_usage/tutorials/cv_pipelines/image_anomaly_detection.md#22-python脚本方式集成)                              |
 | 通用图像识别       | `PP-ShiTuV2`                | [通用图像识别Python脚本使用说明](./docs/pipeline_usage/tutorials/cv_pipelines/general_image_recognition.md#22-python脚本方式集成)                              |
 | 人脸识别       | `face_recognition`                | [人脸识别Python脚本使用说明](./docs/pipeline_usage/tutorials/cv_pipelines/face_recognition.md#22-python脚本方式集成)                              |
-| 车辆属性识别       | `vehicle_attribute`                | [车辆属性识别产线Python脚本使用说明](./docs/pipeline_usage/tutorials/cv_pipelines/vehicle_attribute.md#22-python脚本方式集成)                              |
-| 行人属性识别       | `pedestrian_attribute`                | [行人属性识别产线Python脚本使用说明](./docs/pipeline_usage/tutorials/cv_pipelines/pedestrian_attribute.md#22-python脚本方式集成)                              |
+| 车辆属性识别       | `vehicle_attribute_recognition`                | [车辆属性识别产线Python脚本使用说明](./docs/pipeline_usage/tutorials/cv_pipelines/vehicle_attribute_recognition.md#22-python脚本方式集成)                              |
+| 行人属性识别       | `pedestrian_attribute_recognition`                | [行人属性识别产线Python脚本使用说明](./docs/pipeline_usage/tutorials/cv_pipelines/pedestrian_attribute_recognition.md#22-python脚本方式集成)                              |
 | 通用OCR            | `OCR`                              | [通用OCR产线Python脚本使用说明](./docs/pipeline_usage/tutorials/ocr_pipelines/OCR.md#222-python脚本方式集成)                                                     |
 | 通用表格识别       | `table_recognition`                | [通用表格识别产线Python脚本使用说明](./docs/pipeline_usage/tutorials/ocr_pipelines/table_recognition.md#22-python脚本方式集成)                                   |
 | 通用版面解析       | `layout_parsing`                | [通用版面解析产线Python脚本使用说明](./docs/pipeline_usage/tutorials/ocr_pipelines/layout_parsing.md#22-python脚本方式集成)                                   |
@@ -551,8 +551,8 @@ for res in output:
    * [🖼️ 图像异常检测产线使用教程](./docs/pipeline_usage/tutorials/cv_pipelines/image_anomaly_detection.md)
    * [🖼️ 通用图像识别产线使用教程](./docs/pipeline_usage/tutorials/cv_pipelines/general_image_recognition.md)
    * [🆔人脸识别产线使用教程](./docs/pipeline_usage/tutorials/cv_pipelines/face_recognition.md)
-   * [🚗 车辆属性识别产线使用教程](./docs/pipeline_usage/tutorials/cv_pipelines/vehicle_attribute.md)
-   * [🚶‍♀️ 行人属性识别产线使用教程](./docs/pipeline_usage/tutorials/cv_pipelines/pedestrian_attribute.md)
+   * [🚗 车辆属性识别产线使用教程](./docs/pipeline_usage/tutorials/cv_pipelines/vehicle_attribute_recognition.md)
+   * [🚶‍♀️ 行人属性识别产线使用教程](./docs/pipeline_usage/tutorials/cv_pipelines/pedestrian_attribute_recognition.md)
 
 
 * <details open>
@@ -667,7 +667,7 @@ for res in output:
 
 <details>
   <summary> <b> 📝 产业实践教程&范例 </b></summary>
-  
+
 * [📑 文档场景信息抽取v3模型产线———论文文献信息抽取应用教程](./docs/practical_tutorials/document_scene_information_extraction(layout_detection)_tutorial.md)
 * [📑 文档场景信息抽取v3模型产线———印章信息抽取应用教程](./docs/practical_tutorials/document_scene_information_extraction(seal_recognition)_tutorial.md)
 * [🖼️ 通用图像分类模型产线———垃圾分类教程](./docs/practical_tutorials/image_classification_garbage_tutorial.md)

+ 0 - 169
docs/pipeline_usage/tutorials/cv_pipelines/pedestrian_attribute.md

@@ -1,169 +0,0 @@
-简体中文 | [English](pedestrian_attribute_en.md)
-
-# 行人属性识别产线使用教程
-
-## 1. 行人属性识别产线介绍
-Stay tuned
-
-## 2. 快速开始
-PaddleX 所提供的预训练的模型产线均可以快速体验效果,你可以在线体验行人属性识别产线的效果,也可以在本地使用命令行或 Python 体验行人属性识别产线的效果。
-
-### 2.1 在线体验
-Stay tuned
-
-### 2.2 本地体验
-在本地使用行人属性识别产线前,请确保您已经按照[PaddleX本地安装教程](../../../installation/installation.md)完成了PaddleX的wheel包安装。
-
-#### 2.2.1 命令行方式体验
-一行命令即可快速体验行人属性识别产线效果,使用 [测试文件](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_002.jpg),并将 `--input` 替换为本地路径,进行预测
-
-```bash
-paddlex --pipeline pedestrian_attribute --input pedestrian_attribute_002.jpg --device gpu:0
-```
-参数说明:
-
-```
---pipeline:产线名称,此处为行人属性识别产线
---input:待处理的输入图片的本地路径或URL
---device 使用的GPU序号(例如gpu:0表示使用第0块GPU,gpu:1,2表示使用第1、2块GPU),也可选择使用CPU(--device cpu)
-```
-
-在执行上述 Python 脚本时,加载的是默认的行人属性识别产线配置文件,若您需要自定义配置文件,可执行如下命令获取:
-
-<details>
-   <summary> 👉点击展开</summary>
-
-```
-paddlex --get_pipeline_config pedestrian_attribute
-```
-执行后,行人属性识别产线配置文件将被保存在当前路径。若您希望自定义保存位置,可执行如下命令(假设自定义保存位置为 `./my_path` ):
-
-```
-paddlex --get_pipeline_config pedestrian_attribute --save_path ./my_path
-```
-
-获取产线配置文件后,可将 `--pipeline` 替换为配置文件保存路径,即可使配置文件生效。例如,若配置文件保存路径为 `./pedestrian_attribute.yaml`,只需执行:
-
-```bash
-paddlex --pipeline ./pedestrian_attribute.yaml --input pedestrian_attribute_002.jpg --device gpu:0
-```
-其中,`--model`、`--device` 等参数无需指定,将使用配置文件中的参数。若依然指定了参数,将以指定的参数为准。
-
-</details>
-
-运行后,得到的结果为:
-
-```
-{'input_path': 'pedestrian_attribute_002.jpg', 'boxes': [{'labels': ['Trousers(长裤)', 'Age18-60(年龄在18-60岁之间)', 'LongCoat(长外套)', 'Side(侧面)'], 'cls_scores': array([0.99965, 0.99963, 0.98866, 0.9624 ]), 'det_score': 0.9795265793800354, 'coordinate': [87.24845, 322.57797, 546.27014, 1039.9806]}, {'labels': ['Trousers(长裤)', 'LongCoat(长外套)', 'Front(面朝前)', 'Age18-60(年龄在18-60岁之间)'], 'cls_scores': array([0.99996, 0.99872, 0.93379, 0.71614]), 'det_score': 0.9671529531478882, 'coordinate': [737.9159, 306.28375, 1150.6005, 1034.2983]}, {'labels': ['Trousers(长裤)', 'LongCoat(长外套)', 'Age18-60(年龄在18-60岁之间)', 'Side(侧面)'], 'cls_scores': array([0.99996, 0.99514, 0.98726, 0.96224]), 'det_score': 0.9645677208900452, 'coordinate': [399.46594, 281.90945, 869.5361, 1038.995]}]}
-```
-
-#### 2.2.2 Python脚本方式集成
-几行代码即可完成产线的快速推理,以行人属性识别产线为例:
-
-```
-from paddlex import create_pipeline
-
-pipeline = create_pipeline(pipeline="pedestrian_attribute")
-
-output = pipeline.predict("pedestrian_attribute_002.jpg")
-for res in output:
-    res.print() ## 打印预测的结构化输出
-    res.save_to_img("./output/") ## 保存结果可视化图像
-    res.save_to_json("./output/") ## 保存预测的结构化输出
-```
-得到的结果与命令行方式相同。
-
-在上述 Python 脚本中,执行了如下几个步骤:
-
-(1)实例化 `create_pipeline` 实例化产线对象:具体参数说明如下:
-
-|参数|参数说明|参数类型|默认值|
-|-|-|-|-|
-|`pipeline`|产线名称或是产线配置文件路径。如为产线名称,则必须为 PaddleX 所支持的产线。|`str`|无|
-|`device`|产线模型推理设备。支持:“gpu”,“cpu”。|`str`|`gpu`|
-|`use_hpip`|是否启用高性能推理,仅当该产线支持高性能推理时可用。|`bool`|`False`|
-
-(2)调用行人属性识别产线对象的 `predict` 方法进行推理预测:`predict` 方法参数为`x`,用于输入待预测数据,支持多种输入方式,具体示例如下:
-
-| 参数类型      | 参数说明                                                                                                  |
-|---------------|-----------------------------------------------------------------------------------------------------------|
-| Python Var    | 支持直接传入Python变量,如numpy.ndarray表示的图像数据。                                               |
-| str         | 支持传入待预测数据文件路径,如图像文件的本地路径:`/root/data/img.jpg`。                                   |
-| str           | 支持传入待预测数据文件URL,如图像文件的网络URL:[示例](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_002.jpg)。|
-| str           | 支持传入本地目录,该目录下需包含待预测数据文件,如本地路径:`/root/data/`。                               |
-| dict          | 支持传入字典类型,字典的key需与具体任务对应,如行人属性识别任务对应\"img\",字典的val支持上述类型数据,例如:`{\"img\": \"/root/data1\"}`。|
-| list          | 支持传入列表,列表元素需为上述类型数据,如`[numpy.ndarray, numpy.ndarray],[\"/root/data/img1.jpg\", \"/root/data/img2.jpg\"]`,`[\"/root/data1\", \"/root/data2\"]`,`[{\"img\": \"/root/data1\"}, {\"img\": \"/root/data2/img.jpg\"}]`。|
-
-(3)调用`predict`方法获取预测结果:`predict` 方法为`generator`,因此需要通过调用获得预测结果,`predict`方法以batch为单位对数据进行预测,因此预测结果为list形式表示的一组预测结果。
-
-(4)对预测结果进行处理:每个样本的预测结果均为`dict`类型,且支持打印,或保存为文件,支持保存的类型与具体产线相关,如:
-
-| 方法         | 说明                        | 方法参数                                                                                               |
-|--------------|-----------------------------|--------------------------------------------------------------------------------------------------------|
-| print        | 打印结果到终端              | `- format_json`:bool类型,是否对输出内容进行使用json缩进格式化,默认为True;<br>`- indent`:int类型,json格式化设置,仅当format_json为True时有效,默认为4;<br>`- ensure_ascii`:bool类型,json格式化设置,仅当format_json为True时有效,默认为False; |
-| save_to_json | 将结果保存为json格式的文件   | `- save_path`:str类型,保存的文件路径,当为目录时,保存文件命名与输入文件类型命名一致;<br>`- indent`:int类型,json格式化设置,默认为4;<br>`- ensure_ascii`:bool类型,json格式化设置,默认为False; |
-| save_to_img  | 将结果保存为图像格式的文件  | `- save_path`:str类型,保存的文件路径,当为目录时,保存文件命名与输入文件类型命名一致; |
-
-若您获取了配置文件,即可对行人属性识别产线各项配置进行自定义,只需要修改 `create_pipeline` 方法中的 `pipeline` 参数值为产线配置文件路径即可。
-
-例如,若您的配置文件保存在 `./my_path/pedestrian_attribute*.yaml` ,则只需执行:
-
-```
-from paddlex import create_pipeline
-pipeline = create_pipeline(pipeline="./my_path/pedestrian_attribute.yaml")
-output = pipeline.predict("pedestrian_attribute_002.jpg")
-for res in output:
-    res.print() ## 打印预测的结构化输出
-    res.save_to_img("./output/") ## 保存结果可视化图像
-    res.save_to_json("./output/") ## 保存预测的结构化输出
-```
-## 3. 开发集成/部署
-如果产线可以达到您对产线推理速度和精度的要求,您可以直接进行开发集成/部署。
-
-若您需要将产线直接应用在您的Python项目中,可以参考 [2.2.2 Python脚本方式](#222-python脚本方式集成)中的示例代码。
-
-此外,PaddleX 也提供了其他三种部署方式,详细说明如下:
-
-🚀 **高性能推理**:在实际生产环境中,许多应用对部署策略的性能指标(尤其是响应速度)有着较严苛的标准,以确保系统的高效运行与用户体验的流畅性。为此,PaddleX 提供高性能推理插件,旨在对模型推理及前后处理进行深度性能优化,实现端到端流程的显著提速,详细的高性能推理流程请参考[PaddleX高性能推理指南](../../../pipeline_deploy/high_performance_inference.md)。
-
-☁️ **服务化部署**:服务化部署是实际生产环境中常见的一种部署形式。通过将推理功能封装为服务,客户端可以通过网络请求来访问这些服务,以获取推理结果。PaddleX 支持用户以低成本实现产线的服务化部署,详细的服务化部署流程请参考[PaddleX服务化部署指南](../../../pipeline_deploy/service_deploy.md)。
-
-📱 **端侧部署**:端侧部署是一种将计算和数据处理功能放在用户设备本身上的方式,设备可以直接处理数据,而不需要依赖远程的服务器。PaddleX 支持将模型部署在 Android 等端侧设备上,详细的端侧部署流程请参考[PaddleX端侧部署指南](../../../pipeline_deploy/edge_deploy.md)。
-您可以根据需要选择合适的方式部署模型产线,进而进行后续的 AI 应用集成。
-
-## 4. 二次开发
-如果行人属性识别产线提供的默认模型权重在您的场景中,精度或速度不满意,您可以尝试利用**您自己拥有的特定领域或应用场景的数据**对现有模型进行进一步的**微调**,以提升行人属性识别产线的在您的场景中的识别效果。
-
-### 4.1 模型微调
-由于行人属性识别产线包含行人属性识别模块和行人检测模块,如果模型产线的效果不及预期可能来自于其中任何一个模块。
-您可以对识别效果差的图片进行分析,如果在分析过程中发现有较多的主体目标未被检测出来,那么可能是行人检测模型存在不足那么您需要参考[行人检测模块开发教程](../../../module_usage/tutorials/cv_modules/human_detection.md)中的[二次开发](../../../module_usage/tutorials/cv_modules/human_detection.md#四二次开发)章节,使用您的私有数据集对行人检测模型进行微调;如果检测出来的主体属性识别错误,那么您需要参考[行人属性识别模块开发教程](../../../module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.md)中的[二次开发](../../../module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.md#四二次开发)章节,使用您的私有数据集对行人属性识别模型进行微调。
-
-### 4.2 模型应用
-当您使用私有数据集完成微调训练后,可获得本地模型权重文件。
-
-若您需要使用微调后的模型权重,只需对产线配置文件做修改,将微调后模型权重的本地路径替换至产线配置文件中的对应位置即可:
-
-```
-......
-Pipeline:
-  model: PP-LCNet_x1_0  #可修改为微调后模型的本地路径
-  device: "gpu"
-  batch_size: 1
-......
-```
-随后, 参考本地体验中的命令行方式或 Python 脚本方式,加载修改后的产线配置文件即可。
-
-##  5. 多硬件支持
-PaddleX 支持英伟达 GPU、昆仑芯 XPU、昇腾 NPU和寒武纪 MLU 等多种主流硬件设备,**仅需修改 `--device` 参数**即可完成不同硬件之间的无缝切换。
-
-例如,您使用英伟达 GPU 进行行人属性识别产线的推理,使用的命令为:
-
-```bash
-paddlex --pipeline pedestrian_attribute --input pedestrian_attribute_002.jpg --device gpu:0
-```
-此时,若您想将硬件切换为昇腾 NPU,仅需将 `--device` 修改为 npu:0 即可:
-
-```bash
-paddlex --pipeline pedestrian_attribute --input pedestrian_attribute_002.jpg --device npu:0
-```
-若您想在更多种类的硬件上使用行人属性识别产线,请参考[PaddleX多硬件使用指南](../../../other_devices_support/multi_devices_use_guide.md)。

+ 719 - 0
docs/pipeline_usage/tutorials/cv_pipelines/pedestrian_attribute_recognition.md

@@ -0,0 +1,719 @@
+简体中文 | [English](pedestrian_attribute_recognition_en.md)
+
+# 行人属性识别产线使用教程
+
+## 1. 行人属性识别产线介绍
+行人属性识别是计算机视觉系统中的关键功能,用于在图像或视频中定位并标记行人的特定特征,如性别、年龄、衣物颜色和款式等。该任务不仅要求准确检测出行人,还需识别每个行人的详细属性信息。行人属性识别产线是定位并识别行人属性的端到端串联系统,广泛应用于智慧城市和安防监控等领域,可显著提升系统的智能化水平和管理效率。
+
+![](https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/refs/heads/main/images/pipelines/pedestrian_attribute_recognition/01.jpg)
+
+**行人属性识别产线中包含了行人检测模块和行人属性识别模块**,每个模块中包含了若干模型,具体使用哪些模型,您可以根据下边的 benchmark 数据来选择。**如您更考虑模型精度,请选择精度较高的模型,如您更考虑模型推理速度,请选择推理速度较快的模型,如您更考虑模型存储大小,请选择存储大小较小的模型**。
+
+<details>
+   <summary> 👉模型列表详情</summary>
+
+**行人检测模块:**
+
+<table>
+  <tr>
+    <th >模型</th>
+    <th >mAP(0.5:0.95)</th>
+    <th >mAP(0.5)</th>
+    <th >GPU推理耗时(ms)</th>
+    <th >CPU推理耗时 (ms)</th>
+    <th >模型存储大小(M)</th>
+    <th >介绍</th>
+  </tr>
+  <tr>
+    <td>PP-YOLOE-L_human</td>
+    <td>48.0</td>
+    <td>81.9</td>
+    <td>32.8</td>
+    <td>777.7</td>
+    <td>196.02</td>
+    <td rowspan="2">基于PP-YOLOE的行人检测模型</td>
+  </tr>
+  <tr>
+    <td>PP-YOLOE-S_human</td>
+    <td>42.5</td>
+    <td>77.9</td>
+    <td>15.0</td>
+    <td>179.3</td>
+    <td>28.79</td>
+  </tr>
+</table>
+
+**注:以上精度指标为CrowdHuman数据集 mAP(0.5:0.95)。所有模型 GPU 推理耗时基于 NVIDIA Tesla T4 机器,精度类型为 FP32, CPU 推理速度基于 Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz,线程数为8,精度类型为 FP32。**
+
+**行人属性识别模块:**
+
+|模型|mA(%)|GPU推理耗时(ms)|CPU推理耗时 (ms)|模型存储大小(M)|介绍|
+|-|-|-|-|-|-|
+|PP-LCNet_x1_0_pedestrian_attribute|92.2|3.84845|9.23735|6.7 M  |PP-LCNet_x1_0_pedestrian_attribute 是一种基于PP-LCNet的轻量级行人属性识别模型,包含26个类别|
+
+**注:以上精度指标为 PaddleX 内部自建数据集 mA。GPU 推理耗时基于 NVIDIA Tesla T4 机器,精度类型为 FP32, CPU 推理速度基于 Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz,线程数为 8,精度类型为 FP32。**
+
+</details>
+
+## 2. 快速开始
+PaddleX 所提供的预训练的模型产线均可以快速体验效果,你可以在线体验行人属性识别产线的效果,也可以在本地使用命令行或 Python 体验行人属性识别产线的效果。
+
+### 2.1 在线体验
+暂不支持在线体验
+
+### 2.2 本地体验
+在本地使用行人属性识别产线前,请确保您已经按照[PaddleX本地安装教程](../../../installation/installation.md)完成了PaddleX的wheel包安装。
+
+#### 2.2.1 命令行方式体验
+一行命令即可快速体验行人属性识别产线效果,使用 [测试文件](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_002.jpg),并将 `--input` 替换为本地路径,进行预测
+
+```bash
+paddlex --pipeline pedestrian_attribute_recognition --input pedestrian_attribute_002.jpg --device gpu:0
+```
+参数说明:
+
+```
+--pipeline:产线名称,此处为行人属性识别产线
+--input:待处理的输入图片的本地路径或URL
+--device:使用的GPU序号(例如gpu:0表示使用第0块GPU,gpu:1,2表示使用第1、2块GPU),也可选择使用CPU(--device cpu)
+```
+
+在执行上述 Python 脚本时,加载的是默认的行人属性识别产线配置文件,若您需要自定义配置文件,可执行如下命令获取:
+
+<details>
+   <summary> 👉点击展开</summary>
+
+```
+paddlex --get_pipeline_config pedestrian_attribute_recognition
+```
+执行后,行人属性识别产线配置文件将被保存在当前路径。若您希望自定义保存位置,可执行如下命令(假设自定义保存位置为 `./my_path` ):
+
+```
+paddlex --get_pipeline_config pedestrian_attribute_recognition --save_path ./my_path
+```
+
+获取产线配置文件后,可将 `--pipeline` 替换为配置文件保存路径,即可使配置文件生效。例如,若配置文件保存路径为 `./pedestrian_attribute_recognition.yaml`,只需执行:
+
+```bash
+paddlex --pipeline ./pedestrian_attribute_recognition.yaml --input pedestrian_attribute_002.jpg --device gpu:0
+```
+其中,`--model`、`--device` 等参数无需指定,将使用配置文件中的参数。若依然指定了参数,将以指定的参数为准。
+
+</details>
+
+#### 2.2.2 Python脚本方式集成
+几行代码即可完成产线的快速推理,以行人属性识别产线为例:
+
+```python
+from paddlex import create_pipeline
+
+pipeline = create_pipeline(pipeline="pedestrian_attribute_recognition")
+
+output = pipeline.predict("pedestrian_attribute_002.jpg")
+for res in output:
+    res.print() ## 打印预测的结构化输出
+    res.save_to_img("./output/") ## 保存结果可视化图像
+    res.save_to_json("./output/") ## 保存预测的结构化输出
+```
+得到的结果与命令行方式相同。
+
+在上述 Python 脚本中,执行了如下几个步骤:
+
+(1)实例化 `create_pipeline` 实例化产线对象:具体参数说明如下:
+
+|参数|参数说明|参数类型|默认值|
+|-|-|-|-|
+|`pipeline`|产线名称或是产线配置文件路径。如为产线名称,则必须为 PaddleX 所支持的产线。|`str`|无|
+|`device`|产线模型推理设备。支持:“gpu”,“cpu”。|`str`|`gpu`|
+|`use_hpip`|是否启用高性能推理,仅当该产线支持高性能推理时可用。|`bool`|`False`|
+
+(2)调用行人属性识别产线对象的 `predict` 方法进行推理预测:`predict` 方法参数为`x`,用于输入待预测数据,支持多种输入方式,具体示例如下:
+
+| 参数类型      | 参数说明                                                                                                  |
+|---------------|-----------------------------------------------------------------------------------------------------------|
+| Python Var    | 支持直接传入Python变量,如numpy.ndarray表示的图像数据。                                               |
+| str         | 支持传入待预测数据文件路径,如图像文件的本地路径:`/root/data/img.jpg`。                                   |
+| str           | 支持传入待预测数据文件URL,如图像文件的网络URL:[示例](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_002.jpg)。|
+| str           | 支持传入本地目录,该目录下需包含待预测数据文件,如本地路径:`/root/data/`。                               |
+| dict          | 支持传入字典类型,字典的key需与具体任务对应,如行人属性识别任务对应\"img\",字典的val支持上述类型数据,例如:`{\"img\": \"/root/data1\"}`。|
+| list          | 支持传入列表,列表元素需为上述类型数据,如`[numpy.ndarray, numpy.ndarray],[\"/root/data/img1.jpg\", \"/root/data/img2.jpg\"]`,`[\"/root/data1\", \"/root/data2\"]`,`[{\"img\": \"/root/data1\"}, {\"img\": \"/root/data2/img.jpg\"}]`。|
+
+(3)调用`predict`方法获取预测结果:`predict` 方法为`generator`,因此需要通过调用获得预测结果,`predict`方法以batch为单位对数据进行预测,因此预测结果为list形式表示的一组预测结果。
+
+(4)对预测结果进行处理:每个样本的预测结果均为`dict`类型,且支持打印,或保存为文件,支持保存的类型与具体产线相关,如:
+
+| 方法         | 说明                        | 方法参数                                                                                               |
+|--------------|-----------------------------|--------------------------------------------------------------------------------------------------------|
+| print        | 打印结果到终端              | `- format_json`:bool类型,是否对输出内容进行使用json缩进格式化,默认为True;<br>`- indent`:int类型,json格式化设置,仅当format_json为True时有效,默认为4;<br>`- ensure_ascii`:bool类型,json格式化设置,仅当format_json为True时有效,默认为False; |
+| save_to_json | 将结果保存为json格式的文件   | `- save_path`:str类型,保存的文件路径,当为目录时,保存文件命名与输入文件类型命名一致;<br>`- indent`:int类型,json格式化设置,默认为4;<br>`- ensure_ascii`:bool类型,json格式化设置,默认为False; |
+| save_to_img  | 将结果保存为图像格式的文件  | `- save_path`:str类型,保存的文件路径,当为目录时,保存文件命名与输入文件类型命名一致; |
+
+若您获取了配置文件,即可对行人属性识别产线各项配置进行自定义,只需要修改 `create_pipeline` 方法中的 `pipeline` 参数值为产线配置文件路径即可。
+
+例如,若您的配置文件保存在 `./my_path/pedestrian_attribute_recognition*.yaml` ,则只需执行:
+
+```python
+from paddlex import create_pipeline
+pipeline = create_pipeline(pipeline="./my_path/pedestrian_attribute_recognition.yaml")
+output = pipeline.predict("pedestrian_attribute_002.jpg")
+for res in output:
+    res.print() ## 打印预测的结构化输出
+    res.save_to_img("./output/") ## 保存结果可视化图像
+    res.save_to_json("./output/") ## 保存预测的结构化输出
+```
+## 3. 开发集成/部署
+如果产线可以达到您对产线推理速度和精度的要求,您可以直接进行开发集成/部署。
+
+若您需要将产线直接应用在您的Python项目中,可以参考 [2.2.2 Python脚本方式](#222-python脚本方式集成)中的示例代码。
+
+此外,PaddleX 也提供了其他三种部署方式,详细说明如下:
+
+🚀 **高性能推理**:在实际生产环境中,许多应用对部署策略的性能指标(尤其是响应速度)有着较严苛的标准,以确保系统的高效运行与用户体验的流畅性。为此,PaddleX 提供高性能推理插件,旨在对模型推理及前后处理进行深度性能优化,实现端到端流程的显著提速,详细的高性能推理流程请参考[PaddleX高性能推理指南](../../../pipeline_deploy/high_performance_inference.md)。
+
+☁️ **服务化部署**:服务化部署是实际生产环境中常见的一种部署形式。通过将推理功能封装为服务,客户端可以通过网络请求来访问这些服务,以获取推理结果。PaddleX 支持用户以低成本实现产线的服务化部署,详细的服务化部署流程请参考[PaddleX服务化部署指南](../../../pipeline_deploy/service_deploy.md)。
+
+下面是API参考和多语言服务调用示例:
+
+<details>
+<summary>API参考</summary>
+
+对于服务提供的所有操作:
+
+- 响应体以及POST请求的请求体均为JSON数据(JSON对象)。
+- 当请求处理成功时,响应状态码为`200`,响应体的属性如下:
+
+    |名称|类型|含义|
+    |-|-|-|
+    |`errorCode`|`integer`|错误码。固定为`0`。|
+    |`errorMsg`|`string`|错误说明。固定为`"Success"`。|
+
+    响应体还可能有`result`属性,类型为`object`,其中存储操作结果信息。
+
+- 当请求处理未成功时,响应体的属性如下:
+
+    |名称|类型|含义|
+    |-|-|-|
+    |`errorCode`|`integer`|错误码。与响应状态码相同。|
+    |`errorMsg`|`string`|错误说明。|
+
+服务提供的操作如下:
+
+- **`infer`**
+
+    获取图像OCR结果。
+
+    `POST /ocr`
+
+    - 请求体的属性如下:
+
+        |名称|类型|含义|是否必填|
+        |-|-|-|-|
+        |`image`|`string`|服务可访问的图像文件的URL或图像文件内容的Base64编码结果。|是|
+        |`inferenceParams`|`object`|推理参数。|否|
+
+        `inferenceParams`的属性如下:
+
+        |名称|类型|含义|是否必填|
+        |-|-|-|-|
+        |`maxLongSide`|`integer`|推理时,若文本检测模型的输入图像较长边的长度大于`maxLongSide`,则将对图像进行缩放,使其较长边的长度等于`maxLongSide`。|否|
+
+    - 请求处理成功时,响应体的`result`具有如下属性:
+
+        |名称|类型|含义|
+        |-|-|-|
+        |`texts`|`array`|文本位置、内容和得分。|
+        |`image`|`string`|OCR结果图,其中标注检测到的文本位置。图像为JPEG格式,使用Base64编码。|
+
+        `texts`中的每个元素为一个`object`,具有如下属性:
+
+        |名称|类型|含义|
+        |-|-|-|
+        |`poly`|`array`|文本位置。数组中元素依次为包围文本的多边形的顶点坐标。|
+        |`text`|`string`|文本内容。|
+        |`score`|`number`|文本识别得分。|
+
+        `result`示例如下:
+
+        ```json
+        {
+          "texts": [
+            {
+              "poly": [
+                [
+                  444,
+                  244
+                ],
+                [
+                  705,
+                  244
+                ],
+                [
+                  705,
+                  311
+                ],
+                [
+                  444,
+                  311
+                ]
+              ],
+              "text": "北京南站",
+              "score": 0.9
+            },
+            {
+              "poly": [
+                [
+                  992,
+                  248
+                ],
+                [
+                  1263,
+                  251
+                ],
+                [
+                  1263,
+                  318
+                ],
+                [
+                  992,
+                  315
+                ]
+              ],
+              "text": "天津站",
+              "score": 0.5
+            }
+          ],
+          "image": "xxxxxx"
+        }
+        ```
+
+</details>
+
+<details>
+<summary>多语言调用服务示例</summary>
+
+<details>
+<summary>Python</summary>
+
+```python
+import base64
+import requests
+
+API_URL = "http://localhost:8080/ocr" # 服务URL
+image_path = "./demo.jpg"
+output_image_path = "./out.jpg"
+
+# 对本地图像进行Base64编码
+with open(image_path, "rb") as file:
+    image_bytes = file.read()
+    image_data = base64.b64encode(image_bytes).decode("ascii")
+
+payload = {"image": image_data}  # Base64编码的文件内容或者图像URL
+
+# 调用API
+response = requests.post(API_URL, json=payload)
+
+# 处理接口返回数据
+assert response.status_code == 200
+result = response.json()["result"]
+with open(output_image_path, "wb") as file:
+    file.write(base64.b64decode(result["image"]))
+print(f"Output image saved at {output_image_path}")
+print("\nDetected texts:")
+print(result["texts"])
+```
+
+</details>
+
+<details>
+<summary>C++</summary>
+
+```cpp
+#include <iostream>
+#include "cpp-httplib/httplib.h" // https://github.com/Huiyicc/cpp-httplib
+#include "nlohmann/json.hpp" // https://github.com/nlohmann/json
+#include "base64.hpp" // https://github.com/tobiaslocker/base64
+
+int main() {
+    httplib::Client client("localhost:8080");
+    const std::string imagePath = "./demo.jpg";
+    const std::string outputImagePath = "./out.jpg";
+
+    httplib::Headers headers = {
+        {"Content-Type", "application/json"}
+    };
+
+    // 对本地图像进行Base64编码
+    std::ifstream file(imagePath, std::ios::binary | std::ios::ate);
+    std::streamsize size = file.tellg();
+    file.seekg(0, std::ios::beg);
+
+    std::vector<char> buffer(size);
+    if (!file.read(buffer.data(), size)) {
+        std::cerr << "Error reading file." << std::endl;
+        return 1;
+    }
+    std::string bufferStr(reinterpret_cast<const char*>(buffer.data()), buffer.size());
+    std::string encodedImage = base64::to_base64(bufferStr);
+
+    nlohmann::json jsonObj;
+    jsonObj["image"] = encodedImage;
+    std::string body = jsonObj.dump();
+
+    // 调用API
+    auto response = client.Post("/ocr", headers, body, "application/json");
+    // 处理接口返回数据
+    if (response && response->status == 200) {
+        nlohmann::json jsonResponse = nlohmann::json::parse(response->body);
+        auto result = jsonResponse["result"];
+
+        encodedImage = result["image"];
+        std::string decodedString = base64::from_base64(encodedImage);
+        std::vector<unsigned char> decodedImage(decodedString.begin(), decodedString.end());
+        std::ofstream outputImage(outputImagePath, std::ios::binary | std::ios::out);
+        if (outputImage.is_open()) {
+            outputImage.write(reinterpret_cast<char*>(decodedImage.data()), decodedImage.size());
+            outputImage.close();
+            std::cout << "Output image saved at " << outputImagePath << std::endl;
+        } else {
+            std::cerr << "Unable to open file for writing: " << outputImagePath << std::endl;
+        }
+
+        auto texts = result["texts"];
+        std::cout << "\nDetected texts:" << std::endl;
+        for (const auto& text : texts) {
+            std::cout << text << std::endl;
+        }
+    } else {
+        std::cout << "Failed to send HTTP request." << std::endl;
+        return 1;
+    }
+
+    return 0;
+}
+```
+
+</details>
+
+<details>
+<summary>Java</summary>
+
+```java
+import okhttp3.*;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Base64;
+
+public class Main {
+    public static void main(String[] args) throws IOException {
+        String API_URL = "http://localhost:8080/ocr"; // 服务URL
+        String imagePath = "./demo.jpg"; // 本地图像
+        String outputImagePath = "./out.jpg"; // 输出图像
+
+        // 对本地图像进行Base64编码
+        File file = new File(imagePath);
+        byte[] fileContent = java.nio.file.Files.readAllBytes(file.toPath());
+        String imageData = Base64.getEncoder().encodeToString(fileContent);
+
+        ObjectMapper objectMapper = new ObjectMapper();
+        ObjectNode params = objectMapper.createObjectNode();
+        params.put("image", imageData); // Base64编码的文件内容或者图像URL
+
+        // 创建 OkHttpClient 实例
+        OkHttpClient client = new OkHttpClient();
+        MediaType JSON = MediaType.Companion.get("application/json; charset=utf-8");
+        RequestBody body = RequestBody.Companion.create(params.toString(), JSON);
+        Request request = new Request.Builder()
+                .url(API_URL)
+                .post(body)
+                .build();
+
+        // 调用API并处理接口返回数据
+        try (Response response = client.newCall(request).execute()) {
+            if (response.isSuccessful()) {
+                String responseBody = response.body().string();
+                JsonNode resultNode = objectMapper.readTree(responseBody);
+                JsonNode result = resultNode.get("result");
+                String base64Image = result.get("image").asText();
+                JsonNode texts = result.get("texts");
+
+                byte[] imageBytes = Base64.getDecoder().decode(base64Image);
+                try (FileOutputStream fos = new FileOutputStream(outputImagePath)) {
+                    fos.write(imageBytes);
+                }
+                System.out.println("Output image saved at " + outputImagePath);
+                System.out.println("\nDetected texts: " + texts.toString());
+            } else {
+                System.err.println("Request failed with code: " + response.code());
+            }
+        }
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>Go</summary>
+
+```go
+package main
+
+import (
+    "bytes"
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+)
+
+func main() {
+    API_URL := "http://localhost:8080/ocr"
+    imagePath := "./demo.jpg"
+    outputImagePath := "./out.jpg"
+
+    // 对本地图像进行Base64编码
+    imageBytes, err := ioutil.ReadFile(imagePath)
+    if err != nil {
+        fmt.Println("Error reading image file:", err)
+        return
+    }
+    imageData := base64.StdEncoding.EncodeToString(imageBytes)
+
+    payload := map[string]string{"image": imageData} // Base64编码的文件内容或者图像URL
+    payloadBytes, err := json.Marshal(payload)
+    if err != nil {
+        fmt.Println("Error marshaling payload:", err)
+        return
+    }
+
+    // 调用API
+    client := &http.Client{}
+    req, err := http.NewRequest("POST", API_URL, bytes.NewBuffer(payloadBytes))
+    if err != nil {
+        fmt.Println("Error creating request:", err)
+        return
+    }
+
+    res, err := client.Do(req)
+    if err != nil {
+        fmt.Println("Error sending request:", err)
+        return
+    }
+    defer res.Body.Close()
+
+    // 处理接口返回数据
+    body, err := ioutil.ReadAll(res.Body)
+    if err != nil {
+        fmt.Println("Error reading response body:", err)
+        return
+    }
+    type Response struct {
+        Result struct {
+            Image      string   `json:"image"`
+            Texts []map[string]interface{} `json:"texts"`
+        } `json:"result"`
+    }
+    var respData Response
+    err = json.Unmarshal([]byte(string(body)), &respData)
+    if err != nil {
+        fmt.Println("Error unmarshaling response body:", err)
+        return
+    }
+
+    outputImageData, err := base64.StdEncoding.DecodeString(respData.Result.Image)
+    if err != nil {
+        fmt.Println("Error decoding base64 image data:", err)
+        return
+    }
+    err = ioutil.WriteFile(outputImagePath, outputImageData, 0644)
+    if err != nil {
+        fmt.Println("Error writing image to file:", err)
+        return
+    }
+    fmt.Printf("Image saved at %s.jpg\n", outputImagePath)
+    fmt.Println("\nDetected texts:")
+    for _, text := range respData.Result.Texts {
+        fmt.Println(text)
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>C#</summary>
+
+```csharp
+using System;
+using System.IO;
+using System.Net.Http;
+using System.Net.Http.Headers;
+using System.Text;
+using System.Threading.Tasks;
+using Newtonsoft.Json.Linq;
+
+class Program
+{
+    static readonly string API_URL = "http://localhost:8080/ocr";
+    static readonly string imagePath = "./demo.jpg";
+    static readonly string outputImagePath = "./out.jpg";
+
+    static async Task Main(string[] args)
+    {
+        var httpClient = new HttpClient();
+
+        // 对本地图像进行Base64编码
+        byte[] imageBytes = File.ReadAllBytes(imagePath);
+        string image_data = Convert.ToBase64String(imageBytes);
+
+        var payload = new JObject{ { "image", image_data } }; // Base64编码的文件内容或者图像URL
+        var content = new StringContent(payload.ToString(), Encoding.UTF8, "application/json");
+
+        // 调用API
+        HttpResponseMessage response = await httpClient.PostAsync(API_URL, content);
+        response.EnsureSuccessStatusCode();
+
+        // 处理接口返回数据
+        string responseBody = await response.Content.ReadAsStringAsync();
+        JObject jsonResponse = JObject.Parse(responseBody);
+
+        string base64Image = jsonResponse["result"]["image"].ToString();
+        byte[] outputImageBytes = Convert.FromBase64String(base64Image);
+
+        File.WriteAllBytes(outputImagePath, outputImageBytes);
+        Console.WriteLine($"Output image saved at {outputImagePath}");
+        Console.WriteLine("\nDetected texts:");
+        Console.WriteLine(jsonResponse["result"]["texts"].ToString());
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>Node.js</summary>
+
+```js
+const axios = require('axios');
+const fs = require('fs');
+
+const API_URL = 'http://localhost:8080/ocr'
+const imagePath = './demo.jpg'
+const outputImagePath = "./out.jpg";
+
+let config = {
+   method: 'POST',
+   maxBodyLength: Infinity,
+   url: API_URL,
+   data: JSON.stringify({
+    'image': encodeImageToBase64(imagePath)  // Base64编码的文件内容或者图像URL
+  })
+};
+
+// 对本地图像进行Base64编码
+function encodeImageToBase64(filePath) {
+  const bitmap = fs.readFileSync(filePath);
+  return Buffer.from(bitmap).toString('base64');
+}
+
+// 调用API
+axios.request(config)
+.then((response) => {
+    // 处理接口返回数据
+    const result = response.data["result"];
+    const imageBuffer = Buffer.from(result["image"], 'base64');
+    fs.writeFile(outputImagePath, imageBuffer, (err) => {
+      if (err) throw err;
+      console.log(`Output image saved at ${outputImagePath}`);
+    });
+    console.log("\nDetected texts:");
+    console.log(result["texts"]);
+})
+.catch((error) => {
+  console.log(error);
+});
+```
+
+</details>
+
+<details>
+<summary>PHP</summary>
+
+```php
+<?php
+
+$API_URL = "http://localhost:8080/ocr"; // 服务URL
+$image_path = "./demo.jpg";
+$output_image_path = "./out.jpg";
+
+// 对本地图像进行Base64编码
+$image_data = base64_encode(file_get_contents($image_path));
+$payload = array("image" => $image_data); // Base64编码的文件内容或者图像URL
+
+// 调用API
+$ch = curl_init($API_URL);
+curl_setopt($ch, CURLOPT_POST, true);
+curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($payload));
+curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
+$response = curl_exec($ch);
+curl_close($ch);
+
+// 处理接口返回数据
+$result = json_decode($response, true)["result"];
+file_put_contents($output_image_path, base64_decode($result["image"]));
+echo "Output image saved at " . $output_image_path . "\n";
+echo "\nDetected texts:\n";
+print_r($result["texts"]);
+
+?>
+```
+
+</details>
+</details>
+<br/>
+
+📱 **端侧部署**:端侧部署是一种将计算和数据处理功能放在用户设备本身上的方式,设备可以直接处理数据,而不需要依赖远程的服务器。PaddleX 支持将模型部署在 Android 等端侧设备上,详细的端侧部署流程请参考[PaddleX端侧部署指南](../../../pipeline_deploy/edge_deploy.md)。
+您可以根据需要选择合适的方式部署模型产线,进而进行后续的 AI 应用集成。
+
+## 4. 二次开发
+如果行人属性识别产线提供的默认模型权重在您的场景中,精度或速度不满意,您可以尝试利用**您自己拥有的特定领域或应用场景的数据**对现有模型进行进一步的**微调**,以提升行人属性识别产线的在您的场景中的识别效果。
+
+### 4.1 模型微调
+由于行人属性识别产线包含行人属性识别模块和行人检测模块,如果模型产线的效果不及预期可能来自于其中任何一个模块。
+您可以对识别效果差的图片进行分析,如果在分析过程中发现有较多的主体目标未被检测出来,那么可能是行人检测模型存在不足那么您需要参考[行人检测模块开发教程](../../../module_usage/tutorials/cv_modules/human_detection.md)中的[二次开发](../../../module_usage/tutorials/cv_modules/human_detection.md#四二次开发)章节,使用您的私有数据集对行人检测模型进行微调;如果检测出来的主体属性识别错误,那么您需要参考[行人属性识别模块开发教程](../../../module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.md)中的[二次开发](../../../module_usage/tutorials/cv_modules/pedestrian_attribute_recognition.md#四二次开发)章节,使用您的私有数据集对行人属性识别模型进行微调。
+
+### 4.2 模型应用
+当您使用私有数据集完成微调训练后,可获得本地模型权重文件。
+
+若您需要使用微调后的模型权重,只需对产线配置文件做修改,将微调后模型权重的本地路径替换至产线配置文件中的对应位置即可:
+
+```
+......
+Pipeline:
+  model: PP-LCNet_x1_0  #可修改为微调后模型的本地路径
+  device: "gpu"
+  batch_size: 1
+......
+```
+随后, 参考本地体验中的命令行方式或 Python 脚本方式,加载修改后的产线配置文件即可。
+
+##  5. 多硬件支持
+PaddleX 支持英伟达 GPU、昆仑芯 XPU、昇腾 NPU和寒武纪 MLU 等多种主流硬件设备,**仅需修改 `--device` 参数**即可完成不同硬件之间的无缝切换。
+
+例如,您使用英伟达 GPU 进行行人属性识别产线的推理,使用的命令为:
+
+```bash
+paddlex --pipeline pedestrian_attribute_recognition --input pedestrian_attribute_002.jpg --device gpu:0
+```
+此时,若您想将硬件切换为昇腾 NPU,仅需将 `--device` 修改为 npu:0 即可:
+
+```bash
+paddlex --pipeline pedestrian_attribute_recognition --input pedestrian_attribute_002.jpg --device npu:0
+```
+若您想在更多种类的硬件上使用行人属性识别产线,请参考[PaddleX多硬件使用指南](../../../other_devices_support/multi_devices_use_guide.md)。

+ 618 - 0
docs/pipeline_usage/tutorials/cv_pipelines/pedestrian_attribute_recognition_en.md

@@ -0,0 +1,618 @@
+English | [简体中文](pedestrian_attribute_recognition.md)
+
+# Pedestrian Attribute Recognition Pipeline Tutorial
+
+## 1. Introduction to Pedestrian Attribute Recognition Pipeline
+Pedestrian attribute recognition is a key function in computer vision systems, used to locate and label specific characteristics of pedestrians in images or videos, such as gender, age, clothing color, and style. This task not only requires accurately detecting pedestrians but also identifying detailed attribute information for each pedestrian. The pedestrian attribute recognition pipeline is an end-to-end serial system for locating and recognizing pedestrian attributes, widely used in smart cities, security surveillance, and other fields, significantly enhancing the system's intelligence level and management efficiency.
+
+![](https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/refs/heads/main/images/pipelines/pedestrian_attribute_recognition/01.jpg)
+
+**The pedestrian attribute recognition pipeline includes a pedestrian detection module and a pedestrian attribute recognition module**, with several models in each module. Which models to use specifically can be selected based on the benchmark data below. **If you prioritize model accuracy, choose models with higher accuracy; if you prioritize inference speed, choose models with faster inference; if you prioritize model storage size, choose models with smaller storage**.
+
+<details>
+   <summary> 👉Model List Details</summary>
+
+**Pedestrian Detection Module**:
+
+<table>
+  <tr>
+    <th>Model</th>
+    <th>mAP(0.5:0.95)</th>
+    <th>mAP(0.5)</th>
+    <th>GPU Inference Time (ms)</th>
+    <th>CPU Inference Time (ms)</th>
+    <th>Model Size (M)</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>PP-YOLOE-L_human</td>
+    <td>48.0</td>
+    <td>81.9</td>
+    <td>32.8</td>
+    <td>777.7</td>
+    <td>196.02</td>
+    <td rowspan="2">Pedestrian detection model based on PP-YOLOE</td>
+  </tr>
+  <tr>
+    <td>PP-YOLOE-S_human</td>
+    <td>42.5</td>
+    <td>77.9</td>
+    <td>15.0</td>
+    <td>179.3</td>
+    <td>28.79</td>
+  </tr>
+</table>
+
+**Note: The above accuracy metrics are mAP(0.5:0.95) on the CrowdHuman dataset. All model GPU inference times are based on an NVIDIA Tesla T4 machine with FP32 precision. CPU inference speeds are based on an Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz with 8 threads and FP32 precision.**
+
+**Pedestrian Attribute Recognition Module**:
+
+|Model|mA (%)|GPU Inference Time (ms)|CPU Inference Time (ms)|Model Size (M)|Description|
+|-|-|-|-|-|-|
+|PP-LCNet_x1_0_pedestrian_attribute|92.2|3.84845|9.23735|6.7 M|PP-LCNet_x1_0_pedestrian_attribute is a lightweight pedestrian attribute recognition model based on PP-LCNet, covering 26 categories.|
+
+**Note: The above accuracy metrics are mA on PaddleX's internally built dataset. GPU inference times are based on an NVIDIA Tesla T4 machine with FP32 precision. CPU inference speeds are based on an Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz with 8 threads and FP32 precision.**
+
+</details>
+
+## 2. Quick Start
+The pre-trained model pipelines provided by PaddleX can quickly demonstrate their effectiveness. You can experience the pedestrian attribute recognition pipeline online or locally using command line or Python.
+
+### 2.1 Online Experience
+Not supported yet.
+
+### 2.2 Local Experience
+Before using the pedestrian attribute recognition pipeline locally, ensure you have completed the installation of the PaddleX wheel package following the [PaddleX Local Installation Tutorial](../../../installation/installation.md).
+
+#### 2.2.1 Command Line Experience
+You can quickly experience the pedestrian attribute recognition pipeline with a single command. Use the [test file](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_002.jpg) and replace `--input` with the local path for prediction.
+
+```bash
+paddlex --pipeline pedestrian_attribute_recognition --input pedestrian_attribute_002.jpg --device gpu:0
+```
+Parameter Description:
+
+```
+--pipeline: The name of the pipeline, here it is the pedestrian attribute recognition pipeline.
+--input: The local path or URL of the input image to be processed.
+--device: The GPU index to use (e.g., gpu:0 means using the first GPU, gpu:1,2 means using the second and third GPUs), or you can choose to use CPU (--device cpu).
+```
+
+When executing the above Python script, the default pedestrian attribute recognition pipeline configuration file is loaded. If you need a custom configuration file, you can run the following command to obtain it:
+
+<details>
+   <summary> 👉Click to Expand</summary>
+
+```
+paddlex --get_pipeline_config pedestrian_attribute_recognition
+```
+After execution, the pedestrian attribute recognition pipeline configuration file will be saved in the current path. If you wish to specify a custom save location, you can run the following command (assuming the custom save location is `./my_path`):
+
+```
+paddlex --get_pipeline_config pedestrian_attribute_recognition --save_path ./my_path
+```
+
+After obtaining the pipeline configuration file, you can replace `--pipeline` with the saved path of the configuration file to make it effective. For example, if the configuration file is saved at `./pedestrian_attribute_recognition.yaml`, simply execute:
+
+```bash
+paddlex --pipeline ./pedestrian_attribute_recognition.yaml --input pedestrian_attribute_002.jpg --device gpu:0
+```
+Among them, parameters such as `--model` and `--device` do not need to be specified, and the parameters in the configuration file will be used. If parameters are still specified, the specified parameters will take precedence.
+
+</details>
+
+#### 2.2.2 Python Script Integration
+A few lines of code are sufficient for quick inference of the pipeline. Taking the pedestrian attribute recognition pipeline as an example:
+
+```python
+from paddlex import create_pipeline
+
+pipeline = create_pipeline(pipeline="pedestrian_attribute_recognition")
+
+output = pipeline.predict("pedestrian_attribute_002.jpg")
+for res in output:
+    res.print()  ## Print the structured output of the prediction
+    res.save_to_img("./output/")  ## Save the visualized image of the result
+    res.save_to_json("./output/")  ## Save the structured output of the prediction
+```
+The results obtained are the same as those from the command line approach.
+
+In the above Python script, the following steps are executed:
+
+(1) Instantiate the `create_pipeline` to create a pipeline object: Specific parameter descriptions are as follows:
+
+| Parameter | Description | Parameter Type | Default Value |
+|-----------|-------------|----------------|---------------|
+| `pipeline` | The name of the pipeline or the path to the pipeline configuration file. If it is the name of the pipeline, it must be a pipeline supported by PaddleX. | `str` | None |
+| `device` | The device for pipeline model inference. Supports: "gpu", "cpu". | `str` | "gpu" |
+| `use_hpip` | Whether to enable high-performance inference, only available when the pipeline supports high-performance inference. | `bool` | `False` |
+
+(2) Call the `predict` method of the pedestrian attribute recognition pipeline object for inference prediction: The `predict` method parameter is `x`, which is used to input data to be predicted, supporting multiple input methods. Specific examples are as follows:
+
+| Parameter Type | Description |
+|----------------|-----------------------------------------------------------------------------------------------------------|
+| Python Var | Supports directly passing in Python variables, such as image data represented by numpy.ndarray. |
+| `str` | Supports passing in the file path of the data to be predicted, such as the local path of an image file: `/root/data/img.jpg`. |
+| `str` | Supports passing in the URL of the data file to be predicted, such as the network URL of an image file: [Example](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_002.jpg). |
+| `str` | Supports passing in a local directory, which should contain the data files to be predicted, such as the local path: `/root/data/`. |
+| `dict` | Supports passing in a dictionary type, where the key needs to correspond to the specific task, such as "img" for the pedestrian attribute recognition task, and the value of the dictionary supports the above data types, for example: `{"img": "/root/data1"}`. |
+| `list` | Supports passing in a list, where the elements of the list need to be the above data types, such as `[numpy.ndarray, numpy.ndarray], ["/root/data/img1.jpg", "/root/data/img2.jpg"], ["/root/data1", "/root/data2"], [{"img": "/root/data1"}, {"img": "/root/data2/img.jpg"}]`. |
+
+(3) Obtain the prediction results by calling the `predict` method: The `predict` method is a `generator`, so prediction results need to be obtained through iteration. The `predict` method predicts data in batches, so the prediction results are in the form of a list.
+
+(4) Process the prediction results: The prediction result for each sample is of type `dict` and supports printing or saving as a file. The supported save types are related to the specific pipeline, such as:
+
+| Method | Description | Method Parameters |
+|--------|-------------|-------------------|
+| `print` | Print the results to the terminal | `- format_json`: bool, whether to format the output content with json indentation, default is True;<br>`- indent`: int, json formatting setting, only effective when `format_json` is True, default is 4;<br>`- ensure_ascii`: bool, json formatting setting, only effective when `format_json` is True, default is False; |
+| `save_to_json` | Save the results as a json-formatted file | `- save_path`: str, the path to save the file, when it is a directory, the saved file name is consistent with the input file name;<br>`- indent`: int, json formatting setting, default is 4;<br>`- ensure_ascii`: bool, json formatting setting, default is False; |
+| `save_to_img` | Save the results as an image file |
+
+If you have obtained the configuration file, you can customize various configurations for the pedestrian attribute recognition pipeline by simply modifying the `pipeline` parameter in the `create_pipeline` method to the path of your pipeline configuration file.
+
+For example, if your configuration file is saved as `./my_path/pedestrian_attribute_recognition.yaml`, you only need to execute:
+
+```python
+from paddlex import create_pipeline
+pipeline = create_pipeline(pipeline="./my_path/pedestrian_attribute_recognition.yaml")
+output = pipeline.predict("pedestrian_attribute_002.jpg")
+for res in output:
+    res.print()  # Print the structured output of the prediction
+    res.save_to_img("./output/")  # Save the visualized result image
+    res.save_to_json("./output/")  # Save the structured output of the prediction
+```
+## 3. Development Integration/Deployment
+If the pedestrian attribute recognition pipeline meets your requirements for inference speed and accuracy, you can proceed directly with development integration/deployment.
+
+If you need to directly apply the pedestrian attribute recognition pipeline in your Python project, you can refer to the example code in [2.2.2 Python Script Integration](#222-python-script-integration).
+
+Additionally, PaddleX provides three other deployment methods, detailed as follows:
+
+🚀 **High-Performance Inference**: In actual production environments, many applications have stringent standards for the performance metrics of deployment strategies (especially response speed) to ensure efficient system operation and smooth user experience. To this end, PaddleX provides high-performance inference plugins aimed at deeply optimizing model inference and pre/post-processing to significantly speed up the end-to-end process. For detailed high-performance inference procedures, please refer to the [PaddleX High-Performance Inference Guide](../../../pipeline_deploy/high_performance_inference.md).
+
+☁️ **Service-Oriented Deployment**: Service-oriented deployment is a common deployment form in actual production environments. By encapsulating inference functionality as services, clients can access these services through network requests to obtain inference results. PaddleX supports users in achieving service-oriented deployment of pipelines at low cost. For detailed service-oriented deployment procedures, please refer to the [PaddleX Service-Oriented Deployment Guide](../../../pipeline_deploy/service_deploy.md).
+
+Below are the API reference and multi-language service invocation examples:
+
+<details>
+<summary>API Reference</summary>
+
+For all operations provided by the service:
+
+- The response body and the request body of POST requests are both JSON data (JSON objects).
+- When the request is successfully processed, the response status code is `200`, and the attributes of the response body are as follows:
+
+    | Name | Type | Meaning |
+    |-|-|-|
+    |`errorCode`|`integer`|Error code. Fixed to `0`. |
+    |`errorMsg`|`string`|Error description. Fixed to `"Success"`. |
+
+    The response body may also have a `result` attribute of type `object`, which stores the operation result information.
+
+- When the request is not successfully processed, the attributes of the response body are as follows:
+
+    | Name | Type | Meaning |
+    |-|-|-|
+    |`errorCode`|`integer`|Error code. Same as the response status code. |
+    |`errorMsg`|`string`|Error description. |
+
+The operations provided by the service are as follows:
+
+- **`infer`**
+
+    Obtain OCR results for an image.
+
+    `POST /ocr`
+
+    - The attributes of the request body are as follows:
+
+        | Name | Type | Meaning | Required |
+        |-|-|-|-|
+        |`image`|`string`|The URL of an accessible image file or the Base64 encoded result of the image file content. |Yes|
+        |`inferenceParams`|`object`|Inference parameters. |No|
+
+        The attributes of `inferenceParams` are as follows:
+
+        | Name | Type | Meaning | Required |
+        |-|-|-|-|
+        |`maxLongSide`|`integer`|During inference, if the length of the longer side of the input image is greater than `maxLongSide`, the image will be scaled so that the length of its longer side equals `maxLongSide`. |No|
+
+- When the request is processed successfully, the `result` of the response body has the following attributes:
+
+    | Name | Type | Meaning |
+    |-|-|-|
+    |`texts`|`array`|Positions, contents, and scores of the detected texts. |
+    |`image`|`string`|Result image with the detections annotated. The image is in JPEG format and Base64-encoded. |
+
+</details>
+
+<details>
+<summary>Multi-Language Service Invocation Examples</summary>
+<details>
+<summary>Python</summary>
+
+```python
+import base64
+import requests
+
+API_URL = "http://localhost:8080/ocr" # Service URL
+image_path = "./demo.jpg"
+output_image_path = "./out.jpg"
+
+# Encode the local image to Base64
+with open(image_path, "rb") as file:
+    image_bytes = file.read()
+    image_data = base64.b64encode(image_bytes).decode("ascii")
+
+payload = {"image": image_data}  # Base64 encoded file content or image URL
+
+# Call the API
+response = requests.post(API_URL, json=payload)
+
+# Process the response data
+assert response.status_code == 200
+result = response.json()["result"]
+with open(output_image_path, "wb") as file:
+    file.write(base64.b64decode(result["image"]))
+print(f"Output image saved at {output_image_path}")
+print("\nDetected texts:")
+print(result["texts"])
+```
+
+</details>
+
+<details>
+<summary>C++</summary>
+
+```cpp
+#include <iostream>
+#include "cpp-httplib/httplib.h" // https://github.com/Huiyicc/cpp-httplib
+#include "nlohmann/json.hpp" // https://github.com/nlohmann/json
+#include "base64.hpp" // https://github.com/tobiaslocker/base64
+
+int main() {
+    httplib::Client client("localhost:8080");
+    const std::string imagePath = "./demo.jpg";
+    const std::string outputImagePath = "./out.jpg";
+
+    httplib::Headers headers = {
+        {"Content-Type", "application/json"}
+    };
+
+    // Encode the local image to Base64
+    std::ifstream file(imagePath, std::ios::binary | std::ios::ate);
+    std::streamsize size = file.tellg();
+    file.seekg(0, std::ios::beg);
+
+    std::vector<char> buffer(size);
+    if (!file.read(buffer.data(), size)) {
+        std::cerr << "Error reading file." << std::endl;
+        return 1;
+    }
+    std::string bufferStr(reinterpret_cast<const char*>(buffer.data()), buffer.size());
+    std::string encodedImage = base64::to_base64(bufferStr);
+
+    nlohmann::json jsonObj;
+    jsonObj["image"] = encodedImage;
+    std::string body = jsonObj.dump();
+
+    // Call the API
+    auto response = client.Post("/ocr", headers, body, "application/json");
+    // Process the response data
+    if (response && response->status == 200) {
+        nlohmann::json jsonResponse = nlohmann::json::parse(response->body);
+        auto result = jsonResponse["result"];
+
+        encodedImage = result["image"];
+        std::string decodedString = base64::from_base64(encodedImage);
+        std::vector<unsigned char> decodedImage(decodedString.begin(), decodedString.end());
+        std::ofstream outputImage(outputImagePath, std::ios::binary | std::ios::out);
+        if (outputImage.is_open()) {
+            outputImage.write(reinterpret_cast<char*>(decodedImage.data()), decodedImage.size());
+            outputImage.close();
+            std::cout << "Output image saved at " << outputImagePath << std::endl;
+        } else {
+            std::cerr << "Unable to open file for writing: " << outputImagePath << std::endl;
+        }
+
+        auto texts = result["texts"];
+        std::cout << "\nDetected texts:" << std::endl;
+        for (const auto& text : texts) {
+            std::cout << text << std::endl;
+        }
+    } else {
+        std::cout << "Failed to send HTTP request." << std::endl;
+        return 1;
+    }
+
+    return 0;
+}
+
+```
+
+</details>
+
+
+<details>
+<summary>Java</summary>
+
+```java
+import okhttp3.*;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Base64;
+
+public class Main {
+    public static void main(String[] args) throws IOException {
+        String API_URL = "http://localhost:8080/ocr"; // Service URL
+        String imagePath = "./demo.jpg"; // Local image path
+        String outputImagePath = "./out.jpg"; // Output image path
+
+        // Encode the local image to Base64
+        File file = new File(imagePath);
+        byte[] fileContent = java.nio.file.Files.readAllBytes(file.toPath());
+        String imageData = Base64.getEncoder().encodeToString(fileContent);
+
+        ObjectMapper objectMapper = new ObjectMapper();
+        ObjectNode params = objectMapper.createObjectNode();
+        params.put("image", imageData); // Base64-encoded file content or image URL
+
+        // Create an OkHttpClient instance
+        OkHttpClient client = new OkHttpClient();
+        MediaType JSON = MediaType.get("application/json; charset=utf-8");
+        RequestBody body = RequestBody.create(params.toString(), JSON);
+        Request request = new Request.Builder()
+                .url(API_URL)
+                .post(body)
+                .build();
+
+        // Call the API and process the response
+        try (Response response = client.newCall(request).execute()) {
+            if (response.isSuccessful()) {
+                String responseBody = response.body().string();
+                JsonNode resultNode = objectMapper.readTree(responseBody);
+                JsonNode result = resultNode.get("result");
+                String base64Image = result.get("image").asText();
+                JsonNode texts = result.get("texts");
+
+                byte[] imageBytes = Base64.getDecoder().decode(base64Image);
+                try (FileOutputStream fos = new FileOutputStream(outputImagePath)) {
+                    fos.write(imageBytes);
+                }
+                System.out.println("Output image saved at " + outputImagePath);
+                System.out.println("\nDetected texts: " + texts.toString());
+            } else {
+                System.err.println("Request failed with code: " + response.code());
+            }
+        }
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>Go</summary>
+
+```go
+package main
+
+import (
+    "bytes"
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+)
+
+func main() {
+    API_URL := "http://localhost:8080/ocr"
+    imagePath := "./demo.jpg"
+    outputImagePath := "./out.jpg"
+
+    // Encode the local image to Base64
+    imageBytes, err := ioutil.ReadFile(imagePath)
+    if err != nil {
+        fmt.Println("Error reading image file:", err)
+        return
+    }
+    imageData := base64.StdEncoding.EncodeToString(imageBytes)
+
+    payload := map[string]string{"image": imageData} // Base64-encoded file content or image URL
+    payloadBytes, err := json.Marshal(payload)
+    if err != nil {
+        fmt.Println("Error marshaling payload:", err)
+        return
+    }
+
+    // Call the API
+    client := &http.Client{}
+    req, err := http.NewRequest("POST", API_URL, bytes.NewBuffer(payloadBytes))
+    if err != nil {
+        fmt.Println("Error creating request:", err)
+        return
+    }
+
+    res, err := client.Do(req)
+    if err != nil {
+        fmt.Println("Error sending request:", err)
+        return
+    }
+    defer res.Body.Close()
+
+    // Process the response
+    body, err := ioutil.ReadAll(res.Body)
+    if err != nil {
+        fmt.Println("Error reading response body:", err)
+        return
+    }
+    type Response struct {
+        Result struct {
+            Image string                   `json:"image"`
+            Texts []map[string]interface{} `json:"texts"`
+        } `json:"result"`
+    }
+    var respData Response
+    err = json.Unmarshal(body, &respData)
+    if err != nil {
+        fmt.Println("Error unmarshaling response body:", err)
+        return
+    }
+
+    outputImageData, err := base64.StdEncoding.DecodeString(respData.Result.Image)
+    if err != nil {
+        fmt.Println("Error decoding base64 image data:", err)
+        return
+    }
+    err = ioutil.WriteFile(outputImagePath, outputImageData, 0644)
+    if err != nil {
+        fmt.Println("Error writing image to file:", err)
+        return
+    }
+    fmt.Printf("Output image saved at %s\n", outputImagePath)
+    fmt.Println("\nDetected texts:")
+    for _, text := range respData.Result.Texts {
+        fmt.Println(text)
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>C#</summary>
+
+```csharp
+using System;
+using System.IO;
+using System.Net.Http;
+using System.Net.Http.Headers;
+using System.Text;
+using System.Threading.Tasks;
+using Newtonsoft.Json.Linq;
+
+class Program
+{
+    static readonly string API_URL = "http://localhost:8080/ocr";
+    static readonly string imagePath = "./demo.jpg";
+    static readonly string outputImagePath = "./out.jpg";
+
+    static async Task Main(string[] args)
+    {
+        var httpClient = new HttpClient();
+
+        // Encode the local image to Base64
+        byte[] imageBytes = File.ReadAllBytes(imagePath);
+        string image_data = Convert.ToBase64String(imageBytes);
+
+        var payload = new JObject{ { "image", image_data } }; // Base64 encoded file content or image URL
+        var content = new StringContent(payload.ToString(), Encoding.UTF8, "application/json");
+
+        // Call the API
+        HttpResponseMessage response = await httpClient.PostAsync(API_URL, content);
+        response.EnsureSuccessStatusCode();
+
+        // Process the API response
+        string responseBody = await response.Content.ReadAsStringAsync();
+        JObject jsonResponse = JObject.Parse(responseBody);
+
+        string base64Image = jsonResponse["result"]["image"].ToString();
+        byte[] outputImageBytes = Convert.FromBase64String(base64Image);
+
+        File.WriteAllBytes(outputImagePath, outputImageBytes);
+        Console.WriteLine($"Output image saved at {outputImagePath}");
+        Console.WriteLine("\nDetected texts:");
+        Console.WriteLine(jsonResponse["result"]["texts"].ToString());
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>Node.js</summary>
+
+```js
+const axios = require('axios');
+const fs = require('fs');
+
+const API_URL = 'http://localhost:8080/ocr';
+const imagePath = './demo.jpg';
+const outputImagePath = "./out.jpg";
+
+let config = {
+   method: 'POST',
+   maxBodyLength: Infinity,
+   url: API_URL,
+   data: JSON.stringify({
+    'image': encodeImageToBase64(imagePath)  // Base64 encoded file content or image URL
+  })
+};
+
+// Encode the local image to Base64
+function encodeImageToBase64(filePath) {
+  const bitmap = fs.readFileSync(filePath);
+  return Buffer.from(bitmap).toString('base64');
+}
+
+// Call the API
+axios.request(config)
+.then((response) => {
+    // Process the API response
+    const result = response.data["result"];
+    const imageBuffer = Buffer.from(result["image"], 'base64');
+    fs.writeFile(outputImagePath, imageBuffer, (err) => {
+      if (err) throw err;
+      console.log(`Output image saved at ${outputImagePath}`);
+    });
+    console.log("\nDetected texts:");
+    console.log(result["texts"]);
+})
+.catch((error) => {
+  console.log(error);
+});
+```
+
+</details>
+
+<details>
+<summary>PHP</summary>
+
+```php
+<?php
+
+$API_URL = "http://localhost:8080/ocr"; // Service URL
+$image_path = "./demo.jpg";
+$output_image_path = "./out.jpg";
+
+// Encode the local image to Base64
+$image_data = base64_encode(file_get_contents($image_path));
+$payload = array("image" => $image_data); // Base64 encoded file content or image URL
+
+// Call the API
+$ch = curl_init($API_URL);
+curl_setopt($ch, CURLOPT_POST, true);
+curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($payload));
+curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
+$response = curl_exec($ch);
+curl_close($ch);
+
+// Process the API response
+$result = json_decode($response, true)["result"];
+file_put_contents($output_image_path, base64_decode($result["image"]));
+echo "Output image saved at " . $output_image_path . "\n";
+echo "\nDetected texts:\n";
+print_r($result["texts"]);
+
+?>
+```
+
+</details>
+</details>
+<br/>
+
+📱 **Edge Deployment**: Edge deployment is a method where computing and data processing functions are placed on the user's device itself, allowing the device to process data directly without relying on remote servers. PaddleX supports deploying models on edge devices such as Android. For detailed edge deployment procedures, please refer to the [PaddleX Edge Deployment Guide](../../../pipeline_deploy/edge_deploy_en.md).
+You can choose an appropriate method to deploy your model pipeline based on your needs, and proceed with subsequent AI application integration.
+
+
+## 4. Custom Development
+If the default model weights provided by the pedestrian attribute recognition pipeline do not meet your expectations in terms of accuracy or speed for your specific scenario, you can try to further **fine-tune** the existing models using **your own domain-specific or application-specific data** to enhance the recognition performance of the pipeline in your scenario.
+
+### 4.1 Model Fine-tuning
+Since the pedestrian attribute recognition pipeline consists of a pedestrian detection module and a pedestrian attribute recognition module, the suboptimal performance of the pipeline may stem from either module.
+
+You can analyze images with poor recognition results. If you find that many pedestrians are not detected during the analysis, it may indicate deficiencies in the pedestrian detection model. In this case, you need to refer to the [Custom Development](../../../module_usage/tutorials/cv_modules/human_detection_en.md#IV.-Custom-Development) section in the [Pedestrian Detection Module Development Tutorial](../../../module_usage/tutorials/cv_modules/human_detection_en.md) and use your private dataset to fine-tune the pedestrian detection model. If the attributes of the detected pedestrians are recognized incorrectly, you should refer to the [Custom Development](../../../module_usage/tutorials/cv_modules/pedestrian_attribute_recognition_en.md#IV.-Custom-Development) section in the [Pedestrian Attribute Recognition Module Development Tutorial](../../../module_usage/tutorials/cv_modules/pedestrian_attribute_recognition_en.md) to fine-tune the pedestrian attribute recognition model.
+
+### 4.2 Model Application
+After completing fine-tuning training with your private dataset, you will obtain local model weight files.
+
+To use the fine-tuned model weights, you only need to modify the pipeline configuration file by replacing the local paths of the fine-tuned model weights with the corresponding paths in the pipeline configuration file:
+
+```
+......
+Pipeline:
+  model: PP-LCNet_x1_0  # Can be modified to the local path of the fine-tuned model
+  device: "gpu"
+  batch_size: 1
+......
+```
+Subsequently, refer to the command-line method or Python script method in [2.2 Local Experience](#22-local-experience) to load the modified pipeline configuration file.
+
+## 5. Multi-hardware Support
+PaddleX supports various mainstream hardware devices such as NVIDIA GPUs, Kunlun XPU, Ascend NPU, and Cambricon MLU. **Simply modifying the `--device` parameter** allows seamless switching between different hardware.
+
+For example, if you use an NVIDIA GPU for inference with the pedestrian attribute recognition pipeline, the command is:
+
+```bash
+paddlex --pipeline pedestrian_attribute_recognition --input pedestrian_attribute_002.jpg --device gpu:0
+```
+At this point, if you want to switch the hardware to an Ascend NPU, you only need to change `--device` to npu:0:
+
+```bash
+paddlex --pipeline pedestrian_attribute_recognition --input pedestrian_attribute_002.jpg --device npu:0
+```
+If you want to use the pedestrian attribute recognition pipeline on more types of hardware, please refer to the [PaddleX Multi-device Usage Guide](../../../other_devices_support/multi_devices_use_guide_en.md).

+ 0 - 169
docs/pipeline_usage/tutorials/cv_pipelines/vehicle_attribute.md

@@ -1,169 +0,0 @@
-简体中文 | [English](vehicle_attribute_en.md)
-
-# 车辆属性识别产线使用教程
-
-## 1. 车辆属性识别产线介绍
-Stay tuned
-
-## 2. 快速开始
-PaddleX 所提供的预训练的模型产线均可以快速体验效果,你可以在线体验车辆属性识别产线的效果,也可以在本地使用命令行或 Python 体验车辆属性识别产线的效果。
-
-### 2.1 在线体验
-Stay tuned
-
-### 2.2 本地体验
-在本地使用车辆属性识别产线前,请确保您已经按照[PaddleX本地安装教程](../../../installation/installation.md)完成了PaddleX的wheel包安装。
-
-#### 2.2.1 命令行方式体验
-一行命令即可快速体验车辆属性识别产线效果,使用 [测试文件](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_002.jpg),并将 `--input` 替换为本地路径,进行预测
-
-```bash
-paddlex --pipeline vehicle_attribute --input vehicle_attribute_002.jpg --device gpu:0
-```
-参数说明:
-
-```
---pipeline:产线名称,此处为车辆属性识别产线
---input:待处理的输入图片的本地路径或URL
---device 使用的GPU序号(例如gpu:0表示使用第0块GPU,gpu:1,2表示使用第1、2块GPU),也可选择使用CPU(--device cpu)
-```
-
-在执行上述 Python 脚本时,加载的是默认的车辆属性识别产线配置文件,若您需要自定义配置文件,可执行如下命令获取:
-
-<details>
-   <summary> 👉点击展开</summary>
-
-```
-paddlex --get_pipeline_config vehicle_attribute
-```
-执行后,车辆属性识别产线配置文件将被保存在当前路径。若您希望自定义保存位置,可执行如下命令(假设自定义保存位置为 `./my_path` ):
-
-```
-paddlex --get_pipeline_config vehicle_attribute --save_path ./my_path
-```
-
-获取产线配置文件后,可将 `--pipeline` 替换为配置文件保存路径,即可使配置文件生效。例如,若配置文件保存路径为 `./vehicle_attribute.yaml`,只需执行:
-
-```bash
-paddlex --pipeline ./vehicle_attribute.yaml --input vehicle_attribute_002.jpg --device gpu:0
-```
-其中,`--model`、`--device` 等参数无需指定,将使用配置文件中的参数。若依然指定了参数,将以指定的参数为准。
-
-</details>
-
-运行后,得到的结果为:
-
-```
-{'input_path': 'vehicle_attribute_002.jpg', 'boxes': [{'labels': ['Trousers(长裤)', 'Age18-60(年龄在18-60岁之间)', 'LongCoat(长外套)', 'Side(侧面)'], 'cls_scores': array([0.99965, 0.99963, 0.98866, 0.9624 ]), 'det_score': 0.9795265793800354, 'coordinate': [87.24845, 322.57797, 546.27014, 1039.9806]}, {'labels': ['Trousers(长裤)', 'LongCoat(长外套)', 'Front(面朝前)', 'Age18-60(年龄在18-60岁之间)'], 'cls_scores': array([0.99996, 0.99872, 0.93379, 0.71614]), 'det_score': 0.9671529531478882, 'coordinate': [737.9159, 306.28375, 1150.6005, 1034.2983]}, {'labels': ['Trousers(长裤)', 'LongCoat(长外套)', 'Age18-60(年龄在18-60岁之间)', 'Side(侧面)'], 'cls_scores': array([0.99996, 0.99514, 0.98726, 0.96224]), 'det_score': 0.9645677208900452, 'coordinate': [399.46594, 281.90945, 869.5361, 1038.995]}]}
-```
-
-#### 2.2.2 Python脚本方式集成
-几行代码即可完成产线的快速推理,以车辆属性识别产线为例:
-
-```
-from paddlex import create_pipeline
-
-pipeline = create_pipeline(pipeline="vehicle_attribute")
-
-output = pipeline.predict("vehicle_attribute_002.jpg")
-for res in output:
-    res.print() ## 打印预测的结构化输出
-    res.save_to_img("./output/") ## 保存结果可视化图像
-    res.save_to_json("./output/") ## 保存预测的结构化输出
-```
-得到的结果与命令行方式相同。
-
-在上述 Python 脚本中,执行了如下几个步骤:
-
-(1)实例化 `create_pipeline` 实例化产线对象:具体参数说明如下:
-
-|参数|参数说明|参数类型|默认值|
-|-|-|-|-|
-|`pipeline`|产线名称或是产线配置文件路径。如为产线名称,则必须为 PaddleX 所支持的产线。|`str`|无|
-|`device`|产线模型推理设备。支持:“gpu”,“cpu”。|`str`|`gpu`|
-|`use_hpip`|是否启用高性能推理,仅当该产线支持高性能推理时可用。|`bool`|`False`|
-
-(2)调用车辆属性识别产线对象的 `predict` 方法进行推理预测:`predict` 方法参数为`x`,用于输入待预测数据,支持多种输入方式,具体示例如下:
-
-| 参数类型      | 参数说明                                                                                                  |
-|---------------|-----------------------------------------------------------------------------------------------------------|
-| Python Var    | 支持直接传入Python变量,如numpy.ndarray表示的图像数据。                                               |
-| str         | 支持传入待预测数据文件路径,如图像文件的本地路径:`/root/data/img.jpg`。                                   |
-| str           | 支持传入待预测数据文件URL,如图像文件的网络URL:[示例](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_002.jpg)。|
-| str           | 支持传入本地目录,该目录下需包含待预测数据文件,如本地路径:`/root/data/`。                               |
-| dict          | 支持传入字典类型,字典的key需与具体任务对应,如车辆属性识别任务对应\"img\",字典的val支持上述类型数据,例如:`{\"img\": \"/root/data1\"}`。|
-| list          | 支持传入列表,列表元素需为上述类型数据,如`[numpy.ndarray, numpy.ndarray],[\"/root/data/img1.jpg\", \"/root/data/img2.jpg\"]`,`[\"/root/data1\", \"/root/data2\"]`,`[{\"img\": \"/root/data1\"}, {\"img\": \"/root/data2/img.jpg\"}]`。|
-
-(3)调用`predict`方法获取预测结果:`predict` 方法为`generator`,因此需要通过调用获得预测结果,`predict`方法以batch为单位对数据进行预测,因此预测结果为list形式表示的一组预测结果。
-
-(4)对预测结果进行处理:每个样本的预测结果均为`dict`类型,且支持打印,或保存为文件,支持保存的类型与具体产线相关,如:
-
-| 方法         | 说明                        | 方法参数                                                                                               |
-|--------------|-----------------------------|--------------------------------------------------------------------------------------------------------|
-| print        | 打印结果到终端              | `- format_json`:bool类型,是否对输出内容进行使用json缩进格式化,默认为True;<br>`- indent`:int类型,json格式化设置,仅当format_json为True时有效,默认为4;<br>`- ensure_ascii`:bool类型,json格式化设置,仅当format_json为True时有效,默认为False; |
-| save_to_json | 将结果保存为json格式的文件   | `- save_path`:str类型,保存的文件路径,当为目录时,保存文件命名与输入文件类型命名一致;<br>`- indent`:int类型,json格式化设置,默认为4;<br>`- ensure_ascii`:bool类型,json格式化设置,默认为False; |
-| save_to_img  | 将结果保存为图像格式的文件  | `- save_path`:str类型,保存的文件路径,当为目录时,保存文件命名与输入文件类型命名一致; |
-
-若您获取了配置文件,即可对车辆属性识别产线各项配置进行自定义,只需要修改 `create_pipeline` 方法中的 `pipeline` 参数值为产线配置文件路径即可。
-
-例如,若您的配置文件保存在 `./my_path/vehicle_attribute*.yaml` ,则只需执行:
-
-```
-from paddlex import create_pipeline
-pipeline = create_pipeline(pipeline="./my_path/vehicle_attribute.yaml")
-output = pipeline.predict("vehicle_attribute_002.jpg")
-for res in output:
-    res.print() ## 打印预测的结构化输出
-    res.save_to_img("./output/") ## 保存结果可视化图像
-    res.save_to_json("./output/") ## 保存预测的结构化输出
-```
-## 3. 开发集成/部署
-如果产线可以达到您对产线推理速度和精度的要求,您可以直接进行开发集成/部署。
-
-若您需要将产线直接应用在您的Python项目中,可以参考 [2.2.2 Python脚本方式](#222-python脚本方式集成)中的示例代码。
-
-此外,PaddleX 也提供了其他三种部署方式,详细说明如下:
-
-🚀 **高性能推理**:在实际生产环境中,许多应用对部署策略的性能指标(尤其是响应速度)有着较严苛的标准,以确保系统的高效运行与用户体验的流畅性。为此,PaddleX 提供高性能推理插件,旨在对模型推理及前后处理进行深度性能优化,实现端到端流程的显著提速,详细的高性能推理流程请参考[PaddleX高性能推理指南](../../../pipeline_deploy/high_performance_inference.md)。
-
-☁️ **服务化部署**:服务化部署是实际生产环境中常见的一种部署形式。通过将推理功能封装为服务,客户端可以通过网络请求来访问这些服务,以获取推理结果。PaddleX 支持用户以低成本实现产线的服务化部署,详细的服务化部署流程请参考[PaddleX服务化部署指南](../../../pipeline_deploy/service_deploy.md)。
-
-📱 **端侧部署**:端侧部署是一种将计算和数据处理功能放在用户设备本身上的方式,设备可以直接处理数据,而不需要依赖远程的服务器。PaddleX 支持将模型部署在 Android 等端侧设备上,详细的端侧部署流程请参考[PaddleX端侧部署指南](../../../pipeline_deploy/edge_deploy.md)。
-您可以根据需要选择合适的方式部署模型产线,进而进行后续的 AI 应用集成。
-
-## 4. 二次开发
-如果车辆属性识别产线提供的默认模型权重在您的场景中,精度或速度不满意,您可以尝试利用**您自己拥有的特定领域或应用场景的数据**对现有模型进行进一步的**微调**,以提升车辆属性识别产线的在您的场景中的识别效果。
-
-### 4.1 模型微调
-由于车辆属性识别产线包含车辆属性识别模块和车辆检测模块,如果模型产线的效果不及预期可能来自于其中任何一个模块。
-您可以对识别效果差的图片进行分析,如果在分析过程中发现有较多的主体目标未被检测出来,那么可能是车辆检测模型存在不足那么您需要参考[车辆检测模块开发教程](../../../module_usage/tutorials/cv_modules/human_detection.md)中的[二次开发](../../../module_usage/tutorials/cv_modules/human_detection.md#四二次开发)章节,使用您的私有数据集对车辆检测模型进行微调;如果检测出来的主体属性识别错误,那么您需要参考[车辆属性识别模块开发教程](../../../module_usage/tutorials/cv_modules/vehicle_attribute_recognition.md)中的[二次开发](../../../module_usage/tutorials/cv_modules/vehicle_attribute_recognition.md#四二次开发)章节,使用您的私有数据集对车辆属性识别模型进行微调。
-
-### 4.2 模型应用
-当您使用私有数据集完成微调训练后,可获得本地模型权重文件。
-
-若您需要使用微调后的模型权重,只需对产线配置文件做修改,将微调后模型权重的本地路径替换至产线配置文件中的对应位置即可:
-
-```
-......
-Pipeline:
-  model: PP-LCNet_x1_0  #可修改为微调后模型的本地路径
-  device: "gpu"
-  batch_size: 1
-......
-```
-随后, 参考本地体验中的命令行方式或 Python 脚本方式,加载修改后的产线配置文件即可。
-
-##  5. 多硬件支持
-PaddleX 支持英伟达 GPU、昆仑芯 XPU、昇腾 NPU和寒武纪 MLU 等多种主流硬件设备,**仅需修改 `--device` 参数**即可完成不同硬件之间的无缝切换。
-
-例如,您使用英伟达 GPU 进行车辆属性识别产线的推理,使用的命令为:
-
-```bash
-paddlex --pipeline vehicle_attribute --input vehicle_attribute_002.jpg --device gpu:0
-```
-此时,若您想将硬件切换为昇腾 NPU,仅需将 `--device` 修改为 npu:0 即可:
-
-```bash
-paddlex --pipeline vehicle_attribute --input vehicle_attribute_002.jpg --device npu:0
-```
-若您想在更多种类的硬件上使用车辆属性识别产线,请参考[PaddleX多硬件使用指南](../../../other_devices_support/multi_devices_use_guide.md)。

+ 716 - 0
docs/pipeline_usage/tutorials/cv_pipelines/vehicle_attribute_recognition.md

@@ -0,0 +1,716 @@
+简体中文 | [English](vehicle_attribute_recognition_en.md)
+
+# 车辆属性识别产线使用教程
+
+## 1. 车辆属性识别产线介绍
+车辆属性识别是计算机视觉系统中的重要组成部分,其主要任务是在图像或视频中定位并标记出车辆的特定属性,如车辆类型、颜色、车牌号等。该任务不仅要求准确检测出车辆,还需识别每辆车的详细属性信息。车辆属性识别产线是定位并识别车辆属性的端到端串联系统,广泛应用于交通管理、智能停车、安防监控、自动驾驶等领域,显著提升了系统效率和智能化水平,并推动了相关行业的发展与创新。
+
+![](https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/refs/heads/main/images/pipelines/vehicle_attribute_recognition/01.jpg)
+
+**车辆属性识别产线中包含了车辆检测模块和车辆属性识别模块**,每个模块中包含了若干模型,具体使用哪些模型,您可以根据下边的 benchmark 数据来选择。**如您更考虑模型精度,请选择精度较高的模型,如您更考虑模型推理速度,请选择推理速度较快的模型,如您更考虑模型存储大小,请选择存储大小较小的模型**。
+
+<details>
+   <summary> 👉模型列表详情</summary>
+
+**车辆检测模块:**
+
+<table>
+  <tr>
+    <th>模型</th>
+    <th>mAP 0.5:0.95</th>
+    <th>GPU推理耗时(ms)</th>
+    <th>CPU推理耗时 (ms)</th>
+    <th>模型存储大小(M)</th>
+    <th>介绍</th>
+  </tr>
+  <tr>
+    <td>PP-YOLOE-S_vehicle</td>
+    <td>61.3</td>
+    <td>15.4</td>
+    <td>178.4</td>
+    <td>28.79</td>
+    <td rowspan="2">基于PP-YOLOE的车辆检测模型</td>
+  </tr>
+  <tr>
+    <td>PP-YOLOE-L_vehicle</td>
+    <td>63.9</td>
+    <td>32.6</td>
+    <td>775.6</td>
+    <td>196.02</td>
+  </tr>
+</table>
+
+**注:以上精度指标为PPVehicle 验证集 mAP(0.5:0.95)。所有模型 GPU 推理耗时基于 NVIDIA Tesla T4 机器,精度类型为 FP32, CPU 推理速度基于 Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz,线程数为8,精度类型为 FP32。**
+
+**车辆属性识别模块:**
+
+|模型|mA(%)|GPU推理耗时(ms)|CPU推理耗时 (ms)|模型存储大小(M)|介绍|
+|-|-|-|-|-|-|
+|PP-LCNet_x1_0_vehicle_attribute|91.7|3.84845|9.23735|6.7 M|PP-LCNet_x1_0_vehicle_attribute 是一种基于PP-LCNet的轻量级车辆属性识别模型。|
+
+**注:以上精度指标为 VeRi 数据集mA。GPU 推理耗时基于 NVIDIA Tesla T4 机器,精度类型为 FP32, CPU 推理速度基于 Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz,线程数为 8,精度类型为 FP32。**
+
+</details>
+
+## 2. 快速开始
+PaddleX 所提供的预训练的模型产线均可以快速体验效果,你可以在线体验车辆属性识别产线的效果,也可以在本地使用命令行或 Python 体验车辆属性识别产线的效果。
+
+### 2.1 在线体验
+暂不支持在线体验
+
+### 2.2 本地体验
+在本地使用车辆属性识别产线前,请确保您已经按照[PaddleX本地安装教程](../../../installation/installation.md)完成了PaddleX的wheel包安装。
+
+#### 2.2.1 命令行方式体验
+一行命令即可快速体验车辆属性识别产线效果,使用 [测试文件](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_002.jpg),并将 `--input` 替换为本地路径,进行预测
+
+```bash
+paddlex --pipeline vehicle_attribute_recognition --input vehicle_attribute_002.jpg --device gpu:0
+```
+参数说明:
+
+```
+--pipeline:产线名称,此处为车辆属性识别产线
+--input:待处理的输入图片的本地路径或URL
+--device 使用的GPU序号(例如gpu:0表示使用第0块GPU,gpu:1,2表示使用第1、2块GPU),也可选择使用CPU(--device cpu)
+```
+
+在执行上述命令时,加载的是默认的车辆属性识别产线配置文件,若您需要自定义配置文件,可执行如下命令获取:
+
+<details>
+   <summary> 👉点击展开</summary>
+
+```
+paddlex --get_pipeline_config vehicle_attribute_recognition
+```
+执行后,车辆属性识别产线配置文件将被保存在当前路径。若您希望自定义保存位置,可执行如下命令(假设自定义保存位置为 `./my_path` ):
+
+```
+paddlex --get_pipeline_config vehicle_attribute_recognition --save_path ./my_path
+```
+
+获取产线配置文件后,可将 `--pipeline` 替换为配置文件保存路径,即可使配置文件生效。例如,若配置文件保存路径为 `./vehicle_attribute_recognition.yaml`,只需执行:
+
+```bash
+paddlex --pipeline ./vehicle_attribute_recognition.yaml --input vehicle_attribute_002.jpg --device gpu:0
+```
+其中,`--model`、`--device` 等参数无需指定,将使用配置文件中的参数。若依然指定了参数,将以指定的参数为准。
+
+</details>
+
+#### 2.2.2 Python脚本方式集成
+几行代码即可完成产线的快速推理,以车辆属性识别产线为例:
+
+```python
+from paddlex import create_pipeline
+
+pipeline = create_pipeline(pipeline="vehicle_attribute_recognition")
+
+output = pipeline.predict("vehicle_attribute_002.jpg")
+for res in output:
+    res.print() ## 打印预测的结构化输出
+    res.save_to_img("./output/") ## 保存结果可视化图像
+    res.save_to_json("./output/") ## 保存预测的结构化输出
+```
+得到的结果与命令行方式相同。
+
+在上述 Python 脚本中,执行了如下几个步骤:
+
+(1)实例化 `create_pipeline` 实例化产线对象:具体参数说明如下:
+
+|参数|参数说明|参数类型|默认值|
+|-|-|-|-|
+|`pipeline`|产线名称或是产线配置文件路径。如为产线名称,则必须为 PaddleX 所支持的产线。|`str`|无|
+|`device`|产线模型推理设备。支持:“gpu”,“cpu”。|`str`|`gpu`|
+|`use_hpip`|是否启用高性能推理,仅当该产线支持高性能推理时可用。|`bool`|`False`|
+
+(2)调用车辆属性识别产线对象的 `predict` 方法进行推理预测:`predict` 方法参数为`x`,用于输入待预测数据,支持多种输入方式,具体示例如下:
+
+| 参数类型      | 参数说明                                                                                                  |
+|---------------|-----------------------------------------------------------------------------------------------------------|
+| Python Var    | 支持直接传入Python变量,如numpy.ndarray表示的图像数据。                                               |
+| str         | 支持传入待预测数据文件路径,如图像文件的本地路径:`/root/data/img.jpg`。                                   |
+| str           | 支持传入待预测数据文件URL,如图像文件的网络URL:[示例](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_002.jpg)。|
+| str           | 支持传入本地目录,该目录下需包含待预测数据文件,如本地路径:`/root/data/`。                               |
+| dict          | 支持传入字典类型,字典的key需与具体任务对应,如车辆属性识别任务对应\"img\",字典的val支持上述类型数据,例如:`{\"img\": \"/root/data1\"}`。|
+| list          | 支持传入列表,列表元素需为上述类型数据,如`[numpy.ndarray, numpy.ndarray],[\"/root/data/img1.jpg\", \"/root/data/img2.jpg\"]`,`[\"/root/data1\", \"/root/data2\"]`,`[{\"img\": \"/root/data1\"}, {\"img\": \"/root/data2/img.jpg\"}]`。|
+
+(3)调用`predict`方法获取预测结果:`predict` 方法为`generator`,因此需要通过调用获得预测结果,`predict`方法以batch为单位对数据进行预测,因此预测结果为list形式表示的一组预测结果。
+
+(4)对预测结果进行处理:每个样本的预测结果均为`dict`类型,且支持打印,或保存为文件,支持保存的类型与具体产线相关,如:
+
+| 方法         | 说明                        | 方法参数                                                                                               |
+|--------------|-----------------------------|--------------------------------------------------------------------------------------------------------|
+| print        | 打印结果到终端              | `- format_json`:bool类型,是否对输出内容进行使用json缩进格式化,默认为True;<br>`- indent`:int类型,json格式化设置,仅当format_json为True时有效,默认为4;<br>`- ensure_ascii`:bool类型,json格式化设置,仅当format_json为True时有效,默认为False; |
+| save_to_json | 将结果保存为json格式的文件   | `- save_path`:str类型,保存的文件路径,当为目录时,保存文件命名与输入文件类型命名一致;<br>`- indent`:int类型,json格式化设置,默认为4;<br>`- ensure_ascii`:bool类型,json格式化设置,默认为False; |
+| save_to_img  | 将结果保存为图像格式的文件  | `- save_path`:str类型,保存的文件路径,当为目录时,保存文件命名与输入文件类型命名一致; |
+
+若您获取了配置文件,即可对车辆属性识别产线各项配置进行自定义,只需要修改 `create_pipeline` 方法中的 `pipeline` 参数值为产线配置文件路径即可。
+
+例如,若您的配置文件保存在 `./my_path/vehicle_attribute_recognition.yaml` ,则只需执行:
+
+```python
+from paddlex import create_pipeline
+pipeline = create_pipeline(pipeline="./my_path/vehicle_attribute_recognition.yaml")
+output = pipeline.predict("vehicle_attribute_002.jpg")
+for res in output:
+    res.print() ## 打印预测的结构化输出
+    res.save_to_img("./output/") ## 保存结果可视化图像
+    res.save_to_json("./output/") ## 保存预测的结构化输出
+```
+## 3. 开发集成/部署
+如果产线可以达到您对产线推理速度和精度的要求,您可以直接进行开发集成/部署。
+
+若您需要将产线直接应用在您的Python项目中,可以参考 [2.2.2 Python脚本方式](#222-python脚本方式集成)中的示例代码。
+
+此外,PaddleX 也提供了其他三种部署方式,详细说明如下:
+
+🚀 **高性能推理**:在实际生产环境中,许多应用对部署策略的性能指标(尤其是响应速度)有着较严苛的标准,以确保系统的高效运行与用户体验的流畅性。为此,PaddleX 提供高性能推理插件,旨在对模型推理及前后处理进行深度性能优化,实现端到端流程的显著提速,详细的高性能推理流程请参考[PaddleX高性能推理指南](../../../pipeline_deploy/high_performance_inference.md)。
+
+☁️ **服务化部署**:服务化部署是实际生产环境中常见的一种部署形式。通过将推理功能封装为服务,客户端可以通过网络请求来访问这些服务,以获取推理结果。PaddleX 支持用户以低成本实现产线的服务化部署,详细的服务化部署流程请参考[PaddleX服务化部署指南](../../../pipeline_deploy/service_deploy.md)。
+
+下面是API参考和多语言服务调用示例:
+
+<details>
+<summary>API参考</summary>
+
+对于服务提供的所有操作:
+
+- 响应体以及POST请求的请求体均为JSON数据(JSON对象)。
+- 当请求处理成功时,响应状态码为`200`,响应体的属性如下:
+
+    |名称|类型|含义|
+    |-|-|-|
+    |`errorCode`|`integer`|错误码。固定为`0`。|
+    |`errorMsg`|`string`|错误说明。固定为`"Success"`。|
+
+    响应体还可能有`result`属性,类型为`object`,其中存储操作结果信息。
+
+- 当请求处理未成功时,响应体的属性如下:
+
+    |名称|类型|含义|
+    |-|-|-|
+    |`errorCode`|`integer`|错误码。与响应状态码相同。|
+    |`errorMsg`|`string`|错误说明。|
+
+服务提供的操作如下:
+
+- **`infer`**
+
+    获取图像OCR结果。
+
+    `POST /ocr`
+
+    - 请求体的属性如下:
+
+        |名称|类型|含义|是否必填|
+        |-|-|-|-|
+        |`image`|`string`|服务可访问的图像文件的URL或图像文件内容的Base64编码结果。|是|
+        |`inferenceParams`|`object`|推理参数。|否|
+
+        `inferenceParams`的属性如下:
+
+        |名称|类型|含义|是否必填|
+        |-|-|-|-|
+        |`maxLongSide`|`integer`|推理时,若文本检测模型的输入图像较长边的长度大于`maxLongSide`,则将对图像进行缩放,使其较长边的长度等于`maxLongSide`。|否|
+
+    - 请求处理成功时,响应体的`result`具有如下属性:
+
+        |名称|类型|含义|
+        |-|-|-|
+        |`texts`|`array`|文本位置、内容和得分。|
+        |`image`|`string`|OCR结果图,其中标注检测到的文本位置。图像为JPEG格式,使用Base64编码。|
+
+        `texts`中的每个元素为一个`object`,具有如下属性:
+
+        |名称|类型|含义|
+        |-|-|-|
+        |`poly`|`array`|文本位置。数组中元素依次为包围文本的多边形的顶点坐标。|
+        |`text`|`string`|文本内容。|
+        |`score`|`number`|文本识别得分。|
+
+        `result`示例如下:
+
+        ```json
+        {
+          "texts": [
+            {
+              "poly": [
+                [
+                  444,
+                  244
+                ],
+                [
+                  705,
+                  244
+                ],
+                [
+                  705,
+                  311
+                ],
+                [
+                  444,
+                  311
+                ]
+              ],
+              "text": "北京南站",
+              "score": 0.9
+            },
+            {
+              "poly": [
+                [
+                  992,
+                  248
+                ],
+                [
+                  1263,
+                  251
+                ],
+                [
+                  1263,
+                  318
+                ],
+                [
+                  992,
+                  315
+                ]
+              ],
+              "text": "天津站",
+              "score": 0.5
+            }
+          ],
+          "image": "xxxxxx"
+        }
+        ```
+
+</details>
+
+<details>
+<summary>多语言调用服务示例</summary>
+
+<details>
+<summary>Python</summary>
+
+```python
+import base64
+import requests
+
+API_URL = "http://localhost:8080/ocr" # 服务URL
+image_path = "./demo.jpg"
+output_image_path = "./out.jpg"
+
+# 对本地图像进行Base64编码
+with open(image_path, "rb") as file:
+    image_bytes = file.read()
+    image_data = base64.b64encode(image_bytes).decode("ascii")
+
+payload = {"image": image_data}  # Base64编码的文件内容或者图像URL
+
+# 调用API
+response = requests.post(API_URL, json=payload)
+
+# 处理接口返回数据
+assert response.status_code == 200
+result = response.json()["result"]
+with open(output_image_path, "wb") as file:
+    file.write(base64.b64decode(result["image"]))
+print(f"Output image saved at {output_image_path}")
+print("\nDetected texts:")
+print(result["texts"])
+```
+
+</details>
+
+<details>
+<summary>C++</summary>
+
+```cpp
+#include <iostream>
+#include "cpp-httplib/httplib.h" // https://github.com/Huiyicc/cpp-httplib
+#include "nlohmann/json.hpp" // https://github.com/nlohmann/json
+#include "base64.hpp" // https://github.com/tobiaslocker/base64
+
+int main() {
+    httplib::Client client("localhost:8080");
+    const std::string imagePath = "./demo.jpg";
+    const std::string outputImagePath = "./out.jpg";
+
+    httplib::Headers headers = {
+        {"Content-Type", "application/json"}
+    };
+
+    // 对本地图像进行Base64编码
+    std::ifstream file(imagePath, std::ios::binary | std::ios::ate);
+    std::streamsize size = file.tellg();
+    file.seekg(0, std::ios::beg);
+
+    std::vector<char> buffer(size);
+    if (!file.read(buffer.data(), size)) {
+        std::cerr << "Error reading file." << std::endl;
+        return 1;
+    }
+    std::string bufferStr(reinterpret_cast<const char*>(buffer.data()), buffer.size());
+    std::string encodedImage = base64::to_base64(bufferStr);
+
+    nlohmann::json jsonObj;
+    jsonObj["image"] = encodedImage;
+    std::string body = jsonObj.dump();
+
+    // 调用API
+    auto response = client.Post("/ocr", headers, body, "application/json");
+    // 处理接口返回数据
+    if (response && response->status == 200) {
+        nlohmann::json jsonResponse = nlohmann::json::parse(response->body);
+        auto result = jsonResponse["result"];
+
+        encodedImage = result["image"];
+        std::string decodedString = base64::from_base64(encodedImage);
+        std::vector<unsigned char> decodedImage(decodedString.begin(), decodedString.end());
+        std::ofstream outputImage(outputImagePath, std::ios::binary | std::ios::out);
+        if (outputImage.is_open()) {
+            outputImage.write(reinterpret_cast<char*>(decodedImage.data()), decodedImage.size());
+            outputImage.close();
+            std::cout << "Output image saved at " << outputImagePath << std::endl;
+        } else {
+            std::cerr << "Unable to open file for writing: " << outputImagePath << std::endl;
+        }
+
+        auto texts = result["texts"];
+        std::cout << "\nDetected texts:" << std::endl;
+        for (const auto& text : texts) {
+            std::cout << text << std::endl;
+        }
+    } else {
+        std::cout << "Failed to send HTTP request." << std::endl;
+        return 1;
+    }
+
+    return 0;
+}
+```
+
+</details>
+
+<details>
+<summary>Java</summary>
+
+```java
+import okhttp3.*;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Base64;
+
+public class Main {
+    public static void main(String[] args) throws IOException {
+        String API_URL = "http://localhost:8080/ocr"; // 服务URL
+        String imagePath = "./demo.jpg"; // 本地图像
+        String outputImagePath = "./out.jpg"; // 输出图像
+
+        // 对本地图像进行Base64编码
+        File file = new File(imagePath);
+        byte[] fileContent = java.nio.file.Files.readAllBytes(file.toPath());
+        String imageData = Base64.getEncoder().encodeToString(fileContent);
+
+        ObjectMapper objectMapper = new ObjectMapper();
+        ObjectNode params = objectMapper.createObjectNode();
+        params.put("image", imageData); // Base64编码的文件内容或者图像URL
+
+        // 创建 OkHttpClient 实例
+        OkHttpClient client = new OkHttpClient();
+        MediaType JSON = MediaType.Companion.get("application/json; charset=utf-8");
+        RequestBody body = RequestBody.Companion.create(params.toString(), JSON);
+        Request request = new Request.Builder()
+                .url(API_URL)
+                .post(body)
+                .build();
+
+        // 调用API并处理接口返回数据
+        try (Response response = client.newCall(request).execute()) {
+            if (response.isSuccessful()) {
+                String responseBody = response.body().string();
+                JsonNode resultNode = objectMapper.readTree(responseBody);
+                JsonNode result = resultNode.get("result");
+                String base64Image = result.get("image").asText();
+                JsonNode texts = result.get("texts");
+
+                byte[] imageBytes = Base64.getDecoder().decode(base64Image);
+                try (FileOutputStream fos = new FileOutputStream(outputImagePath)) {
+                    fos.write(imageBytes);
+                }
+                System.out.println("Output image saved at " + outputImagePath);
+                System.out.println("\nDetected texts: " + texts.toString());
+            } else {
+                System.err.println("Request failed with code: " + response.code());
+            }
+        }
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>Go</summary>
+
+```go
+package main
+
+import (
+    "bytes"
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+)
+
+func main() {
+    API_URL := "http://localhost:8080/ocr"
+    imagePath := "./demo.jpg"
+    outputImagePath := "./out.jpg"
+
+    // 对本地图像进行Base64编码
+    imageBytes, err := ioutil.ReadFile(imagePath)
+    if err != nil {
+        fmt.Println("Error reading image file:", err)
+        return
+    }
+    imageData := base64.StdEncoding.EncodeToString(imageBytes)
+
+    payload := map[string]string{"image": imageData} // Base64编码的文件内容或者图像URL
+    payloadBytes, err := json.Marshal(payload)
+    if err != nil {
+        fmt.Println("Error marshaling payload:", err)
+        return
+    }
+
+    // 调用API
+    client := &http.Client{}
+    req, err := http.NewRequest("POST", API_URL, bytes.NewBuffer(payloadBytes))
+    if err != nil {
+        fmt.Println("Error creating request:", err)
+        return
+    }
+
+    res, err := client.Do(req)
+    if err != nil {
+        fmt.Println("Error sending request:", err)
+        return
+    }
+    defer res.Body.Close()
+
+    // 处理接口返回数据
+    body, err := ioutil.ReadAll(res.Body)
+    if err != nil {
+        fmt.Println("Error reading response body:", err)
+        return
+    }
+    type Response struct {
+        Result struct {
+            Image      string   `json:"image"`
+            Texts []map[string]interface{} `json:"texts"`
+        } `json:"result"`
+    }
+    var respData Response
+    err = json.Unmarshal([]byte(string(body)), &respData)
+    if err != nil {
+        fmt.Println("Error unmarshaling response body:", err)
+        return
+    }
+
+    outputImageData, err := base64.StdEncoding.DecodeString(respData.Result.Image)
+    if err != nil {
+        fmt.Println("Error decoding base64 image data:", err)
+        return
+    }
+    err = ioutil.WriteFile(outputImagePath, outputImageData, 0644)
+    if err != nil {
+        fmt.Println("Error writing image to file:", err)
+        return
+    }
+    fmt.Printf("Image saved at %s\n", outputImagePath)
+    fmt.Println("\nDetected texts:")
+    for _, text := range respData.Result.Texts {
+        fmt.Println(text)
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>C#</summary>
+
+```csharp
+using System;
+using System.IO;
+using System.Net.Http;
+using System.Net.Http.Headers;
+using System.Text;
+using System.Threading.Tasks;
+using Newtonsoft.Json.Linq;
+
+class Program
+{
+    static readonly string API_URL = "http://localhost:8080/ocr";
+    static readonly string imagePath = "./demo.jpg";
+    static readonly string outputImagePath = "./out.jpg";
+
+    static async Task Main(string[] args)
+    {
+        var httpClient = new HttpClient();
+
+        // 对本地图像进行Base64编码
+        byte[] imageBytes = File.ReadAllBytes(imagePath);
+        string image_data = Convert.ToBase64String(imageBytes);
+
+        var payload = new JObject{ { "image", image_data } }; // Base64编码的文件内容或者图像URL
+        var content = new StringContent(payload.ToString(), Encoding.UTF8, "application/json");
+
+        // 调用API
+        HttpResponseMessage response = await httpClient.PostAsync(API_URL, content);
+        response.EnsureSuccessStatusCode();
+
+        // 处理接口返回数据
+        string responseBody = await response.Content.ReadAsStringAsync();
+        JObject jsonResponse = JObject.Parse(responseBody);
+
+        string base64Image = jsonResponse["result"]["image"].ToString();
+        byte[] outputImageBytes = Convert.FromBase64String(base64Image);
+
+        File.WriteAllBytes(outputImagePath, outputImageBytes);
+        Console.WriteLine($"Output image saved at {outputImagePath}");
+        Console.WriteLine("\nDetected texts:");
+        Console.WriteLine(jsonResponse["result"]["texts"].ToString());
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>Node.js</summary>
+
+```js
+const axios = require('axios');
+const fs = require('fs');
+
+const API_URL = 'http://localhost:8080/ocr'
+const imagePath = './demo.jpg'
+const outputImagePath = "./out.jpg";
+
+let config = {
+   method: 'POST',
+   maxBodyLength: Infinity,
+   url: API_URL,
+   data: JSON.stringify({
+    'image': encodeImageToBase64(imagePath)  // Base64编码的文件内容或者图像URL
+  })
+};
+
+// 对本地图像进行Base64编码
+function encodeImageToBase64(filePath) {
+  const bitmap = fs.readFileSync(filePath);
+  return Buffer.from(bitmap).toString('base64');
+}
+
+// 调用API
+axios.request(config)
+.then((response) => {
+    // 处理接口返回数据
+    const result = response.data["result"];
+    const imageBuffer = Buffer.from(result["image"], 'base64');
+    fs.writeFile(outputImagePath, imageBuffer, (err) => {
+      if (err) throw err;
+      console.log(`Output image saved at ${outputImagePath}`);
+    });
+    console.log("\nDetected texts:");
+    console.log(result["texts"]);
+})
+.catch((error) => {
+  console.log(error);
+});
+```
+
+</details>
+
+<details>
+<summary>PHP</summary>
+
+```php
+<?php
+
+$API_URL = "http://localhost:8080/ocr"; // 服务URL
+$image_path = "./demo.jpg";
+$output_image_path = "./out.jpg";
+
+// 对本地图像进行Base64编码
+$image_data = base64_encode(file_get_contents($image_path));
+$payload = array("image" => $image_data); // Base64编码的文件内容或者图像URL
+
+// 调用API
+$ch = curl_init($API_URL);
+curl_setopt($ch, CURLOPT_POST, true);
+curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($payload));
+curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
+$response = curl_exec($ch);
+curl_close($ch);
+
+// 处理接口返回数据
+$result = json_decode($response, true)["result"];
+file_put_contents($output_image_path, base64_decode($result["image"]));
+echo "Output image saved at " . $output_image_path . "\n";
+echo "\nDetected texts:\n";
+print_r($result["texts"]);
+
+?>
+```
+
+</details>
+</details>
+<br/>
+
+📱 **端侧部署**:端侧部署是一种将计算和数据处理功能放在用户设备本身上的方式,设备可以直接处理数据,而不需要依赖远程的服务器。PaddleX 支持将模型部署在 Android 等端侧设备上,详细的端侧部署流程请参考[PaddleX端侧部署指南](../../../pipeline_deploy/edge_deploy.md)。
+您可以根据需要选择合适的方式部署模型产线,进而进行后续的 AI 应用集成。
+
+## 4. 二次开发
+如果车辆属性识别产线提供的默认模型权重在您的场景中,精度或速度不满意,您可以尝试利用**您自己拥有的特定领域或应用场景的数据**对现有模型进行进一步的**微调**,以提升车辆属性识别产线在您的场景中的识别效果。
+
+### 4.1 模型微调
+由于车辆属性识别产线包含车辆属性识别模块和车辆检测模块,如果模型产线的效果不及预期可能来自于其中任何一个模块。
+您可以对识别效果差的图片进行分析,如果在分析过程中发现有较多的主体目标未被检测出来,那么可能是车辆检测模型存在不足,您需要参考[车辆检测模块开发教程](../../../module_usage/tutorials/cv_modules/vehicle_detection.md)中的[二次开发](../../../module_usage/tutorials/cv_modules/vehicle_detection.md#四二次开发)章节,使用您的私有数据集对车辆检测模型进行微调;如果检测出来的主体属性识别错误,那么您需要参考[车辆属性识别模块开发教程](../../../module_usage/tutorials/cv_modules/vehicle_attribute_recognition.md)中的[二次开发](../../../module_usage/tutorials/cv_modules/vehicle_attribute_recognition.md#四二次开发)章节,使用您的私有数据集对车辆属性识别模型进行微调。
+
+### 4.2 模型应用
+当您使用私有数据集完成微调训练后,可获得本地模型权重文件。
+
+若您需要使用微调后的模型权重,只需对产线配置文件做修改,将微调后模型权重的本地路径替换至产线配置文件中的对应位置即可:
+
+```
+......
+Pipeline:
+  model: PP-LCNet_x1_0_vehicle_attribute  #可修改为微调后模型的本地路径
+  device: "gpu"
+  batch_size: 1
+......
+```
+随后, 参考本地体验中的命令行方式或 Python 脚本方式,加载修改后的产线配置文件即可。
+
+##  5. 多硬件支持
+PaddleX 支持英伟达 GPU、昆仑芯 XPU、昇腾 NPU和寒武纪 MLU 等多种主流硬件设备,**仅需修改 `--device` 参数**即可完成不同硬件之间的无缝切换。
+
+例如,您使用英伟达 GPU 进行车辆属性识别产线的推理,使用的命令为:
+
+```bash
+paddlex --pipeline vehicle_attribute_recognition --input vehicle_attribute_002.jpg --device gpu:0
+```
+此时,若您想将硬件切换为昇腾 NPU,仅需将 `--device` 修改为 npu:0 即可:
+
+```bash
+paddlex --pipeline vehicle_attribute_recognition --input vehicle_attribute_002.jpg --device npu:0
+```
+若您想在更多种类的硬件上使用车辆属性识别产线,请参考[PaddleX多硬件使用指南](../../../other_devices_support/multi_devices_use_guide.md)。

+ 616 - 0
docs/pipeline_usage/tutorials/cv_pipelines/vehicle_attribute_recognition_en.md

@@ -0,0 +1,616 @@
+English | [简体中文](vehicle_attribute_recognition.md)
+
+# Vehicle Attribute Recognition Pipeline Tutorial
+
+## 1. Introduction to Vehicle Attribute Recognition Pipeline
+Vehicle attribute recognition is a crucial component in computer vision systems. Its primary task is to locate and label specific attributes of vehicles in images or videos, such as vehicle type, color, and license plate number. This task not only requires accurately detecting vehicles but also identifying detailed attribute information for each vehicle. The vehicle attribute recognition pipeline is an end-to-end serial system for locating and recognizing vehicle attributes, widely used in traffic management, intelligent parking, security surveillance, autonomous driving, and other fields. It significantly enhances system efficiency and intelligence levels, driving the development and innovation of related industries.
+
+![](https://raw.githubusercontent.com/cuicheng01/PaddleX_doc_images/refs/heads/main/images/pipelines/vehicle_attribute_recognition/01.jpg)
+
+**The vehicle attribute recognition pipeline includes a vehicle detection module and a vehicle attribute recognition module**, with several models in each module. Which models to use can be selected based on the benchmark data below. **If you prioritize model accuracy, choose models with higher accuracy; if you prioritize inference speed, choose models with faster inference; if you prioritize model storage size, choose models with smaller storage**.
+
+<details>
+   <summary> 👉Model List Details</summary>
+
+**Vehicle Detection Module**:
+
+<table>
+  <tr>
+    <th>Model</th>
+    <th>mAP 0.5:0.95</th>
+    <th>GPU Inference Time (ms)</th>
+    <th>CPU Inference Time (ms)</th>
+    <th>Model Size (M)</th>
+    <th>Description</th>
+  </tr>
+  <tr>
+    <td>PP-YOLOE-S_vehicle</td>
+    <td>61.3</td>
+    <td>15.4</td>
+    <td>178.4</td>
+    <td>28.79</td>
+    <td rowspan="2">Vehicle detection model based on PP-YOLOE</td>
+  </tr>
+  <tr>
+    <td>PP-YOLOE-L_vehicle</td>
+    <td>63.9</td>
+    <td>32.6</td>
+    <td>775.6</td>
+    <td>196.02</td>
+  </tr>
+</table>
+
+**Note: The above accuracy metrics are mAP(0.5:0.95) on the PPVehicle validation set. All GPU inference times are based on an NVIDIA Tesla T4 machine with FP32 precision. CPU inference speeds are based on an Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz with 8 threads and FP32 precision.**
+
+**Vehicle Attribute Recognition Module**:
+
+|Model|mA (%)|GPU Inference Time (ms)|CPU Inference Time (ms)|Model Size (M)|Description|
+|-|-|-|-|-|-|
+|PP-LCNet_x1_0_vehicle_attribute|91.7|3.84845|9.23735|6.7 M|PP-LCNet_x1_0_vehicle_attribute is a lightweight vehicle attribute recognition model based on PP-LCNet.|
+
+**Note: The above accuracy metrics are mA on the VeRi dataset. GPU inference times are based on an NVIDIA Tesla T4 machine with FP32 precision. CPU inference speeds are based on an Intel(R) Xeon(R) Gold 5117 CPU @ 2.00GHz with 8 threads and FP32 precision.**
+
+</details>
+
+## 2. Quick Start
+The pre-trained models provided by PaddleX can quickly demonstrate results. You can experience the effects of the vehicle attribute recognition pipeline online or locally using command line or Python.
+
+### 2.1 Online Experience
+Not supported yet.
+
+### 2.2 Local Experience
+Before using the vehicle attribute recognition pipeline locally, ensure you have installed the PaddleX wheel package according to the [PaddleX Local Installation Tutorial](../../../installation/installation_en.md).
+
+#### 2.2.1 Experience via Command Line
+You can quickly experience the vehicle attribute recognition pipeline with a single command. Use the [test file](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_002.jpg) and replace `--input` with the local path for prediction.
+
+```bash
+paddlex --pipeline vehicle_attribute_recognition --input vehicle_attribute_002.jpg --device gpu:0
+```
+Parameter Description:
+
+```
+--pipeline: The name of the pipeline, here it is the vehicle attribute recognition pipeline.
+--input: The local path or URL of the input image to be processed.
+--device: The index of the GPU to use (e.g., gpu:0 means using the first GPU, gpu:1,2 means using the second and third GPUs). You can also choose to use the CPU (--device cpu).
+```
+
+When executing the above command, the default vehicle attribute recognition pipeline configuration file is loaded. If you need a custom configuration file, you can run the following command to obtain it:
+
+<details>
+   <summary> 👉Click to Expand</summary>
+
+```
+paddlex --get_pipeline_config vehicle_attribute_recognition
+```
+After execution, the vehicle attribute recognition pipeline configuration file will be saved in the current directory. If you wish to specify a custom save location, you can run the following command (assuming the custom save location is `./my_path`):
+
+```
+paddlex --get_pipeline_config vehicle_attribute_recognition --save_path ./my_path
+```
+
+After obtaining the pipeline configuration file, you can replace `--pipeline` with the saved path of the configuration file to make it effective. For example, if the saved path of the configuration file is `./vehicle_attribute_recognition.yaml`, just execute:
+
+```bash
+paddlex --pipeline ./vehicle_attribute_recognition.yaml --input vehicle_attribute_002.jpg --device gpu:0
+```
+Among them, parameters such as `--model` and `--device` do not need to be specified, and the parameters in the configuration file will be used. If parameters are still specified, the specified parameters will take precedence.
+
+</details>
+
+#### 2.2.2 Integrating via Python Script
+A few lines of code suffice for rapid inference with the pipeline, taking the vehicle attribute recognition pipeline as an example:
+
+```python
+from paddlex import create_pipeline
+
+pipeline = create_pipeline(pipeline="vehicle_attribute_recognition")
+
+output = pipeline.predict("vehicle_attribute_002.jpg")
+for res in output:
+    res.print()  ## Print the structured output of the prediction
+    res.save_to_img("./output/")  ## Save the visualized result image
+    res.save_to_json("./output/")  ## Save the structured output of the prediction
+```
+The results obtained are the same as those from the command line method.
+
+In the above Python script, the following steps are executed:
+
+(1) Instantiate the `create_pipeline` to create a pipeline object: Specific parameter descriptions are as follows:
+
+| Parameter | Description | Parameter Type | Default Value |
+|-|-|-|-|
+| `pipeline` | The name of the pipeline or the path to the pipeline configuration file. If it is the name of the pipeline, it must be a pipeline supported by PaddleX. | `str` | None |
+| `device` | The device for pipeline model inference. Supports: "gpu", "cpu". | `str` | "gpu" |
+| `use_hpip` | Whether to enable high-performance inference, only available when the pipeline supports high-performance inference. | `bool` | `False` |
+
+(2) Call the `predict` method of the vehicle attribute recognition pipeline object for inference prediction: The `predict` method parameter is `x`, which is used to input data to be predicted, supporting multiple input methods. Specific examples are as follows:
+
+| Parameter Type | Description |
+|----------------|-----------------------------------------------------------------------------------------------------------|
+| Python Var | Supports directly passing in Python variables, such as image data represented by numpy.ndarray. |
+| `str` | Supports passing in the file path of the data to be predicted, such as the local path of an image file: `/root/data/img.jpg`. |
+| `str` | Supports passing in the URL of the data file to be predicted, such as the network URL of an image file: [Example](https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_002.jpg). |
+| `str` | Supports passing in a local directory, which should contain data files to be predicted, such as the local path: `/root/data/`. |
+| `dict` | Supports passing in a dictionary type, where the key needs to correspond to the specific task, such as "img" for the vehicle attribute recognition task, and the value of the dictionary supports the above data types, for example: `{"img": "/root/data1"}`. |
+| `list` | Supports passing in a list, where the elements of the list need to be the above data types, such as `[numpy.ndarray, numpy.ndarray], ["/root/data/img1.jpg", "/root/data/img2.jpg"], ["/root/data1", "/root/data2"], [{"img": "/root/data1"}, {"img": "/root/data2/img.jpg"}]`. |
+
+(3) Obtain the prediction results by calling the `predict` method: The `predict` method is a `generator`, so prediction results need to be obtained through iteration. The `predict` method predicts data in batches, so the prediction results are in the form of a list representing a set of prediction results.
+
+(4) Processing the Prediction Results: The prediction result for each sample is in `dict` format, which supports printing or saving to a file. The supported file types for saving depend on the specific pipeline, such as:
+
+| Method         | Description                   | Method Parameters                                                                                        |
+|----------------|-------------------------------|----------------------------------------------------------------------------------------------------------|
+| print          | Print results to the terminal | `- format_json`: bool, whether to format the output content with json indentation, default is True;<br>`- indent`: int, json formatting setting, only effective when format_json is True, default is 4;<br>`- ensure_ascii`: bool, json formatting setting, only effective when format_json is True, default is False; |
+| save_to_json   | Save results as a json file   | `- save_path`: str, the path to save the file, when it is a directory, the saved file name is consistent with the input file type;<br>`- indent`: int, json formatting setting, default is 4;<br>`- ensure_ascii`: bool, json formatting setting, default is False; |
+| save_to_img    | Save results as an image file | `- save_path`: str, the path to save the file, when it is a directory, the saved file name is consistent with the input file type; |
+
+If you have obtained the configuration file, you can customize the configurations for the vehicle attribute recognition pipeline by simply modifying the `pipeline` parameter in the `create_pipeline` method to the path of your pipeline configuration file.
+
+For example, if your configuration file is saved at `./my_path/vehicle_attribute_recognition.yaml`, you only need to execute:
+
+```python
+from paddlex import create_pipeline
+pipeline = create_pipeline(pipeline="./my_path/vehicle_attribute_recognition.yaml")
+output = pipeline.predict("vehicle_attribute_002.jpg")
+for res in output:
+    res.print()  # Print the structured output of the prediction
+    res.save_to_img("./output/")  # Save the visualized result image
+    res.save_to_json("./output/")  # Save the structured output of the prediction
+```
+
+## 3. Development Integration/Deployment
+If the vehicle attribute recognition pipeline meets your requirements for inference speed and accuracy, you can proceed directly with development integration/deployment.
+
+If you need to directly apply the vehicle attribute recognition pipeline in your Python project, you can refer to the example code in [2.2.2 Integrating via Python Script](#222-integrating-via-python-script).
+
+Additionally, PaddleX provides three other deployment methods, detailed as follows:
+
+🚀 **High-Performance Inference**: In actual production environments, many applications have stringent standards for the performance metrics of deployment strategies (especially response speed) to ensure efficient system operation and smooth user experience. To this end, PaddleX provides high-performance inference plugins aimed at deeply optimizing model inference and pre/post-processing to significantly speed up the end-to-end process. For detailed high-performance inference procedures, please refer to the [PaddleX High-Performance Inference Guide](../../../pipeline_deploy/high_performance_inference.md).
+
+☁️ **Service-Oriented Deployment**: Service-oriented deployment is a common deployment form in actual production environments. By encapsulating inference functionality as services, clients can access these services through network requests to obtain inference results. PaddleX supports users in achieving service-oriented deployment of pipelines at low cost. For detailed service-oriented deployment procedures, please refer to the [PaddleX Service-Oriented Deployment Guide](../../../pipeline_deploy/service_deploy.md).
+
+Below are the API reference and multi-language service invocation examples:
+
+<details>
+<summary>API Reference</summary>
+
+For all operations provided by the service:
+
+- The response body and the request body of POST requests are both JSON data (JSON objects).
+- When the request is successfully processed, the response status code is `200`, and the attributes of the response body are as follows:
+
+    | Name | Type | Meaning |
+    |-|-|-|
+    |`errorCode`|`integer`|Error code. Fixed to `0`. |
+    |`errorMsg`|`string`|Error description. Fixed to `"Success"`. |
+
+    The response body may also have a `result` attribute of type `object`, which stores the operation result information.
+
+- When the request is not successfully processed, the attributes of the response body are as follows:
+
+    | Name | Type | Meaning |
+    |-|-|-|
+    |`errorCode`|`integer`|Error code. Same as the response status code. |
+    |`errorMsg`|`string`|Error description. |
+
+The operations provided by the service are as follows:
+
+- **`infer`**
+
+    Obtain OCR results for an image.
+
+    `POST /ocr`
+
+    - The attributes of the request body are as follows:
+
+        | Name | Type | Meaning | Required |
+        |-|-|-|-|
+        |`image`|`string`|The URL of an accessible image file or the Base64 encoded result of the image file content. |Yes|
+        |`inferenceParams`|`object`|Inference parameters. |No|
+
+        The attributes of```markdown
+<details>
+<summary>Python</summary>
+
+```python
+import base64
+import requests
+
+API_URL = "http://localhost:8080/ocr" # Service URL
+image_path = "./demo.jpg"
+output_image_path = "./out.jpg"
+
+# Encode the local image to Base64
+with open(image_path, "rb") as file:
+    image_bytes = file.read()
+    image_data = base64.b64encode(image_bytes).decode("ascii")
+
+payload = {"image": image_data}  # Base64 encoded file content or image URL
+
+# Call the API
+response = requests.post(API_URL, json=payload)
+
+# Process the response data
+assert response.status_code == 200
+result = response.json()["result"]
+with open(output_image_path, "wb") as file:
+    file.write(base64.b64decode(result["image"]))
+print(f"Output image saved at {output_image_path}")
+print("\nDetected texts:")
+print(result["texts"])
+```
+
+</details>
+
+<details>
+<summary>C++</summary>
+
+```cpp
+#include <iostream>
+#include "cpp-httplib/httplib.h" // https://github.com/Huiyicc/cpp-httplib
+#include "nlohmann/json.hpp" // https://github.com/nlohmann/json
+#include "base64.hpp" // https://github.com/tobiaslocker/base64
+
+int main() {
+    httplib::Client client("localhost:8080");
+    const std::string imagePath = "./demo.jpg";
+    const std::string outputImagePath = "./out.jpg";
+
+    httplib::Headers headers = {
+        {"Content-Type", "application/json"}
+    };
+
+    // Encode the local image to Base64
+    std::ifstream file(imagePath, std::ios::binary | std::ios::ate);
+    std::streamsize size = file.tellg();
+    file.seekg(0, std::ios::beg);
+
+    std::vector<char> buffer(size);
+    if (!file.read(buffer.data(), size)) {
+        std::cerr << "Error reading file." << std::endl;
+        return 1;
+    }
+    std::string bufferStr(reinterpret_cast<const char*>(buffer.data()), buffer.size());
+    std::string encodedImage = base64::to_base64(bufferStr);
+
+    nlohmann::json jsonObj;
+    jsonObj["image"] = encodedImage;
+    std::string body = jsonObj.dump();
+
+    // Call the API
+    auto response = client.Post("/ocr", headers, body, "application/json");
+    // Process the response data
+    if (response && response->status == 200) {
+        nlohmann::json jsonResponse = nlohmann::json::parse(response->body);
+        auto result = jsonResponse["result"];
+
+        encodedImage = result["image"];
+        std::string decodedString = base64::from_base64(encodedImage);
+        std::vector<unsigned char> decodedImage(decodedString.begin(), decodedString.end());
+        std::ofstream outputImage(outputImagePath, std::ios::binary | std::ios::out);
+        if (outputImage.is_open()) {
+            outputImage.write(reinterpret_cast<char*>(decodedImage.data()), decodedImage.size());
+            outputImage.close();
+            std::cout << "Output image saved at " << outputImagePath << std::endl;
+        } else {
+            std::cerr << "Unable to open file for writing: " << outputImagePath << std::endl;
+        }
+
+        auto texts = result["texts"];
+        std::cout << "\nDetected texts:" << std::endl;
+        for (const auto& text : texts) {
+            std::cout << text << std::endl;
+        }
+    } else {
+        std::cout << "Failed to send HTTP request." << std::endl;
+        return 1;
+    }
+
+    return 0;
+}
+
+```
+
+</details>
+``````markdown
+# Tutorial on Artificial Intelligence and Computer Vision
+
+This tutorial, intended for numerous developers, covers the basics and applications of AI and Computer Vision.
+
+<details>
+<summary>Java</summary>
+
+```java
+import okhttp3.*;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.JsonNode;
+import com.fasterxml.jackson.databind.node.ObjectNode;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.Base64;
+
+public class Main {
+    public static void main(String[] args) throws IOException {
+        String API_URL = "http://localhost:8080/ocr"; // Service URL
+        String imagePath = "./demo.jpg"; // Local image path
+        String outputImagePath = "./out.jpg"; // Output image path
+
+        // Encode the local image to Base64
+        File file = new File(imagePath);
+        byte[] fileContent = java.nio.file.Files.readAllBytes(file.toPath());
+        String imageData = Base64.getEncoder().encodeToString(fileContent);
+
+        ObjectMapper objectMapper = new ObjectMapper();
+        ObjectNode params = objectMapper.createObjectNode();
+        params.put("image", imageData); // Base64-encoded file content or image URL
+
+        // Create an OkHttpClient instance
+        OkHttpClient client = new OkHttpClient();
+        MediaType JSON = MediaType.get("application/json; charset=utf-8");
+        RequestBody body = RequestBody.create(params.toString(), JSON);
+        Request request = new Request.Builder()
+                .url(API_URL)
+                .post(body)
+                .build();
+
+        // Call the API and process the response
+        try (Response response = client.newCall(request).execute()) {
+            if (response.isSuccessful()) {
+                String responseBody = response.body().string();
+                JsonNode resultNode = objectMapper.readTree(responseBody);
+                JsonNode result = resultNode.get("result");
+                String base64Image = result.get("image").asText();
+                JsonNode texts = result.get("texts");
+
+                byte[] imageBytes = Base64.getDecoder().decode(base64Image);
+                try (FileOutputStream fos = new FileOutputStream(outputImagePath)) {
+                    fos.write(imageBytes);
+                }
+                System.out.println("Output image saved at " + outputImagePath);
+                System.out.println("\nDetected texts: " + texts.toString());
+            } else {
+                System.err.println("Request failed with code: " + response.code());
+            }
+        }
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>Go</summary>
+
+```go
+package main
+
+import (
+    "bytes"
+    "encoding/base64"
+    "encoding/json"
+    "fmt"
+    "io/ioutil"
+    "net/http"
+)
+
+func main() {
+    API_URL := "http://localhost:8080/ocr"
+    imagePath := "./demo.jpg"
+    outputImagePath := "./out.jpg"
+
+    // Encode the local image to Base64
+    imageBytes, err := ioutil.ReadFile(imagePath)
+    if err != nil {
+        fmt.Println("Error reading image file:", err)
+        return
+    }
+    imageData := base64.StdEncoding.EncodeToString(imageBytes)
+
+    payload := map[string]string{"image": imageData} // Base64-encoded file content or image URL
+    payloadBytes, err := json.Marshal(payload)
+    if err != nil {
+        fmt.Println("Error marshaling payload:", err)
+        return
+    }
+
+    // Call the API
+    client := &http.Client{}
+    req, err := http.NewRequest("POST", API_URL, bytes.NewBuffer(payloadBytes))
+    if err != nil {
+        fmt.Println("Error creating request:", err)
+        return
+    }
+
+    res, err := client.Do(req)
+    if err != nil {
+        fmt.Println("Error sending request:", err)
+        return
+    }
+    defer res.Body.Close()
+
+    // Process the response
+    body, err := ioutil.ReadAll(res.Body)
+    if err != nil {
+        fmt.Println("Error reading response body:", err)
+        return
+    }```markdown
+# An English Tutorial on Artificial Intelligence and Computer Vision
+
+This tutorial document is intended for numerous developers and covers content related to artificial intelligence and computer vision.
+
+<details>
+<summary>C#</summary>
+
+```csharp
+using System;
+using System.IO;
+using System.Net.Http;
+using System.Net.Http.Headers;
+using System.Text;
+using System.Threading.Tasks;
+using Newtonsoft.Json.Linq;
+
+class Program
+{
+    static readonly string API_URL = "http://localhost:8080/ocr";
+    static readonly string imagePath = "./demo.jpg";
+    static readonly string outputImagePath = "./out.jpg";
+
+    static async Task Main(string[] args)
+    {
+        var httpClient = new HttpClient();
+
+        // Encode the local image to Base64
+        byte[] imageBytes = File.ReadAllBytes(imagePath);
+        string image_data = Convert.ToBase64String(imageBytes);
+
+        var payload = new JObject{ { "image", image_data } }; // Base64 encoded file content or image URL
+        var content = new StringContent(payload.ToString(), Encoding.UTF8, "application/json");
+
+        // Call the API
+        HttpResponseMessage response = await httpClient.PostAsync(API_URL, content);
+        response.EnsureSuccessStatusCode();
+
+        // Process the API response
+        string responseBody = await response.Content.ReadAsStringAsync();
+        JObject jsonResponse = JObject.Parse(responseBody);
+
+        string base64Image = jsonResponse["result"]["image"].ToString();
+        byte[] outputImageBytes = Convert.FromBase64String(base64Image);
+
+        File.WriteAllBytes(outputImagePath, outputImageBytes);
+        Console.WriteLine($"Output image saved at {outputImagePath}");
+        Console.WriteLine("\nDetected texts:");
+        Console.WriteLine(jsonResponse["result"]["texts"].ToString());
+    }
+}
+```
+
+</details>
+
+<details>
+<summary>Node.js</summary>
+
+```js
+const axios = require('axios');
+const fs = require('fs');
+
+const API_URL = 'http://localhost:8080/ocr';
+const imagePath = './demo.jpg';
+const outputImagePath = "./out.jpg";
+
+let config = {
+   method: 'POST',
+   maxBodyLength: Infinity,
+   url: API_URL,
+   data: JSON.stringify({
+    'image': encodeImageToBase64(imagePath)  // Base64 encoded file content or image URL
+  })
+};
+
+// Encode the local image to Base64
+function encodeImageToBase64(filePath) {
+  const bitmap = fs.readFileSync(filePath);
+  return Buffer.from(bitmap).toString('base64');
+}
+
+// Call the API
+axios.request(config)
+.then((response) => {
+    // Process the API response
+    const result = response.data["result"];
+    const imageBuffer = Buffer.from(result["image"], 'base64');
+    fs.writeFile(outputImagePath, imageBuffer, (err) => {
+      if (err) throw err;
+      console.log(`Output image saved at ${outputImagePath}`);
+    });
+    console.log("\nDetected texts:");
+    console.log(result["texts"]);
+})
+.catch((error) => {
+  console.log(error);
+});
+```
+
+</details>
+
+<details>
+<summary>PHP</summary>
+
+```php
+<?php
+
+$API_URL = "http://localhost:8080/ocr"; // Service URL
+$image_path = "./demo.jpg";
+$output_image_path = "./out.jpg";
+
+// Encode the local image to Base64
+$image_data = base64_encode(file_get_contents($image_path));
+$payload = array("image" => $image_data); // Base64 encoded file content or image URL
+
+// Call the API
+$ch = curl_init($API_URL);
+curl_setopt($ch, CURLOPT_POST, true);
+curl_setopt($ch, CURLOPT_POSTFIELDS, json_encode($payload));
+curl_setopt($ch, CURLOPT_RETURNTRANSFER, true);
+$response = curl_exec($ch);
+curl_close($ch);
+
+// Process the API response
+$result = json_decode($response, true)["result"];
+file_put_contents($output_image_path, base64_decode($result["image"]));
+echo "Output image saved at " . $output_image_path . "\n";
+echo "\nDetected texts:\n";
+print_r($result["texts"]);
+
+?>
+```
+
+</details>
+</details>
+<br/>
+
+📱 **Edge Deployment**: Edge deployment is a method where computing and data processing functions are placed on the user's device itself, allowing the device to process data directly without relying on remote servers. PaddleX supports deploying models on edge devices such as Android. For detailed edge deployment procedures, please refer to the [PaddleX Edge Deployment Guide](../../../pipeline_deploy/edge_deploy_en.md).
+You can choose an appropriate method to deploy your model pipeline based on your needs, and proceed with subsequent AI application integration.
+
+
+## 4. Custom Development
+If the default model weights provided by the vehicle attribute recognition pipeline do not meet your expectations in terms of accuracy or speed for your specific scenario, you can try to further **fine-tune** the existing models using **your own domain-specific or application-specific data** to enhance the recognition performance of the pipeline in your scenario.
+
+### 4.1 Model Fine-tuning
+Since the vehicle attribute recognition pipeline consists of two modules (vehicle detection and vehicle attribute recognition), the suboptimal performance of the pipeline may stem from either module.
+
+You can analyze images with poor recognition results. If you find that many vehicles are not detected during the analysis, it may indicate deficiencies in the vehicle detection model. In this case, you need to refer to the [Custom Development](../../../module_usage/tutorials/cv_modules/vehicle_detection_en.md#IV.-Custom-Development) section in the [Vehicle Detection Module Development Tutorial](../../../module_usage/tutorials/cv_modules/vehicle_detection_en.md) and use your private dataset to fine-tune the vehicle detection model. If the attributes of detected vehicles are recognized incorrectly, you should refer to the [Custom Development](../../../module_usage/tutorials/cv_modules/vehicle_attribute_recognition_en.md#IV.-Custom-Development) section in the [Vehicle Attribute Recognition Module Development Tutorial](../../../module_usage/tutorials/cv_modules/vehicle_attribute_recognition_en.md) to fine-tune the vehicle attribute recognition model.
+
+### 4.2 Model Application
+After completing fine-tuning training with your private dataset, you will obtain local model weight files.
+
+To use the fine-tuned model weights, you only need to modify the pipeline configuration file by replacing the corresponding path in the pipeline configuration file with the local path of the fine-tuned model weights:
+
+```
+......
+Pipeline:
+  model: PP-LCNet_x1_0  # Can be modified to the local path of the fine-tuned model
+  device: "gpu"
+  batch_size: 1
+......
+```
+Subsequently, refer to the command-line method or Python script method in [2.2 Local Experience](#22-local-experience) to load the modified pipeline configuration file.
+
+## 5. Multi-hardware Support
+PaddleX supports various mainstream hardware devices such as NVIDIA GPUs, Kunlun XPU, Ascend NPU, and Cambricon MLU. **Simply modifying the `--device` parameter** allows seamless switching between different hardware.
+
+For example, to run inference with the vehicle attribute recognition pipeline on an NVIDIA GPU, the command is:
+
+```bash
+paddlex --pipeline vehicle_attribute_recognition --input vehicle_attribute_002.jpg --device gpu:0
+```
+At this point, if you want to switch the hardware to an Ascend NPU, simply change `--device` to npu:0:
+
+```bash
+paddlex --pipeline vehicle_attribute_recognition --input vehicle_attribute_002.jpg --device npu:0
+```

+ 1 - 1
paddlex/inference/pipelines/attribute_recognition.py

@@ -24,7 +24,7 @@ from .base import BasePipeline
 class AttributeRecPipeline(BasePipeline):
     """Attribute Rec Pipeline"""
 
-    entities = ["pedestrian_attribute", "vehicle_attribute"]
+    entities = ["pedestrian_attribute_recognition", "vehicle_attribute_recognition"]
 
     def __init__(
         self,

+ 1 - 3
paddlex/inference/results/attribute_rec.py

@@ -31,9 +31,7 @@ def draw_attribute_result(img, boxes):
     Returns:
         img (PIL.Image.Image): visualized image
     """
-    font_size = int(0.024 * int(img.width)) + 2
-    if isinstance(boxes[0]["label"], list):
-        font_size = int(font_size * 0.7)
+    font_size = int((0.024 * int(img.width) + 2) * 0.7)
     font = ImageFont.truetype(PINGFANG_FONT_FILE_PATH, font_size, encoding="utf-8")
 
     draw_thickness = int(max(img.size) * 0.005)

+ 1 - 1
paddlex/pipelines/pedestrian_attribute.yaml → paddlex/pipelines/pedestrian_attribute_recognition.yaml

@@ -1,5 +1,5 @@
 Global:
-  pipeline_name: pedestrian_attribute
+  pipeline_name: pedestrian_attribute_recognition
   input: https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/pedestrian_attribute_002.jpg
   
 Pipeline:

+ 1 - 1
paddlex/pipelines/vehicle_attribute.yaml → paddlex/pipelines/vehicle_attribute_recognition.yaml

@@ -1,5 +1,5 @@
 Global:
-  pipeline_name: vehicle_attribute
+  pipeline_name: vehicle_attribute_recognition
   input: https://paddle-model-ecology.bj.bcebos.com/paddlex/imgs/demo_image/vehicle_attribute_002.jpg
   
 Pipeline:

+ 0 - 1
paddlex/repo_manager/requirements.txt

@@ -7,7 +7,6 @@ editdistance
 openpyxl
 premailer
 python-docx
-requests <= 2.29
 ######## For Chatocrv3 #######
 qianfan==0.0.3
 langchain==0.1.5