|
@@ -237,7 +237,7 @@ The visualization results are saved under `save_path`, and the visualization res
|
|
|
|
|
|
|
|
```python
|
|
```python
|
|
|
from paddlex import create_pipeline
|
|
from paddlex import create_pipeline
|
|
|
-pipeline = create_pipeline(pipeline_name="semantic_segmentation")
|
|
|
|
|
|
|
+pipeline = create_pipeline(pipeline="semantic_segmentation")
|
|
|
output = pipeline.predict(input="general_semantic_segmentation_002.png", target_size = -1)
|
|
output = pipeline.predict(input="general_semantic_segmentation_002.png", target_size = -1)
|
|
|
for res in output:
|
|
for res in output:
|
|
|
res.print()
|
|
res.print()
|
|
@@ -255,31 +255,32 @@ In the above Python script, the following steps are executed:
|
|
|
<th>Parameter</th>
|
|
<th>Parameter</th>
|
|
|
<th>Description</th>
|
|
<th>Description</th>
|
|
|
<th>Type</th>
|
|
<th>Type</th>
|
|
|
-<th>Default Value</th>
|
|
|
|
|
|
|
+<th>Default</th>
|
|
|
</tr>
|
|
</tr>
|
|
|
</thead>
|
|
</thead>
|
|
|
<tbody>
|
|
<tbody>
|
|
|
<tr>
|
|
<tr>
|
|
|
-<td><code>pipeline_name</code></td>
|
|
|
|
|
-<td>The name of the pipeline, which must be supported by PaddleX.</td>
|
|
|
|
|
|
|
+<td><code>pipeline</code></td>
|
|
|
|
|
+<td>The pipeline name or the path to the pipeline config file. If it is a pipeline name, it must be a pipeline supported by PaddleX.</td>
|
|
|
<td><code>str</code></td>
|
|
<td><code>str</code></td>
|
|
|
<td><code>None</code></td>
|
|
<td><code>None</code></td>
|
|
|
</tr>
|
|
</tr>
|
|
|
<tr>
|
|
<tr>
|
|
|
<td><code>config</code></td>
|
|
<td><code>config</code></td>
|
|
|
-<td>The path to the pipeline configuration file.</td>
|
|
|
|
|
-<td><code>str</code></td>
|
|
|
|
|
|
|
+<td>Specific configuration information for the pipeline (if set simultaneously with <code>pipeline</code>, it takes precedence over <code>pipeline</code>, and the pipeline name must be consistent with <code>pipeline</code>).
|
|
|
|
|
+</td>
|
|
|
|
|
+<td><code>dict[str, Any]</code></td>
|
|
|
<td><code>None</code></td>
|
|
<td><code>None</code></td>
|
|
|
</tr>
|
|
</tr>
|
|
|
<tr>
|
|
<tr>
|
|
|
<td><code>device</code></td>
|
|
<td><code>device</code></td>
|
|
|
-<td>The device for pipeline inference. It supports specifying specific GPU card numbers (e.g., "gpu:0"), specific card numbers for other hardware (e.g., "npu:0"), and CPU (e.g., "cpu").</td>
|
|
|
|
|
|
|
+<td>The device used for pipeline inference. It supports specifying a specific GPU card number, such as "gpu:0", specific card numbers for other hardware, such as "npu:0", or the CPU, such as "cpu".</td>
|
|
|
<td><code>str</code></td>
|
|
<td><code>str</code></td>
|
|
|
<td><code>None</code></td>
|
|
<td><code>None</code></td>
|
|
|
</tr>
|
|
</tr>
|
|
|
<tr>
|
|
<tr>
|
|
|
<td><code>use_hpip</code></td>
|
|
<td><code>use_hpip</code></td>
|
|
|
-<td>Whether to enable high-performance inference, which is only available if the pipeline supports it.</td>
|
|
|
|
|
|
|
+<td>Whether to enable high-performance inference, only available when the pipeline supports high-performance inference.</td>
|
|
|
<td><code>bool</code></td>
|
|
<td><code>bool</code></td>
|
|
|
<td><code>False</code></td>
|
|
<td><code>False</code></td>
|
|
|
</tr>
|
|
</tr>
|