git clone https://gitee.com/zhch158_admin/dots.ocr.git
git config --local user.name "zhch158_admin"
git config --local user.email "zhch158@sina.com"
# Cache credentials with a custom timeout (e.g. 7200 seconds)
git config --global credential.helper 'cache --timeout=7200'
conda create -n py312 python=3.12
# vLLM currently supports Python only up to 3.12
flash-attn ships no prebuilt wheels for macOS: it is written specifically for NVIDIA GPUs (CUDA), while Macs use Apple Silicon or Intel CPUs, neither of which supports CUDA.
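Code that has to run on both CUDA machines and Macs can therefore pick the attention backend at import time; a minimal sketch ("sdpa" is PyTorch's built-in scaled-dot-product attention, used here as the fallback):

import importlib.util

# Prefer Flash Attention 2 when the CUDA-only flash-attn wheel is importable,
# otherwise fall back to PyTorch's native SDPA implementation.
attn_impl = "flash_attention_2" if importlib.util.find_spec("flash_attn") else "sdpa"
print(f"Using attn_implementation={attn_impl}")

The resulting string can be passed as attn_implementation to from_pretrained, as in demo_hf.py further below.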
cd dots.ocr
conda activate py312
uv pip install torch==2.7.0 torchvision==0.22.0 torchaudio==2.7.0
uv pip install -e .
uv pip install vllm==0.9.1 --torch-backend=auto
# To resolve vLLM version compatibility issues, first check the installed versions
pip list | grep -E "(vllm|transformers)"
# Upgrade to the latest mutually compatible versions
pip install --upgrade vllm transformers torch
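A quick way to confirm the three packages import together without version conflicts (a simple sanity check, not from the repo):

python -c "import torch, transformers, vllm; print(torch.__version__, transformers.__version__, vllm.__version__)"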
# Check what is occupying the port
sudo lsof -i:8101
# Show all listening ports
sudo ss -tuln
# Show a specific port
sudo ss -tulpn | grep :8101
# Show process info via ss's filter syntax
sudo ss -tulpn sport = :8101
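If the port is held by a stale process, lsof's terse mode can feed the PID straight to kill (escalate to -9 only if plain TERM is ignored):

# Terminate whatever is listening on 8101 (-t prints bare PIDs; -r skips empty input)
sudo lsof -ti:8101 | xargs -r sudo kill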
"configurations": [
{
"name": "Python Debugger: Current File",
"type": "debugpy",
"request": "launch",
"program": "${file}",
"console": "integratedTerminal",
"cwd": "${fileDirname}",
"env": {"PYTHONPATH":"${workspaceFolder};${env:PYTHONPATH}"},
"envFile": "${workspaceFolder}/.env",
"justMyCode": false
},
{
"name": "app",
"type": "debugpy",
"request": "launch",
// "module": "paddlex.__main__",
"program": "${workspaceFolder}/zhch/paddlex_cli.py",
"console": "integratedTerminal",
"cwd": "${workspaceFolder}/zhch",
"env": {"PYTHONPATH":"${workspaceFolder};${env:PYTHONPATH}"},
"envFile": "${workspaceFolder}/.env",
"args": [
// "-m", "paddlex.paddlex_cli",
// "--get_pipeline_config", "${workspaceFolder}/zhch/PP-StructureV3-zhch.yaml"
"--pipeline", "PP-StructureV3",
"--use_doc_orientation_classify=True",
"--use_doc_unwarping=True",
"--input", "sample_data/300674-母公司现金流量表-扫描.png",
"--save_path", "sample_data/output",
"--device", "gpu",
],
"justMyCode": false
}
]
zhch/launch_model_vllm.sh requires the DotsOCR import to be added to /home/ubuntu/anaconda3/envs/py312/bin/vllm:
#!/home/ubuntu/anaconda3/envs/py312/bin/python3
# -*- coding: utf-8 -*-
import sys
from vllm.entrypoints.cli.main import main
# Importing the plugin module registers the DotsOCR model with vLLM's model registry
from DotsOCR import modeling_dots_ocr_vllm
if __name__ == "__main__":
    # Standard argv cleanup kept from the pip-generated console-script entrypoint
    if sys.argv[0].endswith("-script.pyw"):
        sys.argv[0] = sys.argv[0][:-11]
    elif sys.argv[0].endswith(".exe"):
        sys.argv[0] = sys.argv[0][:-4]
    sys.exit(main())
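The upstream dots.ocr README automates this patch with sed before starting the server; a sketch in that spirit (the weights path, port 8101, and served model name are assumptions chosen to match these notes):

# Patch the vllm entrypoint in place: add the DotsOCR import after vLLM's CLI import
sed -i '/^from vllm.entrypoints.cli.main import main$/a\from DotsOCR import modeling_dots_ocr_vllm' "$(which vllm)"

# Serve the weights; PYTHONPATH must contain the directory holding the DotsOCR package
hf_model_path=./weights/DotsOCR   # assumption: local weights directory
export PYTHONPATH=$(dirname "$hf_model_path"):$PYTHONPATH
vllm serve "$hf_model_path" \
    --port 8101 \
    --gpu-memory-utilization 0.95 \
    --served-model-name model \
    --trust-remote-code

# Verify the server answers
curl http://localhost:8101/v1/models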
# Monitor GPU memory usage (refresh every second)
watch -n 1 nvidia-smi
# Launch the Gradio demo app
python demo_gradio.py
Online demo: https://dotsocr.xiaohongshu.com/
Flash Attention is an efficient attention implementation that gains its speed by redesigning the memory-access pattern (see the sketch after this list):
- Block-wise computation: attention is computed tile by tile so intermediates stay small
- Memory-hierarchy optimization: working tiles are kept in fast on-chip SRAM instead of round-tripping through HBM
- Online softmax: the normalizer is maintained incrementally with a running max, so the full attention matrix is never materialized
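The online-softmax piece is the non-obvious one; a minimal NumPy sketch of the running-max/normalizer update (illustrative only, not the repo's kernel):

import numpy as np

def online_softmax(scores, block=4):
    # Streaming pass: keep a running max m and normalizer s such that
    # s == sum(exp(seen_scores - m)) after every block (FlashAttention's trick)
    m, s = -np.inf, 0.0
    for i in range(0, len(scores), block):
        chunk = scores[i:i + block]
        m_new = max(m, chunk.max())
        # Rescale the old sum to the new max before adding this block's terms
        s = s * np.exp(m - m_new) + np.exp(chunk - m_new).sum()
        m = m_new
    # FlashAttention rescales its partial output rows with these same factors
    return np.exp(scores - m) / s

x = np.random.randn(10)
ref = np.exp(x - x.max()) / np.exp(x - x.max()).sum()
assert np.allclose(online_softmax(x), ref)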
# demo/demo_hf.py
import torch
from transformers import AutoModelForCausalLM

model_path = "./weights/DotsOCR"  # assumption: adjust to your local weights path
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    attn_implementation="flash_attention_2",  # enable Flash Attention 2
    torch_dtype=torch.bfloat16,
    device_map="auto",
    trust_remote_code=True
)
flash-attn==2.8.0.post2  # CUDA-only; no macOS wheels
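On a CUDA machine, the pinned wheel usually has to be installed with build isolation disabled so its setup can find the already-installed torch (a common flash-attn install pattern, not specific to this repo):

uv pip install flash-attn==2.8.0.post2 --no-build-isolation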