#!/bin/bash
# filepath: ocr_platform/ocr_tools/daemons/paddleocr_local_daemon.sh
# PaddleOCR-VL local llama-server daemon (macOS), serving GGUF-format models.
# Targets Mac M4 Pro 48G, using Metal GPU acceleration.
# Model download: https://huggingface.co/PaddlePaddle/PaddleOCR-VL-1.5-GGUF
# One-time setup:
#   unset https_proxy http_proxy HF_ENDPOINT
#   llama-server -hf PaddlePaddle/PaddleOCR-VL-1.5-GGUF
#   mv ~/Library/Caches/llama.cpp/PaddlePaddle_PaddleOCR-VL-1.5-GGUF_PaddleOCR-VL-1.5.gguf ~/models/paddleocr_vl
#   mv ~/Library/Caches/llama.cpp/PaddlePaddle_PaddleOCR-VL-1.5-GGUF_PaddleOCR-VL-1.5-mmproj.gguf ~/models/paddleocr_vl
# Quick test:
#   curl -X POST http://localhost:8102/v1/chat/completions -d @payload.json
  11. LOGDIR="$HOME/workspace/logs"
  12. mkdir -p $LOGDIR
  13. PIDFILE="$LOGDIR/paddleocr_llamaserver.pid"
  14. LOGFILE="$LOGDIR/paddleocr_llamaserver.log"
  15. # 配置参数
  16. CONDA_ENV="mineru2"
  17. PORT="8102"
  18. HOST="0.0.0.0"
  19. # 本地 GGUF 模型路径
  20. MODEL_PATH="$HOME/models/paddleocr_vl/PaddlePaddle_PaddleOCR-VL-1.5-GGUF_PaddleOCR-VL-1.5.gguf"
  21. MMPROJ_PATH="$HOME/models/paddleocr_vl/PaddlePaddle_PaddleOCR-VL-1.5-GGUF_PaddleOCR-VL-1.5-mmproj.gguf"
  22. # llama-server 参数
  23. CONTEXT_SIZE="16384" # 上下文长度(需 >= max_tokens,推荐 8192-16384)
  24. GPU_LAYERS="99" # Metal GPU 层数(99 表示全部)
  25. THREADS="8" # CPU 线程数(M4 Pro 建议值)
  26. BATCH_SIZE="512" # 批处理大小
  27. UBATCH_SIZE="128" # 微批处理大小
  28. # conda 环境激活
  29. if [ -f "$HOME/anaconda3/etc/profile.d/conda.sh" ]; then
  30. source "$HOME/anaconda3/etc/profile.d/conda.sh"
  31. conda activate $CONDA_ENV
  32. elif [ -f "$HOME/miniconda3/etc/profile.d/conda.sh" ]; then
  33. source "$HOME/miniconda3/etc/profile.d/conda.sh"
  34. conda activate $CONDA_ENV
  35. elif [ -f "/opt/miniconda3/etc/profile.d/conda.sh" ]; then
  36. source /opt/miniconda3/etc/profile.d/conda.sh
  37. conda activate $CONDA_ENV
  38. else
  39. echo "Warning: conda initialization file not found, trying direct path"
  40. export PATH="/opt/miniconda3/envs/$CONDA_ENV/bin:$PATH"
  41. fi
  42. start() {
  43. if [ -f $PIDFILE ] && kill -0 $(cat $PIDFILE) 2>/dev/null; then
  44. echo "PaddleOCR-VL llama-server 已在运行"
  45. return 1
  46. fi
  47. echo "启动 PaddleOCR-VL llama-server 守护进程..."
  48. echo "Host: $HOST, Port: $PORT"
  49. echo "主模型: $MODEL_PATH"
  50. echo "多模态投影器: $MMPROJ_PATH"
  51. echo "上下文长度: $CONTEXT_SIZE"
  52. echo "GPU 层数: $GPU_LAYERS (Metal)"
  53. echo "线程数: $THREADS"
  54. # 检查模型文件是否存在
  55. if [ ! -f "$MODEL_PATH" ]; then
  56. echo "❌ 主模型文件不存在: $MODEL_PATH"
  57. echo "请确认模型已下载到 llama.cpp 缓存目录"
  58. return 1
  59. fi
  60. if [ ! -f "$MMPROJ_PATH" ]; then
  61. echo "❌ 多模态投影器文件不存在: $MMPROJ_PATH"
  62. echo "请确认 mmproj 文件已下载"
  63. return 1
  64. fi
  65. # 检查 llama-server 命令
  66. if ! command -v llama-server >/dev/null 2>&1; then
  67. echo "❌ llama-server 未找到"
  68. echo "请安装: brew install llama.cpp"
  69. return 1
  70. fi
  71. echo "🔧 使用 llama-server: $(which llama-server)"
  72. echo "🔧 llama.cpp 版本: $(llama-server --version 2>&1 | head -1 || echo 'Unknown')"
  73. echo "💻 系统信息:"
  74. echo " 架构: $(uname -m)"
  75. echo " 系统: $(uname -s)"
  76. echo " 内存: $(sysctl -n hw.memsize | awk '{printf "%.1f GB", $1/1024/1024/1024}')"
  77. # 启动 llama-server
  78. nohup llama-server \
  79. -m "$MODEL_PATH" \
  80. --mmproj "$MMPROJ_PATH" \
  81. --host $HOST \
  82. --port $PORT \
  83. --media-path $HOME/workspace \
  84. -c $CONTEXT_SIZE \
  85. -ngl $GPU_LAYERS \
  86. -t $THREADS \
  87. -b $BATCH_SIZE \
  88. -ub $UBATCH_SIZE \
  89. --temp 0 \
  90. > $LOGFILE 2>&1 &
  91. echo $! > $PIDFILE
  92. echo "✅ PaddleOCR-VL llama-server 已启动,PID: $(cat $PIDFILE)"
  93. echo "📋 日志文件: $LOGFILE"
  94. echo "🌐 服务 URL: http://$HOST:$PORT"
  95. echo "📖 OpenAI 兼容 API: http://localhost:$PORT/v1 (chat/completions, models)"
  96. echo ""
  97. echo "等待服务启动..."
  98. sleep 5
  99. status
  100. }
  101. stop() {
  102. if [ ! -f $PIDFILE ]; then
  103. echo "PaddleOCR-VL llama-server 未在运行"
  104. return 1
  105. fi
  106. PID=$(cat $PIDFILE)
  107. echo "停止 PaddleOCR-VL llama-server (PID: $PID)..."
  108. kill $PID
  109. for i in {1..30}; do
  110. if ! kill -0 $PID 2>/dev/null; then
  111. break
  112. fi
  113. echo "等待进程停止... ($i/30)"
  114. sleep 1
  115. done
  116. if kill -0 $PID 2>/dev/null; then
  117. echo "强制终止进程..."
  118. kill -9 $PID
  119. fi
  120. rm -f $PIDFILE
  121. echo "✅ PaddleOCR-VL llama-server 已停止"
  122. }
  123. status() {
  124. if [ -f $PIDFILE ] && kill -0 $(cat $PIDFILE) 2>/dev/null; then
  125. PID=$(cat $PIDFILE)
  126. echo "✅ PaddleOCR-VL llama-server 正在运行 (PID: $PID)"
  127. echo "🌐 服务 URL: http://$HOST:$PORT"
  128. echo "📋 日志文件: $LOGFILE"
  129. # 检查端口监听状态
  130. if lsof -nP -iTCP:$PORT -sTCP:LISTEN >/dev/null 2>&1; then
  131. echo "🔗 端口 $PORT 正在监听"
  132. else
  133. echo "⚠️ 端口 $PORT 未在监听(服务可能正在启动)"
  134. fi
  135. # 检查 API 响应
  136. if command -v curl >/dev/null 2>&1; then
  137. if curl -s --connect-timeout 2 http://127.0.0.1:$PORT/v1/models > /dev/null 2>&1; then
  138. echo "🎯 API 响应正常"
  139. else
  140. echo "⚠️ API 无响应(服务可能正在启动)"
  141. fi
  142. fi
  143. # 显示进程内存使用
  144. if command -v ps >/dev/null 2>&1; then
  145. MEM=$(ps -o rss= -p $PID 2>/dev/null | awk '{printf "%.2f GB", $1/1024/1024}')
  146. if [ -n "$MEM" ]; then
  147. echo "💾 内存使用: $MEM"
  148. fi
  149. fi
  150. if [ -f $LOGFILE ]; then
  151. echo "📄 最近日志(最后 3 行):"
  152. tail -3 $LOGFILE | sed 's/^/ /'
  153. fi
  154. else
  155. echo "❌ PaddleOCR-VL llama-server 未在运行"
  156. if [ -f $PIDFILE ]; then
  157. echo "删除过期的 PID 文件..."
  158. rm -f $PIDFILE
  159. fi
  160. fi
  161. }
  162. logs() {
  163. if [ -f $LOGFILE ]; then
  164. echo "📄 PaddleOCR-VL llama-server 日志:"
  165. echo "====================="
  166. tail -f $LOGFILE
  167. else
  168. echo "❌ 日志文件不存在: $LOGFILE"
  169. fi
  170. }
  171. config() {
  172. echo "📋 当前配置:"
  173. echo " Conda 环境: $CONDA_ENV"
  174. echo " Host: $HOST"
  175. echo " Port: $PORT"
  176. echo " 主模型路径: $MODEL_PATH"
  177. echo " 多模态投影器: $MMPROJ_PATH"
  178. echo " 上下文长度: $CONTEXT_SIZE"
  179. echo " GPU 层数: $GPU_LAYERS"
  180. echo " 线程数: $THREADS"
  181. echo " 批处理大小: $BATCH_SIZE"
  182. echo " 微批处理大小: $UBATCH_SIZE"
  183. echo " PID 文件: $PIDFILE"
  184. echo " 日志文件: $LOGFILE"
  185. echo ""
  186. echo "📦 模型文件检查:"
  187. if [ -f "$MODEL_PATH" ]; then
  188. SIZE=$(du -h "$MODEL_PATH" | cut -f1)
  189. echo " ✅ 主模型存在 ($SIZE)"
  190. else
  191. echo " ❌ 主模型不存在"
  192. fi
  193. if [ -f "$MMPROJ_PATH" ]; then
  194. SIZE=$(du -h "$MMPROJ_PATH" | cut -f1)
  195. echo " ✅ 多模态投影器存在 ($SIZE)"
  196. else
  197. echo " ❌ 多模态投影器不存在"
  198. fi
  199. echo ""
  200. echo "🔧 环境检查:"
  201. echo " llama-server: $(which llama-server 2>/dev/null || echo '未安装')"
  202. if command -v llama-server >/dev/null 2>&1; then
  203. LLAMA_VERSION=$(llama-server --version 2>&1 | head -1 || echo 'Unknown')
  204. echo " 版本: $LLAMA_VERSION"
  205. fi
  206. echo " Conda: $(which conda 2>/dev/null || echo '未找到')"
  207. echo " 当前 Python: $(which python 2>/dev/null || echo '未找到')"
  208. echo ""
  209. echo "💻 系统信息:"
  210. echo " 架构: $(uname -m)"
  211. echo " 系统版本: $(sw_vers -productVersion 2>/dev/null || echo 'Unknown')"
  212. echo " 总内存: $(sysctl -n hw.memsize 2>/dev/null | awk '{printf "%.1f GB", $1/1024/1024/1024}' || echo 'Unknown')"
  213. echo " CPU 核心: $(sysctl -n hw.ncpu 2>/dev/null || echo 'Unknown')"
  214. }
  215. test_api() {
  216. echo "🧪 测试 PaddleOCR-VL llama-server API..."
  217. if [ ! -f $PIDFILE ] || ! kill -0 $(cat $PIDFILE) 2>/dev/null; then
  218. echo "❌ PaddleOCR-VL llama-server 服务未在运行"
  219. return 1
  220. fi
  221. if ! command -v curl >/dev/null 2>&1; then
  222. echo "❌ curl 命令未找到"
  223. return 1
  224. fi
  225. echo "📡 测试 /v1/models 端点..."
  226. response=$(curl -s --connect-timeout 10 http://127.0.0.1:$PORT/v1/models)
  227. if [ $? -eq 0 ]; then
  228. echo "✅ Models 端点可访问"
  229. echo "$response" | python -m json.tool 2>/dev/null || echo "$response"
  230. else
  231. echo "❌ Models 端点不可访问"
  232. fi
  233. echo ""
  234. echo "📡 测试 /health 端点..."
  235. health=$(curl -s --connect-timeout 5 http://127.0.0.1:$PORT/health)
  236. if [ $? -eq 0 ]; then
  237. echo "✅ Health 端点: $health"
  238. else
  239. echo "⚠️ Health 端点不可访问"
  240. fi
  241. }
  242. test_client() {
  243. echo "🧪 测试 PaddleOCR-VL 与 llama-server 集成..."
  244. if [ ! -f $PIDFILE ] || ! kill -0 $(cat $PIDFILE) 2>/dev/null; then
  245. echo "❌ PaddleOCR-VL llama-server 服务未在运行,请先启动: $0 start"
  246. return 1
  247. fi
  248. CONFIG_FILE="/Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/universal_doc_parser/config/bank_statement_paddleocr_local.yaml"
  249. echo "📄 配置文件: $CONFIG_FILE"
  250. echo ""
  251. echo "确保配置文件中 vl_recognition.api_url 指向: http://localhost:$PORT/v1/chat/completions"
  252. echo ""
  253. echo "测试命令示例:"
  254. echo " cd /Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/universal_doc_parser"
  255. echo " conda activate mineru2"
  256. echo " python parse.py --input /path/to/test/image.png --config $CONFIG_FILE --debug"
  257. echo ""
  258. echo "或者使用 curl 直接测试 API:"
  259. echo " curl -X POST http://localhost:$PORT/v1/chat/completions \\"
  260. echo " -H 'Content-Type: application/json' \\"
  261. echo " -d '{"
  262. echo " \"model\": \"paddleocr-vl\","
  263. echo " \"messages\": ["
  264. echo " {"
  265. echo " \"role\": \"user\","
  266. echo " \"content\": ["
  267. echo " {\"type\": \"text\", \"text\": \"Table Recognition:\"},"
  268. echo " {\"type\": \"image_url\", \"image_url\": {\"url\": \"file:///path/to/image.png\"}}"
  269. echo " ]"
  270. echo " }"
  271. echo " ],"
  272. echo " \"max_tokens\": 4096"
  273. echo " }'"
  274. }
  275. usage() {
  276. echo "PaddleOCR-VL llama-server 服务守护进程(macOS)"
  277. echo "==========================================="
  278. echo "用法: $0 {start|stop|restart|status|logs|config|test|test-client}"
  279. echo ""
  280. echo "命令:"
  281. echo " start - 启动 PaddleOCR-VL llama-server 服务"
  282. echo " stop - 停止 PaddleOCR-VL llama-server 服务"
  283. echo " restart - 重启 PaddleOCR-VL llama-server 服务"
  284. echo " status - 显示服务状态和资源使用"
  285. echo " logs - 显示服务日志(跟踪模式)"
  286. echo " config - 显示当前配置"
  287. echo " test - 测试 /v1/models API 端点"
  288. echo " test-client - 显示如何测试与配置文件集成"
  289. echo ""
  290. echo "配置(编辑脚本修改):"
  291. echo " Host: $HOST"
  292. echo " Port: $PORT"
  293. echo " 主模型: $MODEL_PATH"
  294. echo " 多模态投影器: $MMPROJ_PATH"
  295. echo " 上下文长度: $CONTEXT_SIZE"
  296. echo " GPU 层数: $GPU_LAYERS (Metal)"
  297. echo ""
  298. echo "示例:"
  299. echo " ./paddleocr_local_daemon.sh start"
  300. echo " ./paddleocr_local_daemon.sh status"
  301. echo " ./paddleocr_local_daemon.sh logs"
  302. echo " ./paddleocr_local_daemon.sh test"
  303. echo ""
  304. echo "前置要求:"
  305. echo " 1. 安装 llama.cpp: brew install llama.cpp"
  306. echo " 2. 模型文件位于: ~/Library/Caches/llama.cpp/"
  307. echo " 3. conda 环境 mineru2 已配置"
  308. }
  309. case "$1" in
  310. start)
  311. start
  312. ;;
  313. stop)
  314. stop
  315. ;;
  316. restart)
  317. stop
  318. sleep 3
  319. start
  320. ;;
  321. status)
  322. status
  323. ;;
  324. logs)
  325. logs
  326. ;;
  327. config)
  328. config
  329. ;;
  330. test)
  331. test_api
  332. ;;
  333. test-client)
  334. test_client
  335. ;;
  336. *)
  337. usage
  338. exit 1
  339. ;;
  340. esac