  1. #!/bin/bash
  2. # filepath: ocr_platform/ocr_tools/daemons/paddleocr_local_daemon.sh
  3. # 对应: PaddleOCR-VL 本地 llama-server 服务(macOS),使用 GGUF 格式模型
  4. # 适用于 Mac M4 Pro 48G,使用 Metal GPU 加速
  5. # 模型下载地址: https://huggingface.co/PaddlePaddle/PaddleOCR-VL-1.5-GGUF
  6. # curl -X POST http://localhost:8081/v1/chat/completions -d @payload.json
  7. LOGDIR="$HOME/workspace/logs"
  8. mkdir -p $LOGDIR
  9. PIDFILE="$LOGDIR/paddleocr_llamaserver.pid"
  10. LOGFILE="$LOGDIR/paddleocr_llamaserver.log"
  11. # 配置参数
  12. CONDA_ENV="mineru2"
  13. PORT="8081"
  14. HOST="0.0.0.0"
  15. # 本地 GGUF 模型路径
  16. MODEL_PATH="$HOME/Library/Caches/llama.cpp/PaddlePaddle_PaddleOCR-VL-1.5-GGUF_PaddleOCR-VL-1.5.gguf"
  17. MMPROJ_PATH="$HOME/Library/Caches/llama.cpp/PaddlePaddle_PaddleOCR-VL-1.5-GGUF_PaddleOCR-VL-1.5-mmproj.gguf"
  18. # llama-server 参数
  19. CONTEXT_SIZE="16384" # 上下文长度(需 >= max_tokens,推荐 8192-16384)
  20. GPU_LAYERS="99" # Metal GPU 层数(99 表示全部)
  21. THREADS="8" # CPU 线程数(M4 Pro 建议值)
  22. BATCH_SIZE="512" # 批处理大小
  23. UBATCH_SIZE="128" # 微批处理大小
  24. # conda 环境激活
  25. if [ -f "$HOME/anaconda3/etc/profile.d/conda.sh" ]; then
  26. source "$HOME/anaconda3/etc/profile.d/conda.sh"
  27. conda activate $CONDA_ENV
  28. elif [ -f "$HOME/miniconda3/etc/profile.d/conda.sh" ]; then
  29. source "$HOME/miniconda3/etc/profile.d/conda.sh"
  30. conda activate $CONDA_ENV
  31. elif [ -f "/opt/miniconda3/etc/profile.d/conda.sh" ]; then
  32. source /opt/miniconda3/etc/profile.d/conda.sh
  33. conda activate $CONDA_ENV
  34. else
  35. echo "Warning: conda initialization file not found, trying direct path"
  36. export PATH="/opt/miniconda3/envs/$CONDA_ENV/bin:$PATH"
  37. fi
  38. start() {
  39. if [ -f $PIDFILE ] && kill -0 $(cat $PIDFILE) 2>/dev/null; then
  40. echo "PaddleOCR-VL llama-server 已在运行"
  41. return 1
  42. fi
  43. echo "启动 PaddleOCR-VL llama-server 守护进程..."
  44. echo "Host: $HOST, Port: $PORT"
  45. echo "主模型: $MODEL_PATH"
  46. echo "多模态投影器: $MMPROJ_PATH"
  47. echo "上下文长度: $CONTEXT_SIZE"
  48. echo "GPU 层数: $GPU_LAYERS (Metal)"
  49. echo "线程数: $THREADS"
  50. # 检查模型文件是否存在
  51. if [ ! -f "$MODEL_PATH" ]; then
  52. echo "❌ 主模型文件不存在: $MODEL_PATH"
  53. echo "请确认模型已下载到 llama.cpp 缓存目录"
  54. return 1
  55. fi
  56. if [ ! -f "$MMPROJ_PATH" ]; then
  57. echo "❌ 多模态投影器文件不存在: $MMPROJ_PATH"
  58. echo "请确认 mmproj 文件已下载"
  59. return 1
  60. fi
  61. # 检查 llama-server 命令
  62. if ! command -v llama-server >/dev/null 2>&1; then
  63. echo "❌ llama-server 未找到"
  64. echo "请安装: brew install llama.cpp"
  65. return 1
  66. fi
  67. echo "🔧 使用 llama-server: $(which llama-server)"
  68. echo "🔧 llama.cpp 版本: $(llama-server --version 2>&1 | head -1 || echo 'Unknown')"
  69. echo "💻 系统信息:"
  70. echo " 架构: $(uname -m)"
  71. echo " 系统: $(uname -s)"
  72. echo " 内存: $(sysctl -n hw.memsize | awk '{printf "%.1f GB", $1/1024/1024/1024}')"
  73. # 启动 llama-server
  74. nohup llama-server \
  75. -m "$MODEL_PATH" \
  76. --mmproj "$MMPROJ_PATH" \
  77. --host $HOST \
  78. --port $PORT \
  79. --media-path /Users/zhch158/workspace \
  80. -c $CONTEXT_SIZE \
  81. -ngl $GPU_LAYERS \
  82. -t $THREADS \
  83. -b $BATCH_SIZE \
  84. -ub $UBATCH_SIZE \
  85. --temp 0 \
  86. > $LOGFILE 2>&1 &
  87. echo $! > $PIDFILE
  88. echo "✅ PaddleOCR-VL llama-server 已启动,PID: $(cat $PIDFILE)"
  89. echo "📋 日志文件: $LOGFILE"
  90. echo "🌐 服务 URL: http://$HOST:$PORT"
  91. echo "📖 OpenAI 兼容 API: http://localhost:$PORT/v1 (chat/completions, models)"
  92. echo ""
  93. echo "等待服务启动..."
  94. sleep 5
  95. status
  96. }
  97. stop() {
  98. if [ ! -f $PIDFILE ]; then
  99. echo "PaddleOCR-VL llama-server 未在运行"
  100. return 1
  101. fi
  102. PID=$(cat $PIDFILE)
  103. echo "停止 PaddleOCR-VL llama-server (PID: $PID)..."
  104. kill $PID
  105. for i in {1..30}; do
  106. if ! kill -0 $PID 2>/dev/null; then
  107. break
  108. fi
  109. echo "等待进程停止... ($i/30)"
  110. sleep 1
  111. done
  112. if kill -0 $PID 2>/dev/null; then
  113. echo "强制终止进程..."
  114. kill -9 $PID
  115. fi
  116. rm -f $PIDFILE
  117. echo "✅ PaddleOCR-VL llama-server 已停止"
  118. }
  119. status() {
  120. if [ -f $PIDFILE ] && kill -0 $(cat $PIDFILE) 2>/dev/null; then
  121. PID=$(cat $PIDFILE)
  122. echo "✅ PaddleOCR-VL llama-server 正在运行 (PID: $PID)"
  123. echo "🌐 服务 URL: http://$HOST:$PORT"
  124. echo "📋 日志文件: $LOGFILE"
  125. # 检查端口监听状态
  126. if lsof -nP -iTCP:$PORT -sTCP:LISTEN >/dev/null 2>&1; then
  127. echo "🔗 端口 $PORT 正在监听"
  128. else
  129. echo "⚠️ 端口 $PORT 未在监听(服务可能正在启动)"
  130. fi
  131. # 检查 API 响应
  132. if command -v curl >/dev/null 2>&1; then
  133. if curl -s --connect-timeout 2 http://127.0.0.1:$PORT/v1/models > /dev/null 2>&1; then
  134. echo "🎯 API 响应正常"
  135. else
  136. echo "⚠️ API 无响应(服务可能正在启动)"
  137. fi
  138. fi
  139. # 显示进程内存使用
  140. if command -v ps >/dev/null 2>&1; then
  141. MEM=$(ps -o rss= -p $PID 2>/dev/null | awk '{printf "%.2f GB", $1/1024/1024}')
  142. if [ -n "$MEM" ]; then
  143. echo "💾 内存使用: $MEM"
  144. fi
  145. fi
  146. if [ -f $LOGFILE ]; then
  147. echo "📄 最近日志(最后 3 行):"
  148. tail -3 $LOGFILE | sed 's/^/ /'
  149. fi
  150. else
  151. echo "❌ PaddleOCR-VL llama-server 未在运行"
  152. if [ -f $PIDFILE ]; then
  153. echo "删除过期的 PID 文件..."
  154. rm -f $PIDFILE
  155. fi
  156. fi
  157. }
  158. logs() {
  159. if [ -f $LOGFILE ]; then
  160. echo "📄 PaddleOCR-VL llama-server 日志:"
  161. echo "====================="
  162. tail -f $LOGFILE
  163. else
  164. echo "❌ 日志文件不存在: $LOGFILE"
  165. fi
  166. }
  167. config() {
  168. echo "📋 当前配置:"
  169. echo " Conda 环境: $CONDA_ENV"
  170. echo " Host: $HOST"
  171. echo " Port: $PORT"
  172. echo " 主模型路径: $MODEL_PATH"
  173. echo " 多模态投影器: $MMPROJ_PATH"
  174. echo " 上下文长度: $CONTEXT_SIZE"
  175. echo " GPU 层数: $GPU_LAYERS"
  176. echo " 线程数: $THREADS"
  177. echo " 批处理大小: $BATCH_SIZE"
  178. echo " 微批处理大小: $UBATCH_SIZE"
  179. echo " PID 文件: $PIDFILE"
  180. echo " 日志文件: $LOGFILE"
  181. echo ""
  182. echo "📦 模型文件检查:"
  183. if [ -f "$MODEL_PATH" ]; then
  184. SIZE=$(du -h "$MODEL_PATH" | cut -f1)
  185. echo " ✅ 主模型存在 ($SIZE)"
  186. else
  187. echo " ❌ 主模型不存在"
  188. fi
  189. if [ -f "$MMPROJ_PATH" ]; then
  190. SIZE=$(du -h "$MMPROJ_PATH" | cut -f1)
  191. echo " ✅ 多模态投影器存在 ($SIZE)"
  192. else
  193. echo " ❌ 多模态投影器不存在"
  194. fi
  195. echo ""
  196. echo "🔧 环境检查:"
  197. echo " llama-server: $(which llama-server 2>/dev/null || echo '未安装')"
  198. if command -v llama-server >/dev/null 2>&1; then
  199. LLAMA_VERSION=$(llama-server --version 2>&1 | head -1 || echo 'Unknown')
  200. echo " 版本: $LLAMA_VERSION"
  201. fi
  202. echo " Conda: $(which conda 2>/dev/null || echo '未找到')"
  203. echo " 当前 Python: $(which python 2>/dev/null || echo '未找到')"
  204. echo ""
  205. echo "💻 系统信息:"
  206. echo " 架构: $(uname -m)"
  207. echo " 系统版本: $(sw_vers -productVersion 2>/dev/null || echo 'Unknown')"
  208. echo " 总内存: $(sysctl -n hw.memsize 2>/dev/null | awk '{printf "%.1f GB", $1/1024/1024/1024}' || echo 'Unknown')"
  209. echo " CPU 核心: $(sysctl -n hw.ncpu 2>/dev/null || echo 'Unknown')"
  210. }
  211. test_api() {
  212. echo "🧪 测试 PaddleOCR-VL llama-server API..."
  213. if [ ! -f $PIDFILE ] || ! kill -0 $(cat $PIDFILE) 2>/dev/null; then
  214. echo "❌ PaddleOCR-VL llama-server 服务未在运行"
  215. return 1
  216. fi
  217. if ! command -v curl >/dev/null 2>&1; then
  218. echo "❌ curl 命令未找到"
  219. return 1
  220. fi
  221. echo "📡 测试 /v1/models 端点..."
  222. response=$(curl -s --connect-timeout 10 http://127.0.0.1:$PORT/v1/models)
  223. if [ $? -eq 0 ]; then
  224. echo "✅ Models 端点可访问"
  225. echo "$response" | python -m json.tool 2>/dev/null || echo "$response"
  226. else
  227. echo "❌ Models 端点不可访问"
  228. fi
  229. echo ""
  230. echo "📡 测试 /health 端点..."
  231. health=$(curl -s --connect-timeout 5 http://127.0.0.1:$PORT/health)
  232. if [ $? -eq 0 ]; then
  233. echo "✅ Health 端点: $health"
  234. else
  235. echo "⚠️ Health 端点不可访问"
  236. fi
  237. }
  238. test_client() {
  239. echo "🧪 测试 PaddleOCR-VL 与 llama-server 集成..."
  240. if [ ! -f $PIDFILE ] || ! kill -0 $(cat $PIDFILE) 2>/dev/null; then
  241. echo "❌ PaddleOCR-VL llama-server 服务未在运行,请先启动: $0 start"
  242. return 1
  243. fi
  244. CONFIG_FILE="/Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/universal_doc_parser/config/bank_statement_paddleocr_local.yaml"
  245. echo "📄 配置文件: $CONFIG_FILE"
  246. echo ""
  247. echo "确保配置文件中 vl_recognition.api_url 指向: http://localhost:$PORT/v1/chat/completions"
  248. echo ""
  249. echo "测试命令示例:"
  250. echo " cd /Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/universal_doc_parser"
  251. echo " conda activate mineru2"
  252. echo " python parse.py --input /path/to/test/image.png --config $CONFIG_FILE --debug"
  253. echo ""
  254. echo "或者使用 curl 直接测试 API:"
  255. echo " curl -X POST http://localhost:$PORT/v1/chat/completions \\"
  256. echo " -H 'Content-Type: application/json' \\"
  257. echo " -d '{"
  258. echo " \"model\": \"paddleocr-vl\","
  259. echo " \"messages\": ["
  260. echo " {"
  261. echo " \"role\": \"user\","
  262. echo " \"content\": ["
  263. echo " {\"type\": \"text\", \"text\": \"Table Recognition:\"},"
  264. echo " {\"type\": \"image_url\", \"image_url\": {\"url\": \"file:///path/to/image.png\"}}"
  265. echo " ]"
  266. echo " }"
  267. echo " ],"
  268. echo " \"max_tokens\": 4096"
  269. echo " }'"
  270. }
  271. usage() {
  272. echo "PaddleOCR-VL llama-server 服务守护进程(macOS)"
  273. echo "==========================================="
  274. echo "用法: $0 {start|stop|restart|status|logs|config|test|test-client}"
  275. echo ""
  276. echo "命令:"
  277. echo " start - 启动 PaddleOCR-VL llama-server 服务"
  278. echo " stop - 停止 PaddleOCR-VL llama-server 服务"
  279. echo " restart - 重启 PaddleOCR-VL llama-server 服务"
  280. echo " status - 显示服务状态和资源使用"
  281. echo " logs - 显示服务日志(跟踪模式)"
  282. echo " config - 显示当前配置"
  283. echo " test - 测试 /v1/models API 端点"
  284. echo " test-client - 显示如何测试与配置文件集成"
  285. echo ""
  286. echo "配置(编辑脚本修改):"
  287. echo " Host: $HOST"
  288. echo " Port: $PORT"
  289. echo " 主模型: $MODEL_PATH"
  290. echo " 多模态投影器: $MMPROJ_PATH"
  291. echo " 上下文长度: $CONTEXT_SIZE"
  292. echo " GPU 层数: $GPU_LAYERS (Metal)"
  293. echo ""
  294. echo "示例:"
  295. echo " ./paddleocr_local_daemon.sh start"
  296. echo " ./paddleocr_local_daemon.sh status"
  297. echo " ./paddleocr_local_daemon.sh logs"
  298. echo " ./paddleocr_local_daemon.sh test"
  299. echo ""
  300. echo "前置要求:"
  301. echo " 1. 安装 llama.cpp: brew install llama.cpp"
  302. echo " 2. 模型文件位于: ~/Library/Caches/llama.cpp/"
  303. echo " 3. conda 环境 mineru2 已配置"
  304. }
  305. case "$1" in
  306. start)
  307. start
  308. ;;
  309. stop)
  310. stop
  311. ;;
  312. restart)
  313. stop
  314. sleep 3
  315. start
  316. ;;
  317. status)
  318. status
  319. ;;
  320. logs)
  321. logs
  322. ;;
  323. config)
  324. config
  325. ;;
  326. test)
  327. test_api
  328. ;;
  329. test-client)
  330. test_client
  331. ;;
  332. *)
  333. usage
  334. exit 1
  335. ;;
  336. esac