#!/bin/bash
# filepath: ocr_platform/ocr_tools/daemons/glmocr_local_daemon.sh
# Purpose: local llama-server service for GLM-OCR on macOS, using GGUF models
# Target: Mac M4 Pro 48 GB, with Metal GPU acceleration
# Model download: https://huggingface.co/ggml-org/GLM-OCR-GGUF
# Model download: https://huggingface.co/PaddlePaddle/PaddleOCR-VL-1.5-GGUF
# curl -X POST http://localhost:8080/v1/chat/completions -H 'Content-Type: application/json' -d @payload.json
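#
# A minimal payload.json sketch for the curl line above (illustrative only;
# "glm-ocr" and the image path are placeholders that mirror the test_client
# example below, adjust to your setup):
#   {
#     "model": "glm-ocr",
#     "messages": [
#       {"role": "user", "content": [
#         {"type": "text", "text": "Table Recognition:"},
#         {"type": "image_url", "image_url": {"url": "file:///path/to/image.png"}}
#       ]}
#     ],
#     "max_tokens": 4096
#   }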
LOGDIR="$HOME/workspace/logs"
mkdir -p "$LOGDIR"
PIDFILE="$LOGDIR/glmocr_llamaserver.pid"
LOGFILE="$LOGDIR/glmocr_llamaserver.log"

# Configuration
CONDA_ENV="mineru2"
PORT="8080"
HOST="0.0.0.0"

# Local GGUF model paths
MODEL_PATH="$HOME/Library/Caches/llama.cpp/ggml-org_GLM-OCR-GGUF_GLM-OCR-Q8_0.gguf"
MMPROJ_PATH="$HOME/Library/Caches/llama.cpp/ggml-org_GLM-OCR-GGUF_mmproj-GLM-OCR-Q8_0.gguf"

# llama-server parameters
CONTEXT_SIZE="16384"  # context length (must be >= max_tokens; 8192-16384 recommended)
GPU_LAYERS="99"       # layers offloaded to the Metal GPU (99 = all)
THREADS="8"           # CPU threads (suggested value for M4 Pro)
BATCH_SIZE="512"      # batch size
UBATCH_SIZE="128"     # micro-batch size

# Activate the conda environment
if [ -f "$HOME/anaconda3/etc/profile.d/conda.sh" ]; then
    source "$HOME/anaconda3/etc/profile.d/conda.sh"
    conda activate "$CONDA_ENV"
elif [ -f "$HOME/miniconda3/etc/profile.d/conda.sh" ]; then
    source "$HOME/miniconda3/etc/profile.d/conda.sh"
    conda activate "$CONDA_ENV"
elif [ -f "/opt/miniconda3/etc/profile.d/conda.sh" ]; then
    source "/opt/miniconda3/etc/profile.d/conda.sh"
    conda activate "$CONDA_ENV"
else
    echo "Warning: conda initialization file not found, trying direct path"
    export PATH="/opt/miniconda3/envs/$CONDA_ENV/bin:$PATH"
fi
start() {
    if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "GLM-OCR llama-server is already running"
        return 1
    fi

    echo "Starting the GLM-OCR llama-server daemon..."
    echo "Host: $HOST, Port: $PORT"
    echo "Main model: $MODEL_PATH"
    echo "Multimodal projector: $MMPROJ_PATH"
    echo "Context length: $CONTEXT_SIZE"
    echo "GPU layers: $GPU_LAYERS (Metal)"
    echo "Threads: $THREADS"

    # Check that the model files exist
    if [ ! -f "$MODEL_PATH" ]; then
        echo "❌ Main model file not found: $MODEL_PATH"
        echo "Make sure the model has been downloaded to the llama.cpp cache directory"
        return 1
    fi
    if [ ! -f "$MMPROJ_PATH" ]; then
        echo "❌ Multimodal projector file not found: $MMPROJ_PATH"
        echo "Make sure the mmproj file has been downloaded"
        return 1
    fi

    # Check that the llama-server command is available
    if ! command -v llama-server >/dev/null 2>&1; then
        echo "❌ llama-server not found"
        echo "Install it with: brew install llama.cpp"
        return 1
    fi

    echo "🔧 Using llama-server: $(which llama-server)"
    echo "🔧 llama.cpp version: $(llama-server --version 2>&1 | head -1 || echo 'Unknown')"
    echo "💻 System info:"
    echo "  Architecture: $(uname -m)"
    echo "  OS: $(uname -s)"
    echo "  Memory: $(sysctl -n hw.memsize | awk '{printf "%.1f GB", $1/1024/1024/1024}')"

    # Start llama-server
    # --log-disable
    nohup llama-server \
        -m "$MODEL_PATH" \
        --mmproj "$MMPROJ_PATH" \
        --host "$HOST" \
        --port "$PORT" \
        --media-path /Users/zhch158/workspace \
        -c "$CONTEXT_SIZE" \
        -ngl "$GPU_LAYERS" \
        -t "$THREADS" \
        -b "$BATCH_SIZE" \
        -ub "$UBATCH_SIZE" \
        --temp 0 \
        > "$LOGFILE" 2>&1 &
    echo $! > "$PIDFILE"

    echo "✅ GLM-OCR llama-server started, PID: $(cat "$PIDFILE")"
    echo "📋 Log file: $LOGFILE"
    echo "🌐 Service URL: http://$HOST:$PORT"
    echo "📖 OpenAI-compatible API: http://localhost:$PORT/v1 (chat/completions, models)"
    echo ""
    echo "Waiting for the service to start..."
    sleep 5
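    # Note: a fixed 5-second sleep can be too short while a large Q8_0 GGUF
    # model loads. An optional readiness poll (a sketch, using the same
    # /health endpoint that test_api checks below):
    #   for i in {1..60}; do
    #       curl -s --connect-timeout 1 "http://127.0.0.1:$PORT/health" >/dev/null 2>&1 && break
    #       sleep 1
    #   done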
    status
}
stop() {
    if [ ! -f "$PIDFILE" ]; then
        echo "GLM-OCR llama-server is not running"
        return 1
    fi
    PID=$(cat "$PIDFILE")
    echo "Stopping GLM-OCR llama-server (PID: $PID)..."
    kill "$PID"
    for i in {1..30}; do
        if ! kill -0 "$PID" 2>/dev/null; then
            break
        fi
        echo "Waiting for the process to stop... ($i/30)"
        sleep 1
    done
    if kill -0 "$PID" 2>/dev/null; then
        echo "Force-killing the process..."
        kill -9 "$PID"
    fi
    rm -f "$PIDFILE"
    echo "✅ GLM-OCR llama-server stopped"
}
status() {
    if [ -f "$PIDFILE" ] && kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        PID=$(cat "$PIDFILE")
        echo "✅ GLM-OCR llama-server is running (PID: $PID)"
        echo "🌐 Service URL: http://$HOST:$PORT"
        echo "📋 Log file: $LOGFILE"

        # Check whether the port is listening
        if lsof -nP -iTCP:"$PORT" -sTCP:LISTEN >/dev/null 2>&1; then
            echo "🔗 Port $PORT is listening"
        else
            echo "⚠️ Port $PORT is not listening (the service may still be starting)"
        fi

        # Check the API response
        if command -v curl >/dev/null 2>&1; then
            if curl -s --connect-timeout 2 "http://127.0.0.1:$PORT/v1/models" > /dev/null 2>&1; then
                echo "🎯 API is responding"
            else
                echo "⚠️ API is not responding (the service may still be starting)"
            fi
        fi

        # Show process memory usage
        if command -v ps >/dev/null 2>&1; then
            MEM=$(ps -o rss= -p "$PID" 2>/dev/null | awk '{printf "%.2f GB", $1/1024/1024}')
            if [ -n "$MEM" ]; then
                echo "💾 Memory usage: $MEM"
            fi
        fi

        if [ -f "$LOGFILE" ]; then
            echo "📄 Recent log (last 3 lines):"
            tail -3 "$LOGFILE" | sed 's/^/  /'
        fi
    else
        echo "❌ GLM-OCR llama-server is not running"
        if [ -f "$PIDFILE" ]; then
            echo "Removing stale PID file..."
            rm -f "$PIDFILE"
        fi
    fi
}
logs() {
    if [ -f "$LOGFILE" ]; then
        echo "📄 GLM-OCR llama-server log:"
        echo "====================="
        tail -f "$LOGFILE"
    else
        echo "❌ Log file not found: $LOGFILE"
    fi
}
config() {
    echo "📋 Current configuration:"
    echo "  Conda env: $CONDA_ENV"
    echo "  Host: $HOST"
    echo "  Port: $PORT"
    echo "  Main model path: $MODEL_PATH"
    echo "  Multimodal projector: $MMPROJ_PATH"
    echo "  Context length: $CONTEXT_SIZE"
    echo "  GPU layers: $GPU_LAYERS"
    echo "  Threads: $THREADS"
    echo "  Batch size: $BATCH_SIZE"
    echo "  Micro-batch size: $UBATCH_SIZE"
    echo "  PID file: $PIDFILE"
    echo "  Log file: $LOGFILE"
    echo ""
    echo "📦 Model file check:"
    if [ -f "$MODEL_PATH" ]; then
        SIZE=$(du -h "$MODEL_PATH" | cut -f1)
        echo "  ✅ Main model exists ($SIZE)"
    else
        echo "  ❌ Main model not found"
    fi
    if [ -f "$MMPROJ_PATH" ]; then
        SIZE=$(du -h "$MMPROJ_PATH" | cut -f1)
        echo "  ✅ Multimodal projector exists ($SIZE)"
    else
        echo "  ❌ Multimodal projector not found"
    fi
    echo ""
    echo "🔧 Environment check:"
    echo "  llama-server: $(which llama-server 2>/dev/null || echo 'not installed')"
    if command -v llama-server >/dev/null 2>&1; then
        LLAMA_VERSION=$(llama-server --version 2>&1 | head -1 || echo 'Unknown')
        echo "  Version: $LLAMA_VERSION"
    fi
    echo "  Conda: $(which conda 2>/dev/null || echo 'not found')"
    echo "  Current Python: $(which python 2>/dev/null || echo 'not found')"
    echo ""
    echo "💻 System info:"
    echo "  Architecture: $(uname -m)"
    echo "  OS version: $(sw_vers -productVersion 2>/dev/null || echo 'Unknown')"
    echo "  Total memory: $(sysctl -n hw.memsize 2>/dev/null | awk '{printf "%.1f GB", $1/1024/1024/1024}' || echo 'Unknown')"
    echo "  CPU cores: $(sysctl -n hw.ncpu 2>/dev/null || echo 'Unknown')"
}
test_api() {
    echo "🧪 Testing the GLM-OCR llama-server API..."
    if [ ! -f "$PIDFILE" ] || ! kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "❌ GLM-OCR llama-server is not running"
        return 1
    fi
    if ! command -v curl >/dev/null 2>&1; then
        echo "❌ curl command not found"
        return 1
    fi

    echo "📡 Testing the /v1/models endpoint..."
    response=$(curl -s --connect-timeout 10 "http://127.0.0.1:$PORT/v1/models")
    if [ $? -eq 0 ]; then
        echo "✅ Models endpoint is reachable"
        echo "$response" | python -m json.tool 2>/dev/null || echo "$response"
    else
        echo "❌ Models endpoint is unreachable"
    fi
    echo ""
    echo "📡 Testing the /health endpoint..."
    health=$(curl -s --connect-timeout 5 "http://127.0.0.1:$PORT/health")
    if [ $? -eq 0 ]; then
        echo "✅ Health endpoint: $health"
    else
        echo "⚠️ Health endpoint is unreachable"
    fi
}
test_client() {
    echo "🧪 Testing GLM-OCR integration with llama-server..."
    if [ ! -f "$PIDFILE" ] || ! kill -0 "$(cat "$PIDFILE")" 2>/dev/null; then
        echo "❌ GLM-OCR llama-server is not running; start it first: $0 start"
        return 1
    fi
    CONFIG_FILE="/Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/universal_doc_parser/config/bank_statement_yusys_local.yaml"
    echo "📄 Config file: $CONFIG_FILE"
    echo ""
    echo "Make sure vl_recognition.api_url in the config file points to: http://localhost:$PORT/v1/chat/completions"
    echo ""
    echo "Example test commands:"
    echo "  cd /Users/zhch158/workspace/repository.git/ocr_platform/ocr_tools/universal_doc_parser"
    echo "  conda activate mineru2"
    echo "  python parse.py --input /path/to/test/image.png --config $CONFIG_FILE --debug"
    echo ""
    echo "Or test the API directly with curl:"
    echo "  curl -X POST http://localhost:$PORT/v1/chat/completions \\"
    echo "    -H 'Content-Type: application/json' \\"
    echo "    -d '{"
    echo "      \"model\": \"glm-ocr\","
    echo "      \"messages\": ["
    echo "        {"
    echo "          \"role\": \"user\","
    echo "          \"content\": ["
    echo "            {\"type\": \"text\", \"text\": \"Table Recognition:\"},"
    echo "            {\"type\": \"image_url\", \"image_url\": {\"url\": \"file:///path/to/image.png\"}}"
    echo "          ]"
    echo "        }"
    echo "      ],"
    echo "      \"max_tokens\": 4096"
    echo "    }'"
}
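# Note: besides file:// URLs, OpenAI-compatible endpoints commonly accept
# images inlined as base64 data URLs. A sketch of that variant (assuming a
# PNG input; `base64 -i` is the macOS invocation):
#   IMG_B64=$(base64 -i /path/to/image.png)
#   ... {"type": "image_url", "image_url": {"url": "data:image/png;base64,<IMG_B64>"}} ...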
usage() {
    echo "GLM-OCR llama-server daemon (macOS)"
    echo "==========================================="
    echo "Usage: $0 {start|stop|restart|status|logs|config|test|test-client}"
    echo ""
    echo "Commands:"
    echo "  start       - start the GLM-OCR llama-server service"
    echo "  stop        - stop the GLM-OCR llama-server service"
    echo "  restart     - restart the GLM-OCR llama-server service"
    echo "  status      - show service status and resource usage"
    echo "  logs        - show the service log (follow mode)"
    echo "  config      - show the current configuration"
    echo "  test        - test the /v1/models API endpoint"
    echo "  test-client - show how to test the config-file integration"
    echo ""
    echo "Configuration (edit this script to change):"
    echo "  Host: $HOST"
    echo "  Port: $PORT"
    echo "  Main model: $MODEL_PATH"
    echo "  Multimodal projector: $MMPROJ_PATH"
    echo "  Context length: $CONTEXT_SIZE"
    echo "  GPU layers: $GPU_LAYERS (Metal)"
    echo ""
    echo "Examples:"
    echo "  ./glmocr_local_daemon.sh start"
    echo "  ./glmocr_local_daemon.sh status"
    echo "  ./glmocr_local_daemon.sh logs"
    echo "  ./glmocr_local_daemon.sh test"
    echo ""
    echo "Prerequisites:"
    echo "  1. Install llama.cpp: brew install llama.cpp"
    echo "  2. Model files located in: ~/Library/Caches/llama.cpp/"
    echo "  3. The mineru2 conda environment is set up"
}
case "$1" in
    start)
        start
        ;;
    stop)
        stop
        ;;
    restart)
        stop
        sleep 3
        start
        ;;
    status)
        status
        ;;
    logs)
        logs
        ;;
    config)
        config
        ;;
    test)
        test_api
        ;;
    test-client)
        test_client
        ;;
    *)
        usage
        exit 1
        ;;
esac