# Docker Compose definitions for MinerU's vllm-backed services.
# Every service is gated behind a profile, so start exactly the one you need:
#   docker compose --profile vllm-server up -d   # OpenAI-compatible vllm server on :30000
#   docker compose --profile api up -d           # HTTP API on :8000
#   docker compose --profile gradio up -d        # Gradio web UI on :7860
# All three share the same image and require an NVIDIA GPU (device "0").
services:
  mineru-vllm-server:
    image: mineru-vllm:latest
    container_name: mineru-vllm-server
    restart: always
    profiles: ["vllm-server"]
    ports:
      # Quoted so YAML never mis-parses the host:container pair as a number.
      - "30000:30000"
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-vllm-server
    # Multi-line plain scalar: the lines fold into one command-line string.
    command:
      --host 0.0.0.0
      --port 30000
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]
              capabilities: [gpu]

  mineru-api:
    image: mineru-vllm:latest
    container_name: mineru-api
    restart: always
    profiles: ["api"]
    ports:
      - "8000:8000"
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-api
    command:
      --host 0.0.0.0
      --port 8000
      # parameters for vllm-engine
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]
              capabilities: [gpu]

  mineru-gradio:
    image: mineru-vllm:latest
    container_name: mineru-gradio
    restart: always
    profiles: ["gradio"]
    ports:
      - "7860:7860"
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-gradio
    command:
      --server-name 0.0.0.0
      --server-port 7860
      --enable-vllm-engine true
      # --enable-vllm-engine true enables the vllm engine for Gradio
      # --enable-api false # If you want to disable the API, set this to false
      # --max-convert-pages 20 # If you want to limit the number of pages for conversion, set this to a specific number
      # parameters for vllm-engine
      # --data-parallel-size 2 # If using multiple GPUs, increase throughput using vllm's multi-GPU parallel mode
      # --gpu-memory-utilization 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]
              capabilities: [gpu]