---
# Docker Compose definitions for MinerU's sglang-based services.
# Each service is opt-in via its Compose profile: "sglang-server", "api", or "gradio".
# All three use the same image and reserve one NVIDIA GPU (device 0).
services:
  mineru-sglang-server:
    image: mineru-sglang:latest
    container_name: mineru-sglang-server
    restart: always
    profiles: ["sglang-server"]
    ports:
      # Quoted to avoid YAML implicit-typing surprises on HOST:CONTAINER mappings.
      - "30000:30000"
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-sglang-server
    command: --host 0.0.0.0 --port 30000
    # Optional sglang tuning flags (append to `command` above as needed):
    # --enable-torch-compile  # You can also enable torch.compile to accelerate inference speed by approximately 15%
    # --dp-size 2  # If using multiple GPUs, increase throughput using sglang's multi-GPU parallel mode
    # --tp-size 2  # If you have more than one GPU, you can expand available VRAM using tensor parallelism (TP) mode.
    # --mem-fraction-static 0.5  # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter; if VRAM issues persist, try lowering it further to `0.4` or below.
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    healthcheck:
      test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]
              capabilities: [gpu]

  mineru-api:
    image: mineru-sglang:latest
    container_name: mineru-api
    restart: always
    profiles: ["api"]
    ports:
      - "8000:8000"
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-api
    command: --host 0.0.0.0 --port 8000
    # parameters for sglang-engine
    # --enable-torch-compile  # You can also enable torch.compile to accelerate inference speed by approximately 15%
    # --dp-size 2  # If using multiple GPUs, increase throughput using sglang's multi-GPU parallel mode
    # --tp-size 2  # If you have more than one GPU, you can expand available VRAM using tensor parallelism (TP) mode.
    # --mem-fraction-static 0.5  # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter; if VRAM issues persist, try lowering it further to `0.4` or below.
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]
              capabilities: [gpu]

  mineru-gradio:
    image: mineru-sglang:latest
    container_name: mineru-gradio
    restart: always
    profiles: ["gradio"]
    ports:
      - "7860:7860"
    environment:
      MINERU_MODEL_SOURCE: local
    entrypoint: mineru-gradio
    command: --server-name 0.0.0.0 --server-port 7860 --enable-sglang-engine true
    # Gradio options (append to `command` above as needed):
    # --enable-sglang-engine true  # Enable the sglang engine for Gradio
    # --enable-api false  # If you want to disable the API, set this to false
    # --max-convert-pages 20  # If you want to limit the number of pages for conversion, set this to a specific number
    # parameters for sglang-engine
    # --enable-torch-compile  # You can also enable torch.compile to accelerate inference speed by approximately 15%
    # --dp-size 2  # If using multiple GPUs, increase throughput using sglang's multi-GPU parallel mode
    # --tp-size 2  # If you have more than one GPU, you can expand available VRAM using tensor parallelism (TP) mode.
    # --mem-fraction-static 0.5  # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter; if VRAM issues persist, try lowering it further to `0.4` or below.
    ulimits:
      memlock: -1
      stack: 67108864
    ipc: host
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              device_ids: ["0"]
              capabilities: [gpu]