# compose.yaml — MinerU services backed by an sglang inference image.
  1. services:
  2. mineru-sglang-server:
  3. image: mineru-sglang:latest
  4. container_name: mineru-sglang-server
  5. restart: always
  6. profiles: ["sglang-server"]
  7. ports:
  8. - 30000:30000
  9. environment:
  10. MINERU_MODEL_SOURCE: local
  11. entrypoint: mineru-sglang-server
  12. command:
  13. --host 0.0.0.0
  14. --port 30000
  15. # --enable-torch-compile # You can also enable torch.compile to accelerate inference speed by approximately 15%
  16. # --dp-size 2 # If using multiple GPUs, increase throughput using sglang's multi-GPU parallel mode
  17. # --tp-size 2 # If you have more than one GPU, you can expand available VRAM using tensor parallelism (TP) mode.
  18. # --mem-fraction-static 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
  19. ulimits:
  20. memlock: -1
  21. stack: 67108864
  22. ipc: host
  23. healthcheck:
  24. test: ["CMD-SHELL", "curl -f http://localhost:30000/health || exit 1"]
  25. deploy:
  26. resources:
  27. reservations:
  28. devices:
  29. - driver: nvidia
  30. device_ids: ["0"]
  31. capabilities: [gpu]
  32. mineru-api:
  33. image: mineru-sglang:latest
  34. container_name: mineru-api
  35. restart: always
  36. profiles: ["api"]
  37. ports:
  38. - 8000:8000
  39. environment:
  40. MINERU_MODEL_SOURCE: local
  41. entrypoint: mineru-api
  42. command:
  43. --host 0.0.0.0
  44. --port 8000
  45. # parameters for sglang-engine
  46. # --enable-torch-compile # You can also enable torch.compile to accelerate inference speed by approximately 15%
  47. # --dp-size 2 # If using multiple GPUs, increase throughput using sglang's multi-GPU parallel mode
  48. # --tp-size 2 # If you have more than one GPU, you can expand available VRAM using tensor parallelism (TP) mode.
  49. # --mem-fraction-static 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
  50. ulimits:
  51. memlock: -1
  52. stack: 67108864
  53. ipc: host
  54. deploy:
  55. resources:
  56. reservations:
  57. devices:
  58. - driver: nvidia
  59. device_ids: [ "0" ]
  60. capabilities: [ gpu ]
  61. mineru-gradio:
  62. image: mineru-sglang:latest
  63. container_name: mineru-gradio
  64. restart: always
  65. profiles: ["gradio"]
  66. ports:
  67. - 7860:7860
  68. environment:
  69. MINERU_MODEL_SOURCE: local
  70. entrypoint: mineru-gradio
  71. command:
  72. --server-name 0.0.0.0
  73. --server-port 7860
  74. --enable-sglang-engine true # Enable the sglang engine for Gradio
  75. # --enable-api false # If you want to disable the API, set this to false
  76. # --max-convert-pages 20 # If you want to limit the number of pages for conversion, set this to a specific number
  77. # parameters for sglang-engine
  78. # --enable-torch-compile # You can also enable torch.compile to accelerate inference speed by approximately 15%
  79. # --dp-size 2 # If using multiple GPUs, increase throughput using sglang's multi-GPU parallel mode
  80. # --tp-size 2 # If you have more than one GPU, you can expand available VRAM using tensor parallelism (TP) mode.
  81. # --mem-fraction-static 0.5 # If running on a single GPU and encountering VRAM shortage, reduce the KV cache size by this parameter, if VRAM issues persist, try lowering it further to `0.4` or below.
  82. ulimits:
  83. memlock: -1
  84. stack: 67108864
  85. ipc: host
  86. deploy:
  87. resources:
  88. reservations:
  89. devices:
  90. - driver: nvidia
  91. device_ids: [ "0" ]
  92. capabilities: [ gpu ]