@@ -14,9 +14,9 @@ services:
--host 0.0.0.0
--port 30000
# --enable-torch-compile # You can also enable torch.compile to accelerate inference speed by approximately 15%
- # --dp 2 # If you have more than two GPUs with 24GB VRAM or above, you can use sglang's multi-GPU parallel mode to increase throughput
- # --tp 2 # If you have two GPUs with 12GB or 16GB VRAM, you can use the Tensor Parallel (TP) mode
- # --mem-fraction-static 0.7 # If you have two GPUs with 11GB VRAM, in addition to Tensor Parallel mode, you need to reduce the KV cache size
+ # --dp-size 2 # If you have multiple GPUs, you can use sglang's data-parallel (DP) mode to increase throughput
+ # --tp-size 2 # If you have more than one GPU, you can use tensor parallelism (TP) mode to pool the VRAM of multiple GPUs
+ # --mem-fraction-static 0.5 # If you run out of VRAM on a single GPU, use this parameter to reduce the KV cache size; if the problem persists, lower it further to `0.4` or below
ulimits:
memlock: -1
stack: 67108864
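
For context only, below is a minimal sketch of how these flags might sit inside a complete compose service. The service name, image tag, port mapping, and model path are placeholders rather than values taken from this repository's file; only the flags and ulimits shown in the hunk above come from the diff itself.

services:
  sglang:
    image: lmsysorg/sglang:latest        # placeholder tag; pin to the version your project actually uses
    command: >
      python3 -m sglang.launch_server
      --model-path /models/your-model    # placeholder model path
      --host 0.0.0.0
      --port 30000
      --mem-fraction-static 0.5
    ports:
      - "30000:30000"
    ipc: host                            # sglang's docker instructions recommend host IPC for shared memory
    ulimits:
      memlock: -1
      stack: 67108864
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [gpu]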