diff --git a/services/vllm/config_bge.yaml b/services/vllm/config_bge.yaml
index 1384ad5..f1a8c15 100644
--- a/services/vllm/config_bge.yaml
+++ b/services/vllm/config_bge.yaml
@@ -2,6 +2,6 @@ model: BAAI/bge-large-en-v1.5
 host: "0.0.0.0"
 port: 8002
 uvicorn-log-level: "info"
-gpu-memory-utilization: 0.10
+gpu-memory-utilization: 0.08
 dtype: float16
 task: embed
diff --git a/services/vllm/config_llama.yaml b/services/vllm/config_llama.yaml
index b08af74..e0d7e25 100644
--- a/services/vllm/config_llama.yaml
+++ b/services/vllm/config_llama.yaml
@@ -2,8 +2,8 @@ model: meta-llama/Llama-3.1-8B-Instruct
 host: "0.0.0.0"
 port: 8001
 uvicorn-log-level: "info"
-gpu-memory-utilization: 0.85
-max-model-len: 8192
+gpu-memory-utilization: 0.90
+max-model-len: 20480
 dtype: auto
 enforce-eager: false
 enable-auto-tool-choice: true