From f8694653d042bf7af388500cd78b611f6844ce76 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Sun, 30 Nov 2025 22:46:34 +0100
Subject: [PATCH] fix: adjust VRAM for 24K context based on actual usage
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Based on the error output, the model uses ~17.5GB (not the 15GB estimated).

- Llama: 85% VRAM for 24576 context (3GB KV cache)
- BGE: 6% VRAM (reduced to fit)
- Total: 91%

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude
---
 services/vllm/config_bge.yaml   | 2 +-
 services/vllm/config_llama.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/services/vllm/config_bge.yaml b/services/vllm/config_bge.yaml
index f1a8c15..f8cd65b 100644
--- a/services/vllm/config_bge.yaml
+++ b/services/vllm/config_bge.yaml
@@ -2,6 +2,6 @@ model: BAAI/bge-large-en-v1.5
 host: "0.0.0.0"
 port: 8002
 uvicorn-log-level: "info"
-gpu-memory-utilization: 0.08
+gpu-memory-utilization: 0.06
 dtype: float16
 task: embed
diff --git a/services/vllm/config_llama.yaml b/services/vllm/config_llama.yaml
index de48363..9cfeb6b 100644
--- a/services/vllm/config_llama.yaml
+++ b/services/vllm/config_llama.yaml
@@ -2,7 +2,7 @@ model: meta-llama/Llama-3.1-8B-Instruct
 host: "0.0.0.0"
 port: 8001
 uvicorn-log-level: "info"
-gpu-memory-utilization: 0.80
+gpu-memory-utilization: 0.85
 max-model-len: 24576
 dtype: auto
 enforce-eager: false
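
As a sanity check on the numbers in the commit message, here is a minimal Python sketch of the VRAM arithmetic. It assumes a 24GB card (the GPU model is not stated in the patch) and Llama-3.1-8B's KV-cache geometry (32 layers, 8 KV heads, head dim 128, fp16); the ~17.5GB model footprint is taken from the commit message, not measured here.

# VRAM budget sketch for the 0.85 / 0.06 split above.
# Assumptions (not in the patch): 24 GiB total VRAM, fp16 KV cache.

GIB = 1024 ** 3
total_vram_gib = 24.0                         # assumed GPU size

# Llama-3.1-8B KV cache: K and V, 32 layers, 8 KV heads, head dim 128, 2 bytes/elem
bytes_per_token = 2 * 32 * 8 * 128 * 2        # 131072 bytes = 128 KiB per token
kv_cache_gib = bytes_per_token * 24576 / GIB  # ~3.0 GiB at 24576 context

llama_budget_gib = 0.85 * total_vram_gib      # ~20.4 GiB for the Llama server
bge_budget_gib = 0.06 * total_vram_gib        # ~1.4 GiB for the BGE embedder
model_footprint_gib = 17.5                    # observed usage per the commit message

print(f"KV cache @ 24576 ctx: {kv_cache_gib:.1f} GiB")
print(f"Llama budget:         {llama_budget_gib:.1f} GiB "
      f"(footprint + KV cache ~ {model_footprint_gib + kv_cache_gib:.1f} GiB)")
print(f"BGE budget:           {bge_budget_gib:.1f} GiB")
print(f"Total utilization:    {0.85 + 0.06:.0%}")

Under these assumptions the Llama budget (~20.4 GiB) is tight against the observed ~17.5 GiB footprint plus the ~3 GiB KV cache, and the remaining ~9% of the card presumably leaves headroom for the CUDA contexts and allocator overhead of running both vLLM servers on one GPU.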