From f68bc4791547079c7e0819cd597c49e94e29cae0 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Sun, 30 Nov 2025 22:15:08 +0100
Subject: [PATCH] feat: increase Llama max-model-len to 20480
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adjusted VRAM allocation for larger context window:
- Llama: 90% VRAM, 20480 context (up from 8192)
- BGE: 8% VRAM (down from 10%)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude
---
 services/vllm/config_bge.yaml   | 2 +-
 services/vllm/config_llama.yaml | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/services/vllm/config_bge.yaml b/services/vllm/config_bge.yaml
index 1384ad5..f1a8c15 100644
--- a/services/vllm/config_bge.yaml
+++ b/services/vllm/config_bge.yaml
@@ -2,6 +2,6 @@ model: BAAI/bge-large-en-v1.5
 host: "0.0.0.0"
 port: 8002
 uvicorn-log-level: "info"
-gpu-memory-utilization: 0.10
+gpu-memory-utilization: 0.08
 dtype: float16
 task: embed
diff --git a/services/vllm/config_llama.yaml b/services/vllm/config_llama.yaml
index b08af74..e0d7e25 100644
--- a/services/vllm/config_llama.yaml
+++ b/services/vllm/config_llama.yaml
@@ -2,8 +2,8 @@ model: meta-llama/Llama-3.1-8B-Instruct
 host: "0.0.0.0"
 port: 8001
 uvicorn-log-level: "info"
-gpu-memory-utilization: 0.85
-max-model-len: 8192
+gpu-memory-utilization: 0.90
+max-model-len: 20480
 dtype: auto
 enforce-eager: false
 enable-auto-tool-choice: true
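
Note (not part of the patch): a minimal back-of-envelope sketch of what the jump from 8192 to 20480 tokens costs in KV-cache memory, assuming the published Llama-3.1-8B architecture (32 layers, 8 KV heads under grouped-query attention, head dim 128) and an fp16 KV cache; the patch itself does not state these figures, and the actual budget depends on the GPU, which is not named here.

```python
# Hedged KV-cache estimate for max-model-len: 20480.
# Architecture constants below are assumptions taken from the public
# Llama-3.1-8B-Instruct spec, not from this repo's configs.

LAYERS = 32
KV_HEADS = 8           # GQA: 8 KV heads, not the 32 query heads
HEAD_DIM = 128
BYTES_FP16 = 2
MAX_MODEL_LEN = 20480  # new value in config_llama.yaml

# K and V each store LAYERS * KV_HEADS * HEAD_DIM values per token.
bytes_per_token = 2 * LAYERS * KV_HEADS * HEAD_DIM * BYTES_FP16
cache_gib = MAX_MODEL_LEN * bytes_per_token / 2**30

print(f"{bytes_per_token / 1024:.0f} KiB per token")        # -> 128 KiB
print(f"{cache_gib:.2f} GiB per full-length sequence")      # -> 2.50 GiB
```

Under these assumptions one full 20480-token sequence needs about 2.5 GiB of KV cache on top of the roughly 15 GiB of fp16 weights, which is presumably why the Llama share was raised to 90% and BGE squeezed to 8%: on a 24 GiB card (an assumed size), 90% utilization leaves only a few GiB of cache headroom, so concurrency at full context would be limited.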