From c969d10eaf242e27549928eebf0807cea70a4b74 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Sun, 30 Nov 2025 22:37:11 +0100
Subject: [PATCH] feat: increase Llama context to 32K with 95% VRAM
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

To support larger inputs, increase max-model-len from 20480 to 32768 and
raise gpu-memory-utilization from 0.90 to 0.95. BGE remains available but
cannot run concurrently at this VRAM level.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude
---
 services/vllm/config_llama.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/services/vllm/config_llama.yaml b/services/vllm/config_llama.yaml
index e0d7e25..1a44e59 100644
--- a/services/vllm/config_llama.yaml
+++ b/services/vllm/config_llama.yaml
@@ -2,8 +2,8 @@ model: meta-llama/Llama-3.1-8B-Instruct
 host: "0.0.0.0"
 port: 8001
 uvicorn-log-level: "info"
-gpu-memory-utilization: 0.90
-max-model-len: 20480
+gpu-memory-utilization: 0.95
+max-model-len: 32768
 dtype: auto
 enforce-eager: false
 enable-auto-tool-choice: true
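
Note: a back-of-the-envelope sketch of why the 32K context forces the VRAM
bump and crowds out the BGE embedding server. The model shapes below are the
published Llama-3.1-8B values (32 layers, 8 KV heads via GQA, head_dim 128),
and the 16-bit KV cache and ~24 GiB card size are assumptions, not values
taken from this repo; vLLM pre-allocates KV-cache blocks up to the
gpu-memory-utilization fraction, so the numbers are only indicative.

    # Rough KV-cache sizing for the old vs. new max-model-len.
    # ASSUMPTIONS: Llama-3.1-8B shapes (32 layers, 8 KV heads, head_dim 128),
    # 2 bytes per element (fp16/bf16 cache).

    def kv_cache_bytes(tokens, layers=32, kv_heads=8, head_dim=128, elem_bytes=2):
        """Bytes of KV cache (keys + values) for one sequence of `tokens` tokens."""
        return 2 * layers * kv_heads * head_dim * elem_bytes * tokens

    for ctx in (20480, 32768):
        gib = kv_cache_bytes(ctx) / 2**30
        print(f"max-model-len={ctx:>6}: ~{gib:.1f} GiB KV cache per full-length sequence")

    # Prints ~2.5 GiB at 20480 vs ~4.0 GiB at 32768. Added to roughly 15 GiB of
    # bf16 weights, a single full-length sequence already approaches 0.95 of an
    # assumed ~24 GiB card, which is consistent with the commit message's note
    # that BGE cannot run concurrently at this VRAM level.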