From 55d9bef18a6fae0751b5a1e0830c4d8f9693882a Mon Sep 17 00:00:00 2001
From: Sebastian Krüger
Date: Sun, 23 Nov 2025 16:16:37 +0100
Subject: [PATCH] fix: remove api_key from vLLM config to fix authentication error
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

These vLLM servers don't validate API keys, so LiteLLM shouldn't pass one along.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude
---
 ai/litellm-config.yaml | 2 --
 1 file changed, 2 deletions(-)

diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index b43f433..b73c189 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -34,7 +34,6 @@ model_list:
     litellm_params:
       model: hosted_vllm/openai/qwen-2.5-7b # hosted_vllm/openai/ prefix for proper streaming
       api_base: os.environ/GPU_VLLM_QWEN_URL # Direct to vLLM Qwen server
-      api_key: dummy
       rpm: 1000
       tpm: 100000
       timeout: 600 # 10 minutes for generation
@@ -47,7 +46,6 @@ model_list:
     litellm_params:
       model: hosted_vllm/openai/llama-3.1-8b # hosted_vllm/openai/ prefix for proper streaming
      api_base: os.environ/GPU_VLLM_LLAMA_URL # Direct to vLLM Llama server
-      api_key: dummy
       rpm: 1000
       tpm: 100000
       timeout: 600 # 10 minutes for generation
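
Note: if a backing vLLM server is ever started with --api-key, the opposite adjustment applies: keep the api_key field but source it from the environment (using LiteLLM's os.environ/ syntax, as the api_base lines above already do) instead of hard-coding a placeholder. A minimal sketch of that variant, where the GPU_VLLM_API_KEY variable name is hypothetical:

  - model_name: qwen-2.5-7b
    litellm_params:
      model: hosted_vllm/openai/qwen-2.5-7b
      api_base: os.environ/GPU_VLLM_QWEN_URL
      api_key: os.environ/GPU_VLLM_API_KEY # only when vLLM enforces --api-key
      rpm: 1000
      tpm: 100000
      timeout: 600

This keeps the secret out of the config file while still authenticating against a key-enforcing server; for the unauthenticated servers in this patch, omitting the field entirely is the right call.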