fix: remove api_key from vLLM config to resolve authentication error

These vLLM servers don't validate API keys, so LiteLLM shouldn't pass one; the dummy key was triggering the authentication error.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit 55d9bef18a
parent 7fc945e179
2025-11-23 16:16:37 +01:00


@@ -34,7 +34,6 @@ model_list:
     litellm_params:
       model: hosted_vllm/openai/qwen-2.5-7b # hosted_vllm/openai/ prefix for proper streaming
       api_base: os.environ/GPU_VLLM_QWEN_URL # Direct to vLLM Qwen server
-      api_key: dummy
       rpm: 1000
       tpm: 100000
       timeout: 600 # 10 minutes for generation
@@ -47,7 +46,6 @@ model_list:
     litellm_params:
       model: hosted_vllm/openai/llama-3.1-8b # hosted_vllm/openai/ prefix for proper streaming
       api_base: os.environ/GPU_VLLM_LLAMA_URL # Direct to vLLM Llama server
-      api_key: dummy
       rpm: 1000
       tpm: 100000
       timeout: 600 # 10 minutes for generation
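
For context, a sketch of what one model entry looks like after this change. The model_name alias and the list framing are assumptions typical of LiteLLM model_list configs; the diff above only shows the litellm_params block:

model_list:
  - model_name: qwen-2.5-7b                   # assumed alias; not shown in the diff
    litellm_params:
      model: hosted_vllm/openai/qwen-2.5-7b   # hosted_vllm/openai/ prefix for proper streaming
      api_base: os.environ/GPU_VLLM_QWEN_URL  # Direct to vLLM Qwen server
      # no api_key: these vLLM servers don't validate keys, so none is sent
      rpm: 1000
      tpm: 100000
      timeout: 600                            # 10 minutes for generation

Note that removing api_key is only correct because these servers do no key validation; if a vLLM server were launched with its --api-key flag, a matching api_key would have to stay in litellm_params.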