From 42a68bc0b5670b3442701f688de2f1f30134edfe Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Fri, 21 Nov 2025 17:55:10 +0100
Subject: [PATCH] fix: revert to openai prefix, remove /v1 suffix from api_base

- Changed back from hosted_vllm/qwen-2.5-7b to openai/qwen-2.5-7b
- Removed /v1 suffix from api_base (LiteLLM adds it automatically)
- Added supports_system_messages: false for vLLM compatibility
---
 ai/litellm-config.yaml | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index 061dbe2..3a3bb38 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -32,13 +32,14 @@ model_list:
   # Text Generation
   - model_name: qwen-2.5-7b
     litellm_params:
-      model: hosted_vllm/qwen-2.5-7b
-      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
+      model: openai/qwen-2.5-7b
+      api_base: http://100.121.199.88:9000 # RunPod GPU via Tailscale (no /v1 suffix)
       api_key: dummy
       rpm: 1000
       tpm: 100000
       timeout: 600 # 10 minutes for generation
       stream_timeout: 600
+      supports_system_messages: false # vLLM handles system messages differently

   # Image Generation
   - model_name: flux-schnell
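
Not part of the patch, just a verification note: a minimal smoke test that mirrors
the updated litellm_params in a direct litellm.completion call. It assumes the
caller is on the same Tailscale network as the RunPod host; the prompt text is
made up for illustration.

import litellm

# Minimal smoke test mirroring the patched litellm_params. Assumes this
# machine can reach the RunPod GPU host over Tailscale.
response = litellm.completion(
    model="openai/qwen-2.5-7b",             # openai/ prefix uses the OpenAI-compatible client
    api_base="http://100.121.199.88:9000",  # no /v1 suffix; per the commit, LiteLLM appends it
    api_key="dummy",                        # vLLM ignores the key, but one must be set
    messages=[
        # Through the proxy config, supports_system_messages: false tells
        # LiteLLM to adapt system turns for the vLLM backend.
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Reply with a single word: pong."},
    ],
    timeout=600,
)
print(response.choices[0].message.content)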