From 699c8537b078ead7a7540a8d93fc5b74505e4595 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Fri, 21 Nov 2025 17:52:34 +0100
Subject: [PATCH] fix: use LiteLLM vLLM pass-through for qwen model

- Changed model from openai/qwen-2.5-7b to hosted_vllm/qwen-2.5-7b
- Implements proper vLLM integration per LiteLLM docs
- Fixes streaming response forwarding issue
---
 ai/litellm-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index b0aa5f8..061dbe2 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -32,7 +32,7 @@ model_list:
   # Text Generation
   - model_name: qwen-2.5-7b
     litellm_params:
-      model: openai/qwen-2.5-7b
+      model: hosted_vllm/qwen-2.5-7b
       api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
       api_key: dummy
       rpm: 1000
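
Testing note (not part of the commit): a minimal sketch for verifying that streaming
now forwards correctly through the proxy. It assumes the LiteLLM proxy is reachable at
http://localhost:4000 with a placeholder key "sk-1234" — both are assumptions, adjust
them to your deployment. The model name "qwen-2.5-7b" comes from the model_list entry
changed in this patch.

    # Streaming smoke test against the LiteLLM proxy (assumed at localhost:4000).
    from openai import OpenAI

    client = OpenAI(
        base_url="http://localhost:4000/v1",  # LiteLLM proxy, not the vLLM server directly
        api_key="sk-1234",                    # placeholder virtual key; replace with yours
    )

    # Request the model_name defined in ai/litellm-config.yaml.
    stream = client.chat.completions.create(
        model="qwen-2.5-7b",
        messages=[{"role": "user", "content": "Say hello in one sentence."}],
        stream=True,
    )

    # With the hosted_vllm/ prefix, chunks should arrive incrementally
    # instead of only after the full response is generated.
    for chunk in stream:
        delta = chunk.choices[0].delta.content
        if delta:
            print(delta, end="", flush=True)
    print()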