From fe0cf487ee8193a88e2a80c869f9f2b5f19096da Mon Sep 17 00:00:00 2001
From: Sebastian Krüger
Date: Fri, 21 Nov 2025 19:02:44 +0100
Subject: [PATCH] fix: use correct vLLM model name with hosted_vllm prefix

---
 ai/litellm-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index feb2560..6a029cc 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -32,7 +32,7 @@ model_list:
   # Text Generation
   - model_name: qwen-2.5-7b
     litellm_params:
-      model: openai/qwen-2.5-7b # OpenAI-compatible vLLM endpoint
+      model: hosted_vllm/Qwen/Qwen2.5-7B-Instruct # vLLM HuggingFace model name
       api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
       api_key: dummy
       rpm: 1000
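
For reference, a minimal sanity-check sketch of how a client would exercise this change through the LiteLLM proxy. Note that callers keep using the model_name alias "qwen-2.5-7b"; only litellm_params.model changes, so no client-side updates are needed. The proxy URL (http://localhost:4000) is an assumption, not part of this patch:

    # sanity check (sketch): call the renamed model via the LiteLLM proxy
    # assumes the proxy runs locally on its default port 4000
    from openai import OpenAI

    client = OpenAI(
        base_url="http://localhost:4000",  # LiteLLM proxy URL (assumption)
        api_key="dummy",                   # matches the placeholder key in the config
    )

    resp = client.chat.completions.create(
        model="qwen-2.5-7b",  # the model_name alias, not hosted_vllm/Qwen/...
        messages=[{"role": "user", "content": "ping"}],
    )
    print(resp.choices[0].message.content)

If the hosted_vllm/ prefix is wrong or the HuggingFace path does not match what vLLM is serving, this call fails with a model-not-found error from the proxy, which is how the bug fixed here would surface.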