From ec903c16c233055c4991841d85901c502d5ed60a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Fri, 21 Nov 2025 19:18:33 +0100
Subject: [PATCH] fix: use hosted_vllm/openai/ prefix for vLLM model via
 orchestrator

---
 ai/litellm-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index 38eb995..2f7adb2 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -32,7 +32,7 @@ model_list:
   # Text Generation
   - model_name: qwen-2.5-7b
     litellm_params:
-      model: hosted_vllm/qwen-2.5-7b  # vLLM model ID from /v1/models endpoint
+      model: hosted_vllm/openai/qwen-2.5-7b  # hosted_vllm/openai/ for vLLM via orchestrator
       api_base: http://100.121.199.88:9000/v1  # RunPod GPU via Tailscale
       api_key: dummy
       rpm: 1000
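
For reference, a minimal sketch of how a client would call this route once the patched config is loaded. The client targets the LiteLLM proxy by the public `model_name` (`qwen-2.5-7b`), not the `hosted_vllm/openai/...` backend ID; the proxy address `http://localhost:4000` and the key `sk-1234` are assumptions for illustration, not values from this patch.

```python
# Sketch: calling the qwen-2.5-7b route through the LiteLLM proxy.
# Assumed (not in the patch): proxy at http://localhost:4000, key "sk-1234".
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:4000/v1",  # LiteLLM proxy, not the vLLM backend
    api_key="sk-1234",                    # assumed proxy master key
)

response = client.chat.completions.create(
    model="qwen-2.5-7b",  # model_name from litellm-config.yaml
    messages=[{"role": "user", "content": "Hello!"}],
)
print(response.choices[0].message.content)
```

The proxy resolves `qwen-2.5-7b` to the `hosted_vllm/openai/qwen-2.5-7b` entry and forwards the request to the vLLM orchestrator at `api_base`, so callers never see the prefix change.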