From 81d4058c5d11b9764a8abfa0c725800e33a3fe2d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Fri, 21 Nov 2025 18:57:10 +0100
Subject: [PATCH] revert: back to openai prefix for vLLM OpenAI-compatible
 endpoint

---
 ai/litellm-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index b3e0cd4..feb2560 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -32,7 +32,7 @@ model_list:
   # Text Generation
   - model_name: qwen-2.5-7b
     litellm_params:
-      model: hosted_vllm/qwen-2.5-7b
+      model: openai/qwen-2.5-7b  # OpenAI-compatible vLLM endpoint
       api_base: http://100.121.199.88:9000/v1  # RunPod GPU via Tailscale
       api_key: dummy
       rpm: 1000