From dfde1df72f8bc23f16dabbbb7c6ccae9a00c8bd6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Fri, 21 Nov 2025 18:00:53 +0100
Subject: [PATCH] fix: add /v1 suffix to vLLM api_base for proper endpoint
 routing

---
 ai/litellm-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index 3a3bb38..9a87781 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -33,7 +33,7 @@ model_list:
   - model_name: qwen-2.5-7b
     litellm_params:
       model: openai/qwen-2.5-7b
-      api_base: http://100.121.199.88:9000 # RunPod GPU via Tailscale (no /v1 suffix)
+      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
       api_key: dummy
       rpm: 1000
       tpm: 100000