fix: add /v1 suffix to vLLM api_base for proper endpoint routing

2025-11-21 18:00:53 +01:00
parent 42a68bc0b5
commit dfde1df72f


@@ -33,7 +33,7 @@ model_list:
   - model_name: qwen-2.5-7b
     litellm_params:
       model: openai/qwen-2.5-7b
-      api_base: http://100.121.199.88:9000 # RunPod GPU via Tailscale (no /v1 suffix)
+      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
       api_key: dummy
       rpm: 1000
       tpm: 100000
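
Context for the change: vLLM's OpenAI-compatible server exposes its routes under /v1 (e.g. /v1/chat/completions), and LiteLLM's openai/ provider appends the route path to api_base, so the suffix has to be part of the base URL. A minimal sketch of the resulting request path, assuming the vLLM server from the config above serves a model named qwen-2.5-7b:

    from openai import OpenAI

    # Point an OpenAI-compatible client at the vLLM server from the config above.
    # The client appends route paths such as /chat/completions to base_url, so the
    # /v1 prefix must already be present -- without it, requests would hit
    # http://100.121.199.88:9000/chat/completions and vLLM would return 404.
    client = OpenAI(
        base_url="http://100.121.199.88:9000/v1",  # matches the new api_base
        api_key="dummy",                           # vLLM ignores the key by default
    )

    resp = client.chat.completions.create(
        model="qwen-2.5-7b",  # assumed served model name on the vLLM side
        messages=[{"role": "user", "content": "ping"}],
    )
    print(resp.choices[0].message.content)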