fix: use complete URL env var for vLLM API base

- Replace GPU_TAILSCALE_IP interpolation with GPU_VLLM_API_URL
- LiteLLM requires full URL in api_base with os.environ/ syntax

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
2025-11-23 13:17:37 +01:00
parent f3f32c163f
commit 779e76974d
2 changed files with 3 additions and 2 deletions

View File

@@ -102,6 +102,7 @@ services:
      LITELLM_MASTER_KEY: ${AI_LITELLM_API_KEY}
      DATABASE_URL: postgresql://${AI_DB_USER}:${AI_DB_PASSWORD}@ai_postgres:5432/litellm
      GPU_TAILSCALE_IP: ${GPU_TAILSCALE_IP}
GPU_VLLM_API_URL: ${GPU_VLLM_API_URL}
      # LITELLM_DROP_PARAMS: 'true' # DISABLED: Was breaking streaming
      NO_DOCS: 'true'
      NO_REDOC: 'true'

View File

@@ -33,7 +33,7 @@ model_list:
  - model_name: qwen-2.5-7b
    litellm_params:
      model: hosted_vllm/openai/qwen-2.5-7b # hosted_vllm/openai/ for vLLM via orchestrator
-     api_base: http://os.environ/GPU_TAILSCALE_IP:9000/v1 # RunPod GPU via Tailscale
+     api_base: os.environ/GPU_VLLM_API_URL # RunPod GPU via Tailscale
api_key: dummy api_key: dummy
rpm: 1000 rpm: 1000
tpm: 100000 tpm: 100000
@@ -45,7 +45,7 @@ model_list:
  - model_name: llama-3.1-8b
    litellm_params:
      model: hosted_vllm/openai/llama-3.1-8b # hosted_vllm/openai/ for vLLM via orchestrator
-     api_base: http://os.environ/GPU_TAILSCALE_IP:9000/v1 # RunPod GPU via Tailscale
+     api_base: os.environ/GPU_VLLM_API_URL # RunPod GPU via Tailscale
api_key: dummy api_key: dummy
rpm: 1000 rpm: 1000
tpm: 100000 tpm: 100000