From f3f32c163f71c2096d375308548ca5273d0b4459 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Sun, 23 Nov 2025 13:05:33 +0100
Subject: [PATCH] feat: consolidate GPU IP with single GPU_TAILSCALE_IP variable
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Replace COMFYUI_BACKEND_HOST and SUPERVISOR_BACKEND_HOST with GPU_TAILSCALE_IP
- Update LiteLLM config to use os.environ/GPU_TAILSCALE_IP for vLLM models
- Add GPU_TAILSCALE_IP env var to LiteLLM service
- Configure qwen-2.5-7b and llama-3.1-8b to route through orchestrator

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude
---
 ai/compose.yaml        | 5 +++--
 ai/litellm-config.yaml | 4 ++--
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/ai/compose.yaml b/ai/compose.yaml
index 2827580..1279229 100644
--- a/ai/compose.yaml
+++ b/ai/compose.yaml
@@ -101,6 +101,7 @@ services:
       ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY}
       LITELLM_MASTER_KEY: ${AI_LITELLM_API_KEY}
       DATABASE_URL: postgresql://${AI_DB_USER}:${AI_DB_PASSWORD}@ai_postgres:5432/litellm
+      GPU_TAILSCALE_IP: ${GPU_TAILSCALE_IP}
       # LITELLM_DROP_PARAMS: 'true' # DISABLED: Was breaking streaming
       NO_DOCS: 'true'
       NO_REDOC: 'true'
@@ -206,7 +207,7 @@ services:
     restart: unless-stopped
     environment:
       TZ: ${TIMEZONE:-Europe/Berlin}
-      COMFYUI_BACKEND_HOST: ${COMFYUI_BACKEND_HOST}
+      COMFYUI_BACKEND_HOST: ${GPU_TAILSCALE_IP}
       COMFYUI_BACKEND_PORT: ${COMFYUI_BACKEND_PORT:-8188}
     volumes:
       - ./comfyui-nginx.conf:/etc/nginx/nginx.conf.template:ro
@@ -239,7 +240,7 @@ services:
     restart: unless-stopped
     environment:
       TZ: ${TIMEZONE:-Europe/Berlin}
-      SUPERVISOR_BACKEND_HOST: ${SUPERVISOR_BACKEND_HOST}
+      SUPERVISOR_BACKEND_HOST: ${GPU_TAILSCALE_IP}
       SUPERVISOR_BACKEND_PORT: ${SUPERVISOR_BACKEND_PORT:-9001}
     volumes:
       - ./supervisor-nginx.conf:/etc/nginx/nginx.conf.template:ro
diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index 11a8c7a..cf02a5b 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -33,7 +33,7 @@ model_list:
   - model_name: qwen-2.5-7b
     litellm_params:
       model: hosted_vllm/openai/qwen-2.5-7b # hosted_vllm/openai/ for vLLM via orchestrator
-      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
+      api_base: http://os.environ/GPU_TAILSCALE_IP:9000/v1 # RunPod GPU via Tailscale
       api_key: dummy
       rpm: 1000
       tpm: 100000
@@ -45,7 +45,7 @@ model_list:
   - model_name: llama-3.1-8b
     litellm_params:
       model: hosted_vllm/openai/llama-3.1-8b # hosted_vllm/openai/ for vLLM via orchestrator
-      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
+      api_base: http://os.environ/GPU_TAILSCALE_IP:9000/v1 # RunPod GPU via Tailscale
       api_key: dummy
       rpm: 1000
       tpm: 100000
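
Note on the litellm-config.yaml hunks: LiteLLM resolves an os.environ/<VAR>
reference when it is the entire field value; an os.environ/ fragment embedded
inside a longer URL (as in http://os.environ/GPU_TAILSCALE_IP:9000/v1) may be
passed through literally rather than expanded. A minimal full-value sketch,
assuming a hypothetical GPU_VLLM_API_BASE variable derived from
GPU_TAILSCALE_IP in .env (neither the variable name nor the .env lines below
are part of this patch):

    # .env (hypothetical helper variable)
    # GPU_TAILSCALE_IP=100.121.199.88
    # GPU_VLLM_API_BASE=http://100.121.199.88:9000/v1

    # ai/compose.yaml -- pass the fully-formed base URL into the container
    services:
      litellm:
        environment:
          GPU_VLLM_API_BASE: ${GPU_VLLM_API_BASE}

    # ai/litellm-config.yaml -- reference it as the whole value, the form
    # LiteLLM's env/secret resolution handles
    model_list:
      - model_name: qwen-2.5-7b
        litellm_params:
          model: hosted_vllm/openai/qwen-2.5-7b
          api_base: os.environ/GPU_VLLM_API_BASE
          api_key: dummy

This keeps the Tailscale IP as the single source of truth in .env while
staying within LiteLLM's full-value resolution rule.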