feat: consolidate GPU IP with single GPU_TAILSCALE_IP variable
- Replace COMFYUI_BACKEND_HOST and SUPERVISOR_BACKEND_HOST with GPU_TAILSCALE_IP
- Update LiteLLM config to read the vLLM endpoint from the environment
- Add GPU_TAILSCALE_IP env var to the LiteLLM service
- Configure qwen-2.5-7b and llama-3.1-8b to route through the orchestrator

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -101,6 +101,7 @@ services:
|
||||
ANTHROPIC_API_KEY: ${ANTHROPIC_API_KEY}
|
||||
LITELLM_MASTER_KEY: ${AI_LITELLM_API_KEY}
|
||||
DATABASE_URL: postgresql://${AI_DB_USER}:${AI_DB_PASSWORD}@ai_postgres:5432/litellm
|
||||
GPU_TAILSCALE_IP: ${GPU_TAILSCALE_IP}
|
||||
# LITELLM_DROP_PARAMS: 'true' # DISABLED: Was breaking streaming
|
||||
NO_DOCS: 'true'
|
||||
NO_REDOC: 'true'
|
||||
@@ -206,7 +207,7 @@ services:
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
TZ: ${TIMEZONE:-Europe/Berlin}
|
||||
COMFYUI_BACKEND_HOST: ${COMFYUI_BACKEND_HOST}
|
||||
COMFYUI_BACKEND_HOST: ${GPU_TAILSCALE_IP}
|
||||
COMFYUI_BACKEND_PORT: ${COMFYUI_BACKEND_PORT:-8188}
|
||||
volumes:
|
||||
- ./comfyui-nginx.conf:/etc/nginx/nginx.conf.template:ro
|
||||
@@ -239,7 +240,7 @@ services:
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
TZ: ${TIMEZONE:-Europe/Berlin}
|
||||
SUPERVISOR_BACKEND_HOST: ${SUPERVISOR_BACKEND_HOST}
|
||||
SUPERVISOR_BACKEND_HOST: ${GPU_TAILSCALE_IP}
|
||||
SUPERVISOR_BACKEND_PORT: ${SUPERVISOR_BACKEND_PORT:-9001}
|
||||
volumes:
|
||||
- ./supervisor-nginx.conf:/etc/nginx/nginx.conf.template:ro
|
||||
|
||||
@@ -33,7 +33,7 @@ model_list:
|
||||
- model_name: qwen-2.5-7b
|
||||
litellm_params:
|
||||
model: hosted_vllm/openai/qwen-2.5-7b # hosted_vllm/openai/ for vLLM via orchestrator
|
||||
api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
|
||||
api_base: os.environ/GPU_VLLM_API_BASE # LiteLLM resolves os.environ/VAR only as a full value, never inside a URL; set GPU_VLLM_API_BASE=http://<GPU_TAILSCALE_IP>:9000/v1 (RunPod GPU via Tailscale)
|
||||
api_key: dummy
|
||||
rpm: 1000
|
||||
tpm: 100000
|
||||
@@ -45,7 +45,7 @@ model_list:
|
||||
- model_name: llama-3.1-8b
|
||||
litellm_params:
|
||||
model: hosted_vllm/openai/llama-3.1-8b # hosted_vllm/openai/ for vLLM via orchestrator
|
||||
api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
|
||||
api_base: os.environ/GPU_VLLM_API_BASE # LiteLLM resolves os.environ/VAR only as a full value, never inside a URL; set GPU_VLLM_API_BASE=http://<GPU_TAILSCALE_IP>:9000/v1 (RunPod GPU via Tailscale)
|
||||
api_key: dummy
|
||||
rpm: 1000
|
||||
tpm: 100000
|
||||
|
||||
Reference in New Issue
Block a user