This commit finalizes the GPU infrastructure deployment on RunPod:

- Added qwen-2.5-7b model to LiteLLM configuration
- Self-hosted on RunPod RTX 4090 GPU server
- Connected via Tailscale VPN (100.100.108.13:8000)
- OpenAI-compatible API endpoint
- Rate limits: 1000 RPM, 100k TPM
- Marked GPU deployment as COMPLETE in deployment log
- vLLM 0.6.4.post1 with custom AsyncLLMEngine server
- Qwen/Qwen2.5-7B-Instruct model (14.25 GB)
- 85% GPU memory utilization, 4096 context length
- Successfully integrated with Open WebUI at ai.pivoine.art

Infrastructure:
- Provider: RunPod Spot Instance (~$0.50/hr)
- GPU: NVIDIA RTX 4090 24GB
- Disk: 50GB local SSD + 922TB network volume
- VPN: Tailscale (replaces WireGuard due to RunPod UDP restrictions)

Model now visible and accessible in Open WebUI for end users.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
# 69 lines, 1.9 KiB, YAML
---
# Models exposed by the LiteLLM proxy. Each entry maps a public
# model_name to provider-specific litellm_params.
model_list:
  - model_name: claude-sonnet-4
    litellm_params:
      model: anthropic/claude-sonnet-4-20250514
      api_key: os.environ/ANTHROPIC_API_KEY

  - model_name: claude-sonnet-4.5
    litellm_params:
      model: anthropic/claude-sonnet-4-5-20250929
      api_key: os.environ/ANTHROPIC_API_KEY

  - model_name: claude-3-5-sonnet
    litellm_params:
      model: anthropic/claude-3-5-sonnet-20241022
      api_key: os.environ/ANTHROPIC_API_KEY

  - model_name: claude-3-opus
    litellm_params:
      model: anthropic/claude-3-opus-20240229
      api_key: os.environ/ANTHROPIC_API_KEY

  - model_name: claude-3-haiku
    litellm_params:
      model: anthropic/claude-3-haiku-20240307
      api_key: os.environ/ANTHROPIC_API_KEY

  # Self-hosted model on GPU server via Tailscale VPN
  - model_name: qwen-2.5-7b
    litellm_params:
      # OpenAI-compatible endpoint served by vLLM on the GPU host
      model: openai/qwen-2.5-7b
      api_base: "http://100.100.108.13:8000/v1"
      # vLLM does not validate keys; LiteLLM still requires a value
      api_key: dummy
      rpm: 1000
      tpm: 100000
litellm_settings:
  # Silently drop request parameters the target provider does not support
  drop_params: true
  set_verbose: false  # Disable verbose logging for better performance
  # Enable caching with Redis for better performance
  cache: true
  cache_params:
    type: redis
    host: redis
    port: 6379
    ttl: 3600  # Cache for 1 hour
  # Do not tolerate any failures before a deployment is cooled down
  allowed_fails: 0
  # Modify params before sending to provider
  modify_params: true
  # Enable success and failure logging but minimize overhead
  success_callback: []  # Disable all success callbacks to reduce DB writes
  failure_callback: []  # Disable all failure callbacks
router_settings:
  # No failures tolerated before the router cools down a deployment
  allowed_fails: 0
# Drop unsupported parameters
# NOTE(review): reconstructed as a top-level key (blank-line separation in the
# original suggests a sibling of router_settings) — confirm against LiteLLM docs
default_litellm_params:
  drop_params: true
general_settings:
  disable_responses_id_security: true
  # Disable spend tracking to reduce database overhead
  disable_spend_logs: true
  # Disable tag tracking
  disable_tag_tracking: true
  # Disable daily spend updates
  disable_daily_spend_logs: true