fix: revert to openai prefix, remove /v1 suffix from api_base

- Changed back from hosted_vllm/qwen-2.5-7b to openai/qwen-2.5-7b
- Removed /v1 suffix from api_base (LiteLLM adds it automatically)
- Added supports_system_messages: false for vLLM compatibility
This commit is contained in:
2025-11-21 17:55:10 +01:00
parent 699c8537b0
commit 42a68bc0b5

View File

@@ -32,13 +32,14 @@ model_list:
   # Text Generation
   - model_name: qwen-2.5-7b
     litellm_params:
-      model: hosted_vllm/qwen-2.5-7b
-      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
+      model: openai/qwen-2.5-7b
+      api_base: http://100.121.199.88:9000 # RunPod GPU via Tailscale (no /v1 suffix)
       api_key: dummy
       rpm: 1000
       tpm: 100000
       timeout: 600 # 10 minutes for generation
       stream_timeout: 600
+      supports_system_messages: false # vLLM handles system messages differently
   # Image Generation
   - model_name: flux-schnell