diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index 061dbe2..3a3bb38 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -32,13 +32,14 @@ model_list:
   # Text Generation
   - model_name: qwen-2.5-7b
     litellm_params:
-      model: hosted_vllm/qwen-2.5-7b
-      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
+      model: openai/qwen-2.5-7b
+      api_base: http://100.121.199.88:9000 # RunPod GPU via Tailscale (no /v1 suffix)
       api_key: dummy
       rpm: 1000
       tpm: 100000
       timeout: 600 # 10 minutes for generation
       stream_timeout: 600
+      supports_system_messages: false # vLLM handles system messages differently

   # Image Generation
   - model_name: flux-schnell
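
A minimal smoke test for the re-pointed route, sketched under assumptions: the LiteLLM proxy is reachable at `http://localhost:4000` (a hypothetical address, not shown in this diff) and forwards requests to the qwen-2.5-7b backend above. Since the change sets `supports_system_messages: false`, the instructions go in a user turn rather than a system message.

```python
# Sketch only: assumes the LiteLLM proxy listens on localhost:4000 and
# exposes the OpenAI-compatible chat completions endpoint.
from openai import OpenAI

client = OpenAI(
    base_url="http://localhost:4000",  # the LiteLLM proxy, not the vLLM backend
    api_key="dummy",                   # placeholder, matching the config above
)

# With supports_system_messages: false, avoid a system role and put the
# instruction directly in the user message.
resp = client.chat.completions.create(
    model="qwen-2.5-7b",
    messages=[{"role": "user", "content": "Reply with the single word: pong"}],
    timeout=600,  # mirror the proxy's 10-minute generation timeout
)
print(resp.choices[0].message.content)
```

If the proxy returns a 404 or a provider error here, it usually means the `api_base` suffix is wrong for the chosen provider prefix, which is exactly what this change adjusts (`openai/` without `/v1` versus `hosted_vllm/` with `/v1`).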