diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index c6373b0..fcd2a66 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -42,24 +42,6 @@ model_list:
       supports_system_messages: false # vLLM handles system messages differently
       stream: true # Enable streaming by default
 
-  # Image Generation
-  - model_name: flux-schnell
-    litellm_params:
-      model: openai/dall-e-3 # OpenAI-compatible mapping
-      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
-      api_key: dummy
-      rpm: 100
-      max_parallel_requests: 3
-
-  # Music Generation
-  - model_name: musicgen-medium
-    litellm_params:
-      model: openai/musicgen-medium
-      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
-      api_key: dummy
-      rpm: 50
-      max_parallel_requests: 1
-
 litellm_settings:
   drop_params: false # DISABLED: Was breaking streaming
   set_verbose: true # Enable verbose logging for debugging streaming issues