From bf402adb2532827fb05fcb35088998210d3c1f26 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Fri, 21 Nov 2025 21:30:18 +0100
Subject: [PATCH] Add Llama 3.1 8B model to LiteLLM configuration

---
 ai/litellm-config.yaml | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index fcd2a66..11a8c7a 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -42,6 +42,18 @@ model_list:
       supports_system_messages: false # vLLM handles system messages differently
       stream: true # Enable streaming by default
 
+  - model_name: llama-3.1-8b
+    litellm_params:
+      model: hosted_vllm/openai/llama-3.1-8b # hosted_vllm/openai/ for vLLM via orchestrator
+      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
+      api_key: dummy
+      rpm: 1000
+      tpm: 100000
+      timeout: 600 # 10 minutes for generation
+      stream_timeout: 600
+      supports_system_messages: true # Llama supports system messages
+      stream: true # Enable streaming by default
+
 litellm_settings:
   drop_params: false # DISABLED: Was breaking streaming
   set_verbose: true # Enable verbose logging for debugging streaming issues