Add Llama 3.1 8B model to LiteLLM configuration
@@ -42,6 +42,18 @@ model_list:
       supports_system_messages: false # vLLM handles system messages differently
       stream: true # Enable streaming by default
+
+  - model_name: llama-3.1-8b
+    litellm_params:
+      model: hosted_vllm/openai/llama-3.1-8b # hosted_vllm/openai/ for vLLM via orchestrator
+      api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
+      api_key: dummy
+      rpm: 1000
+      tpm: 100000
+      timeout: 600 # 10 minutes for generation
+      stream_timeout: 600
+      supports_system_messages: true # Llama supports system messages
+      stream: true # Enable streaming by default
 
 litellm_settings:
   drop_params: false # DISABLED: Was breaking streaming
   set_verbose: true # Enable verbose logging for debugging streaming issues
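A quick way to exercise the new entry end to end is a streaming chat completion through the proxy's OpenAI-compatible endpoint. The sketch below uses the openai Python SDK; the proxy address (localhost:4000, LiteLLM's default port) and the client-side api_key are illustrative assumptions, not part of this commit.

from openai import OpenAI

# Talk to the LiteLLM proxy, not the vLLM backend directly; the proxy
# address here is an assumption (LiteLLM's default port is 4000).
client = OpenAI(base_url="http://localhost:4000/v1", api_key="dummy")

stream = client.chat.completions.create(
    model="llama-3.1-8b",  # model_name from the entry added above
    messages=[
        # System message included since the entry sets supports_system_messages: true
        {"role": "system", "content": "You are a concise assistant."},
        {"role": "user", "content": "Say hello in one sentence."},
    ],
    stream=True,  # exercises the streaming path this config enables by default
)

for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
print()

If chunks print incrementally rather than arriving as one final block, streaming through the vLLM backend is working, which is the behavior the drop_params: false setting was changed to preserve.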