diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml index ea43d54..76cfd1d 100644 --- a/ai/litellm-config.yaml +++ b/ai/litellm-config.yaml @@ -32,7 +32,7 @@ model_list: # Text Generation - Llama 3.1 8B (Port 8001) - model_name: llama-3.1-8b litellm_params: - model: hosted_vllm/openai/llama-3.1-8b # hosted_vllm/openai/ prefix for proper streaming + model: hosted_vllm/meta-llama/Llama-3.1-8B-Instruct # hosted_vllm/ prefix for proper streaming api_base: os.environ/GPU_VLLM_LLAMA_URL # Direct to vLLM Llama server api_key: "EMPTY" # vLLM doesn't validate API keys rpm: 1000 @@ -45,7 +45,7 @@ model_list: # Embeddings - BGE Large (Port 8002) - model_name: bge-large-en-v1.5 litellm_params: - model: hosted_vllm/openai/bge-large-en-v1.5 + model: hosted_vllm/BAAI/bge-large-en-v1.5 api_base: os.environ/GPU_VLLM_EMBED_URL # Direct to vLLM embedding server api_key: "EMPTY" rpm: 1000