diff --git a/ai/litellm-config.yaml b/ai/litellm-config.yaml
index 38eb995..2f7adb2 100644
--- a/ai/litellm-config.yaml
+++ b/ai/litellm-config.yaml
@@ -32,7 +32,7 @@ model_list:
   # Text Generation
   - model_name: qwen-2.5-7b
     litellm_params:
-      model: hosted_vllm/qwen-2.5-7b # vLLM model ID from /v1/models endpoint
+      model: hosted_vllm/openai/qwen-2.5-7b # hosted_vllm/openai/ for vLLM via orchestrator
       api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
       api_key: dummy
       rpm: 1000