fix: use hosted_vllm/openai/ prefix for vLLM model via orchestrator

This commit is contained in:
2025-11-21 19:18:33 +01:00
parent 155016da97
commit ec903c16c2

View File

@@ -32,7 +32,7 @@ model_list:
# Text Generation
- model_name: qwen-2.5-7b
litellm_params:
- model: hosted_vllm/qwen-2.5-7b # vLLM model ID from /v1/models endpoint
+ model: hosted_vllm/openai/qwen-2.5-7b # hosted_vllm/openai/ for vLLM via orchestrator
api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
api_key: dummy
rpm: 1000