fix: use hosted_vllm/openai/ prefix for vLLM model via orchestrator
This commit is contained in:
@@ -32,7 +32,7 @@ model_list:
   # Text Generation
   - model_name: qwen-2.5-7b
     litellm_params:
-      model: hosted_vllm/qwen-2.5-7b # vLLM model ID from /v1/models endpoint
+      model: hosted_vllm/openai/qwen-2.5-7b # hosted_vllm/openai/ for vLLM via orchestrator
       api_base: http://100.121.199.88:9000/v1 # RunPod GPU via Tailscale
       api_key: dummy
       rpm: 1000
Reference in New Issue
Block a user