fix(ai): correct bge embedding model name to hosted_vllm/openai prefix
🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
@@ -58,7 +58,7 @@ model_list:
   # Embeddings - BGE Large (Port 8002)
   - model_name: bge-large-en-v1.5
     litellm_params:
-      model: hosted_vllm/BAAI/bge-large-en-v1.5
+      model: hosted_vllm/openai/bge-large-en-v1.5
       api_base: os.environ/GPU_VLLM_EMBED_URL  # Direct to vLLM embedding server
       api_key: "EMPTY"
       rpm: 1000
||||
Reference in New Issue
Block a user