fix: remove vllm embedding

commit 69869ec3fb
parent cc270c8539
2025-11-27 01:11:43 +01:00


@@ -42,15 +42,6 @@ model_list:
       supports_system_messages: true # Llama supports system messages
       stream: true # Enable streaming by default
-  # Embeddings - BGE Large (Port 8002)
-  - model_name: bge-large-en-v1.5
-    litellm_params:
-      model: hosted_vllm/BAAI/bge-large-en-v1.5
-      api_base: os.environ/GPU_VLLM_EMBED_URL # Direct to vLLM embedding server
-      api_key: "EMPTY"
-      rpm: 1000
-      tpm: 500000
 litellm_settings:
   drop_params: false # DISABLED: Was breaking streaming
   set_verbose: true # Enable verbose logging for debugging streaming issues
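
For context, a minimal sketch of what this part of the proxy config plausibly looks like after the removal, reconstructed from the unchanged context lines in the hunk above. The name and shape of the remaining chat-model entry are assumptions, since the diff only shows two of its parameters:

```yaml
model_list:
  # Remaining chat model entry (model_name and most params are
  # assumptions; only the two flags below appear as diff context)
  - model_name: llama # hypothetical
    litellm_params:
      supports_system_messages: true # Llama supports system messages
      stream: true # Enable streaming by default

# The bge-large-en-v1.5 entry that routed to the vLLM embedding
# server (via GPU_VLLM_EMBED_URL) is gone, so litellm_settings
# now follows the model list directly.
litellm_settings:
  drop_params: false # DISABLED: Was breaking streaming
  set_verbose: true # Enable verbose logging for debugging streaming issues
```

One consequence of this change: the proxy no longer exposes `bge-large-en-v1.5`, so any client that still needs BGE embeddings would have to call the vLLM embedding server at `GPU_VLLM_EMBED_URL` directly.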