feat: add BGE embedding model for concurrent operation with Llama

- Create config_bge.yaml for BAAI/bge-large-en-v1.5 on port 8002
- Reduce Llama VRAM to 70% and context to 16K for concurrent use
- Add BGE service to supervisor with vllm group

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit f668e06228 (parent c32340f23c)
2025-11-30 19:55:13 +01:00
3 changed files with 30 additions and 3 deletions

services/vllm/config_bge.yaml (new file)

@@ -0,0 +1,7 @@
+model: BAAI/bge-large-en-v1.5
+host: "0.0.0.0"
+port: 8002
+uvicorn-log-level: "info"
+gpu-memory-utilization: 0.15
+dtype: float16
+task: embed
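
Once this file is picked up, the embedding service should answer on vLLM's OpenAI-compatible API. A minimal client sketch, assuming the server is reachable on localhost and that `task: embed` exposes the standard `/v1/embeddings` route:

```python
# Minimal sketch: request embeddings from the BGE server on port 8002.
# Assumes localhost and vLLM's OpenAI-compatible /v1/embeddings endpoint.
import requests

resp = requests.post(
    "http://localhost:8002/v1/embeddings",
    json={
        "model": "BAAI/bge-large-en-v1.5",
        "input": ["a sentence to embed", "another sentence"],
    },
    timeout=60,
)
resp.raise_for_status()
vectors = [item["embedding"] for item in resp.json()["data"]]
print(len(vectors), len(vectors[0]))  # 2 vectors, 1024 dims for bge-large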

Llama vLLM config (meta-llama/Llama-3.1-8B-Instruct)

@@ -2,7 +2,9 @@ model: meta-llama/Llama-3.1-8B-Instruct
 host: "0.0.0.0"
 port: 8001
 uvicorn-log-level: "info"
-gpu-memory-utilization: 0.95
-max-model-len: 20480
+gpu-memory-utilization: 0.70
+max-model-len: 16384
 dtype: auto
 enforce-eager: false
+enable-auto-tool-choice: true
+tool-call-parser: "llama3_json"
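
The two fractions are chosen so that Llama (0.70) and BGE (0.15) together claim 85% of VRAM, leaving headroom for CUDA context and allocation spikes, and the smaller `max-model-len` shrinks the KV cache that must fit inside Llama's share. A minimal request sketch against the Llama server, assuming localhost and vLLM's OpenAI-compatible chat endpoint; prompt plus completion now have to stay within 16384 tokens:

```python
# Minimal sketch: chat completion against the Llama server on port 8001,
# via vLLM's OpenAI-compatible /v1/chat/completions endpoint (assumes localhost).
import requests

resp = requests.post(
    "http://localhost:8001/v1/chat/completions",
    json={
        "model": "meta-llama/Llama-3.1-8B-Instruct",
        "messages": [
            {"role": "user", "content": "Explain why KV-cache size depends on max-model-len."}
        ],
        "max_tokens": 256,
    },
    timeout=120,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])
```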

supervisor configuration

@@ -74,6 +74,24 @@ priority=200
 stopwaitsecs=30
+# vLLM BGE Embedding Server (Port 8002)
+[program:bge]
+command=services/vllm/venv/bin/vllm serve --config services/vllm/config_bge.yaml
+directory=.
+autostart=false
+autorestart=true
+startretries=3
+stderr_logfile=.logs/bge.err.log
+stdout_logfile=.logs/bge.out.log
+stdout_logfile_maxbytes=50MB
+stdout_logfile_backups=10
+stderr_logfile_maxbytes=50MB
+stderr_logfile_backups=10
+environment=HF_HOME="./.cache/vllm",HF_TOKEN="%(ENV_HF_TOKEN)s"
+priority=210
+stopwaitsecs=30
 # AudioCraft Studio Service
 [program:audiocraft]
 command=services/audiocraft/venv/bin/python services/audiocraft/main.py
@@ -113,7 +131,7 @@ programs=comfyui,webdav-sync
 priority=100
 [group:vllm]
-programs=llama
+programs=llama,bge
 priority=200
 [group:audiocraft]
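
Because the new program sets `autostart=false`, it has to be started explicitly, presumably with something like `supervisorctl start vllm:bge` now that it sits in the `vllm` group. Once both programs are running, a quick way to confirm they coexist on the GPU is to poll each server's health route; a minimal sketch, assuming localhost and vLLM's standard `/health` endpoint:

```python
# Minimal sketch: confirm the Llama (8001) and BGE (8002) servers are both up.
# Assumes localhost and vLLM's OpenAI-compatible server, which answers GET /health
# with HTTP 200 once the model has finished loading.
import requests

for name, port in [("llama", 8001), ("bge", 8002)]:
    try:
        ok = requests.get(f"http://localhost:{port}/health", timeout=5).status_code == 200
    except requests.RequestException:
        ok = False
    print(f"{name:6s} port {port}: {'up' if ok else 'down'}")
```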