feat: add BGE embedding model for concurrent operation with Llama

- Create config_bge.yaml for BAAI/bge-large-en-v1.5 on port 8002
- Reduce the Llama server's GPU memory utilization to 70% and its context length to 16K for concurrent use (both configs sketched below)
- Add BGE service to supervisor with vllm group
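
Neither vLLM config file appears in the diff below, so here is a minimal sketch of what config_bge.yaml might contain, assuming vLLM's YAML config keys mirror its CLI flags; the task name and the GPU memory figure are assumptions, not values taken from the commit:

```yaml
# Hypothetical sketch of services/vllm/config_bge.yaml (contents not shown in this commit view)
model: BAAI/bge-large-en-v1.5
task: embed                    # serve through the embeddings API; flag name may vary by vLLM version
port: 8002
gpu-memory-utilization: 0.20   # assumed small share, leaving room for the Llama server on the same GPU
```

The Llama reduction from the second bullet would then correspond to settings along these lines in the existing Llama config (filename assumed):

```yaml
# Assumed adjustments to the existing Llama config (e.g. services/vllm/config_llama.yaml)
gpu-memory-utilization: 0.70   # 70% of VRAM, per the commit message
max-model-len: 16384           # 16K context window
```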

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-30 19:55:13 +01:00
parent c32340f23c
commit f668e06228
3 changed files with 30 additions and 3 deletions


@@ -74,6 +74,24 @@ priority=200
 stopwaitsecs=30
+
+# vLLM BGE Embedding Server (Port 8002)
+[program:bge]
+command=services/vllm/venv/bin/vllm serve --config services/vllm/config_bge.yaml
+directory=.
+autostart=false
+autorestart=true
+startretries=3
+stderr_logfile=.logs/bge.err.log
+stdout_logfile=.logs/bge.out.log
+stdout_logfile_maxbytes=50MB
+stdout_logfile_backups=10
+stderr_logfile_maxbytes=50MB
+stderr_logfile_backups=10
+environment=HF_HOME="./.cache/vllm",HF_TOKEN="%(ENV_HF_TOKEN)s"
+priority=210
+stopwaitsecs=30
+
 # AudioCraft Studio Service
 [program:audiocraft]
 command=services/audiocraft/venv/bin/python services/audiocraft/main.py
@@ -113,7 +131,7 @@ programs=comfyui,webdav-sync
 priority=100
 
 [group:vllm]
-programs=llama
+programs=llama,bge
 priority=200
 
 [group:audiocraft]