From 57b706abe62c3b6b31890eb708a21436f57c7f16 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Fri, 21 Nov 2025 16:28:54 +0100
Subject: [PATCH] fix: correct vLLM service port to 8000

- Updated qwen-2.5-7b port from 8001 to 8000 in models.yaml
- Matches the vLLM server's default port configuration
- Tested and verified: the orchestrator successfully loaded the model and
  generated a response
---
 model-orchestrator/models.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/model-orchestrator/models.yaml b/model-orchestrator/models.yaml
index d3e6a4b..613f0d2 100644
--- a/model-orchestrator/models.yaml
+++ b/model-orchestrator/models.yaml
@@ -7,7 +7,7 @@ models:
     type: text
     framework: vllm
     service_script: models/vllm/server.py
-    port: 8001
+    port: 8000
     vram_gb: 14
     startup_time_seconds: 120
     endpoint: /v1/chat/completions
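
Note for reviewers: below is a minimal smoke-test sketch for reproducing the
"tested and verified" step against the corrected port. It assumes the vLLM
OpenAI-compatible server started by models/vllm/server.py is reachable on
localhost:8000 and exposes the /v1/chat/completions endpoint listed in
models.yaml; the served model name "qwen-2.5-7b" is an assumption taken from
the orchestrator's model id and may differ from the name the server actually
registers.

    # smoke_test.py -- sketch only, stdlib-only request to the vLLM server
    import json
    import urllib.request

    payload = {
        "model": "qwen-2.5-7b",  # assumed served model name (orchestrator id)
        "messages": [{"role": "user", "content": "Say hello in one sentence."}],
        "max_tokens": 32,
    }

    req = urllib.request.Request(
        # port and endpoint taken from the corrected models.yaml entry
        "http://localhost:8000/v1/chat/completions",
        data=json.dumps(payload).encode("utf-8"),
        headers={"Content-Type": "application/json"},
    )

    with urllib.request.urlopen(req, timeout=30) as resp:
        body = json.load(resp)
        # print the generated text from the first choice
        print(body["choices"][0]["message"]["content"])

If the server were still configured for port 8001, this request would fail
with a connection error, which is the symptom this patch addresses.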