feat: remove GPU support and simplify to CPU-only architecture

Developer committed 2026-02-19 12:41:13 +01:00
parent cff3eb0add
commit 706e6c431d
16 changed files with 116 additions and 323 deletions


@@ -75,20 +75,6 @@ async def get_system_info() -> SystemInfo:
     disk = psutil.disk_usage('/')
     disk_percent = disk.percent
 
-    # GPU
-    gpu_available = False
-    gpu_memory_mb = None
-    gpu_memory_used_mb = None
-    try:
-        import torch
-        gpu_available = torch.cuda.is_available()
-        if gpu_available:
-            gpu_memory_mb = int(torch.cuda.get_device_properties(0).total_memory / (1024 * 1024))
-            gpu_memory_used_mb = int(torch.cuda.memory_allocated(0) / (1024 * 1024))
-    except Exception:
-        pass
-
     # Models directory size
     models_size = file_manager.get_directory_size_mb(settings.models_dir)

@@ -103,9 +89,6 @@ async def get_system_info() -> SystemInfo:
         cpu_usage_percent=cpu_percent,
         memory_usage_percent=memory_percent,
         disk_usage_percent=disk_percent,
-        gpu_available=gpu_available,
-        gpu_memory_mb=gpu_memory_mb,
-        gpu_memory_used_mb=gpu_memory_used_mb,
         execution_providers=settings.get_execution_providers(),
         models_dir_size_mb=models_size,
         jobs_queue_length=queue_length,
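
With the torch probe gone, the endpoint reports only host-level metrics, all of which psutil provides directly. A minimal sketch of the surviving CPU-only gathering (the helper name collect_host_metrics is hypothetical; SystemInfo, settings, and file_manager come from the surrounding module):

import psutil

def collect_host_metrics() -> dict:
    """Gather the host figures the system-info endpoint reports."""
    return {
        'cpu_usage_percent': psutil.cpu_percent(interval=0.1),   # short sampling window
        'memory_usage_percent': psutil.virtual_memory().percent,
        'disk_usage_percent': psutil.disk_usage('/').percent,
    }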


@@ -20,9 +20,6 @@ class SystemInfo(BaseModel):
     cpu_usage_percent: float
     memory_usage_percent: float
     disk_usage_percent: float
-    gpu_available: bool
-    gpu_memory_mb: Optional[int] = None
-    gpu_memory_used_mb: Optional[int] = None
     execution_providers: list
     models_dir_size_mb: float
     jobs_queue_length: int
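
For reference, a sketch of the trimmed schema assembled from the context lines above (any fields outside this hunk are omitted):

from pydantic import BaseModel

class SystemInfo(BaseModel):
    """CPU-only system report; the three GPU fields are gone after this commit."""
    cpu_usage_percent: float
    memory_usage_percent: float
    disk_usage_percent: float
    execution_providers: list   # e.g. ['CPUExecutionProvider'] on this build
    models_dir_size_mb: float
    jobs_queue_length: int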


@@ -83,7 +83,7 @@ class RealESRGANBridge:
             tile=settings.tile_size,
             tile_pad=settings.tile_pad,
             pre_pad=0,
-            half=('cuda' in settings.get_execution_providers()),
+            half=False,
         )
         self.current_model = model_name

@@ -134,7 +134,7 @@ class RealESRGANBridge:
             tile=settings.tile_size,
             tile_pad=settings.tile_pad,
             pre_pad=0,
-            half=('cuda' in settings.get_execution_providers()),
+            half=False,
         )
         self.current_model = model_name
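
The half flag toggles fp16 inference in RealESRGANer, which only pays off on CUDA, so the CPU-only build pins it off instead of deriving it from the provider list. For context, a hedged construction sketch (the model path and RRDBNet hyper-parameters are illustrative, not taken from this repository):

from basicsr.archs.rrdbnet_arch import RRDBNet
from realesrgan import RealESRGANer

# Standard x4 RRDB network (the usual RealESRGAN_x4plus hyper-parameters).
model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64,
                num_block=23, num_grow_ch=32, scale=4)
upsampler = RealESRGANer(
    scale=4,
    model_path='weights/RealESRGAN_x4plus.pth',  # illustrative path
    model=model,
    tile=256,       # tiling bounds peak RAM, as settings.tile_size does above
    tile_pad=10,
    pre_pad=0,
    half=False,     # fp16 needs CUDA; stay in fp32 on CPU
)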
@@ -198,15 +198,6 @@ class RealESRGANBridge:
             return self.upsampler.scale
         return 4
 
-    def clear_memory(self) -> None:
-        """Clear GPU memory if available."""
-        try:
-            import torch
-            torch.cuda.empty_cache()
-            logger.debug('GPU memory cleared')
-        except Exception:
-            pass
-
 
 # Global instance
 _bridge: Optional[RealESRGANBridge] = None
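
With CUDA gone there is no device cache to flush, so clear_memory had nothing left to do: CPU tensor memory returns to the allocator as soon as references are dropped. If callers still want an explicit hook, a best-effort CPU-side stand-in (hypothetical, not part of this commit) reduces to garbage collection:

import gc

def clear_memory() -> None:
    """Best-effort release of Python-level garbage; there is no device cache on CPU."""
    gc.collect()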