Initial commit: FaceFusion REST API
FastAPI wrapper around FaceFusion v3.5.3 submodule with: - Sync and async (job-based) processing endpoints - FaceFusion bridge with manual key registration and Lock-serialized processing - Multi-target Dockerfile (CPU + CUDA GPU) - Docker Compose configs for dev, prod-cpu, and prod-gpu - Gitea CI/CD workflow with dual image builds - All 11 FaceFusion processors supported via options API Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
0
app/__init__.py
Normal file
0
app/__init__.py
Normal file
38
app/config.py
Normal file
38
app/config.py
Normal file
@@ -0,0 +1,38 @@
|
||||
import json
|
||||
from typing import List
|
||||
|
||||
from pydantic_settings import BaseSettings
|
||||
|
||||
|
||||
class Settings(BaseSettings):
    """Service configuration, loaded from FF_-prefixed environment variables."""

    model_config = {'env_prefix': 'FF_'}

    # Paths
    upload_dir: str = '/data/uploads'
    output_dir: str = '/data/outputs'
    models_dir: str = '/data/models'
    temp_dir: str = '/data/temp'
    jobs_dir: str = '/data/jobs'

    # FaceFusion defaults
    execution_providers: str = '["cpu"]'  # JSON-encoded list; decode via get_execution_providers()
    execution_thread_count: int = 4
    video_memory_strategy: str = 'moderate'
    face_detector_model: str = 'yolo_face'
    download_providers: str = '["github", "huggingface"]'  # JSON-encoded list; decode via get_download_providers()
    download_scope: str = 'lite'
    log_level: str = 'info'

    # Limits
    max_upload_size_mb: int = 500
    sync_timeout_seconds: int = 120
    auto_cleanup_hours: int = 24

    def get_execution_providers(self) -> List[str]:
        """Decode the JSON-encoded execution_providers setting into a list."""
        return json.loads(self.execution_providers)

    def get_download_providers(self) -> List[str]:
        """Decode the JSON-encoded download_providers setting into a list."""
        return json.loads(self.download_providers)
|
||||
|
||||
|
||||
# Module-level singleton; reads FF_-prefixed environment variables at import time.
settings = Settings()
|
||||
50
app/main.py
Normal file
50
app/main.py
Normal file
@@ -0,0 +1,50 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
from contextlib import asynccontextmanager
|
||||
|
||||
from fastapi import FastAPI
|
||||
|
||||
# Ensure FaceFusion submodule is importable (must happen before any facefusion imports)
# Dev layout: {project_root}/facefusion; Docker layout: /app/facefusion-src.
_project_root = os.path.dirname(os.path.dirname(__file__))
for _candidate in (os.path.join(_project_root, 'facefusion'), '/app/facefusion-src'):
    # Only accept a candidate that actually contains the 'facefusion' package dir.
    if os.path.isdir(os.path.join(_candidate, 'facefusion')) and _candidate not in sys.path:
        sys.path.insert(0, _candidate)
        break
|
||||
|
||||
from app.routers import jobs, process, processors, system
|
||||
from app.services import facefusion_bridge, file_manager
|
||||
from app.services.worker import worker_queue
|
||||
|
||||
# Root logger configuration for the whole service; applies to all app loggers.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)s %(name)s: %(message)s',
)
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook.

    Startup: create data directories, initialize the FaceFusion bridge, and
    start the background worker queue. Shutdown: stop the worker queue.
    """
    # Startup
    logger.info('Starting FaceFusion API...')
    file_manager.ensure_directories()
    facefusion_bridge.initialize()
    # The worker consumes queued jobs using the bridge's synchronous processor.
    worker_queue.start(facefusion_bridge.process_sync)
    logger.info('FaceFusion API ready')
    yield
    # Shutdown
    logger.info('Shutting down...')
    worker_queue.stop()
|
||||
|
||||
|
||||
# Application instance; lifespan handles service startup/shutdown.
app = FastAPI(
    title='FaceFusion API',
    version='1.0.0',
    description='REST API for FaceFusion face processing',
    lifespan=lifespan,
)

# Mount all API routers in a fixed order.
for _api_router in (process.router, jobs.router, processors.router, system.router):
    app.include_router(_api_router)
|
||||
0
app/routers/__init__.py
Normal file
0
app/routers/__init__.py
Normal file
105
app/routers/jobs.py
Normal file
105
app/routers/jobs.py
Normal file
@@ -0,0 +1,105 @@
|
||||
import json
|
||||
import logging
|
||||
import uuid
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import APIRouter, File, Form, HTTPException, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
|
||||
from app.schemas.jobs import JobCreateResponse, JobDeleteResponse, JobStatus, JobStatusResponse
|
||||
from app.schemas.process import ProcessingOptions
|
||||
from app.services import facefusion_bridge, file_manager
|
||||
from app.services.worker import worker_queue
|
||||
|
||||
logger = logging.getLogger(__name__)

# Async job endpoints live under /api/v1/jobs.
router = APIRouter(prefix='/api/v1/jobs', tags=['jobs'])
|
||||
|
||||
|
||||
@router.post('', response_model=JobCreateResponse)
async def create_job(
    target: UploadFile = File(...),
    source: Optional[List[UploadFile]] = File(None),
    options: Optional[str] = Form(None),
):
    """Create an async processing job.

    Saves the uploaded target (and optional source) files into a fresh
    request directory, validates the JSON ``options`` payload against
    ProcessingOptions, builds the FaceFusion argument dict and submits it
    to the background worker queue.

    Returns:
        JobCreateResponse with the new job id and ``pending`` status.

    Raises:
        HTTPException 422: when ``options`` is not valid JSON or fails
            ProcessingOptions validation.
        HTTPException 500: on any other failure while preparing the job.
    """
    job_id = str(uuid.uuid4())
    request_dir = file_manager.create_request_dir()

    try:
        parsed_options = None
        if options:
            try:
                parsed_options = json.loads(options)
                ProcessingOptions(**parsed_options)  # validate only
            except Exception as e:  # JSONDecodeError or pydantic validation error
                raise HTTPException(status_code=422, detail=f'Invalid options: {e}')

        target_path = await file_manager.save_upload(target, request_dir)
        source_paths = []
        if source:
            source_paths = await file_manager.save_uploads(source, request_dir)

        output_path = file_manager.generate_output_path(target_path)

        args = facefusion_bridge.build_args_from_options(
            source_paths=source_paths,
            target_path=target_path,
            output_path=output_path,
            options=parsed_options,
        )

        worker_queue.submit(job_id, args)

        # On success the request_dir is intentionally kept: the worker still
        # needs the uploaded files to process the job.
        return JobCreateResponse(job_id=job_id, status=JobStatus.pending)
    except HTTPException:
        # Fix: previously the 422 path leaked request_dir; clean it up here too.
        file_manager.cleanup_directory(request_dir)
        raise
    except Exception as e:
        file_manager.cleanup_directory(request_dir)
        logger.error(f'Job creation failed: {e}')
        raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.get('/{job_id}', response_model=JobStatusResponse)
async def get_job_status(job_id: str):
    """Return the current status of a previously submitted job."""
    record = worker_queue.get_job(job_id)
    if not record:
        raise HTTPException(status_code=404, detail='Job not found')

    # Map the worker-side status enum onto the API-level JobStatus by value.
    return JobStatusResponse(
        job_id=record.job_id,
        status=JobStatus(record.status.value),
        created_at=record.created_at,
        updated_at=record.updated_at,
        error=record.error,
    )
|
||||
|
||||
|
||||
@router.get('/{job_id}/result')
async def get_job_result(job_id: str):
    """Download job result. Only available when job is completed.

    Raises:
        HTTPException 404: unknown job id.
        HTTPException 409: job exists but is not in the completed state.
        HTTPException 500: job completed but the output file is missing.
    """
    import os

    job = worker_queue.get_job(job_id)
    if not job:
        raise HTTPException(status_code=404, detail='Job not found')

    # Fix: job.status is an enum elsewhere in this module (.value is read in
    # get_job_status), so comparing it directly to the string 'completed' can
    # never match for a non-str enum. Compare by value, falling back to the
    # raw attribute when it is already a plain string.
    status_value = getattr(job.status, 'value', job.status)
    if status_value != 'completed':
        raise HTTPException(status_code=409, detail=f'Job status is {status_value}, not completed')

    # Guard against a recorded path whose file has been cleaned up or moved.
    if not job.output_path or not os.path.isfile(job.output_path):
        raise HTTPException(status_code=500, detail='No output file')

    return FileResponse(
        path=job.output_path,
        media_type='application/octet-stream',
    )
|
||||
|
||||
|
||||
@router.delete('/{job_id}', response_model=JobDeleteResponse)
async def delete_job(job_id: str):
    """Cancel/delete a job."""
    if not worker_queue.delete_job(job_id):
        raise HTTPException(status_code=404, detail='Job not found')
    return JobDeleteResponse(job_id=job_id, deleted=True)
|
||||
70
app/routers/process.py
Normal file
70
app/routers/process.py
Normal file
@@ -0,0 +1,70 @@
|
||||
import json
|
||||
import logging
|
||||
from time import time
|
||||
from typing import List, Optional
|
||||
|
||||
from fastapi import APIRouter, File, Form, HTTPException, UploadFile
|
||||
from fastapi.responses import FileResponse
|
||||
|
||||
from app.schemas.process import ProcessingOptions
|
||||
from app.services import facefusion_bridge, file_manager
|
||||
|
||||
logger = logging.getLogger(__name__)

# Synchronous processing endpoints live under /api/v1.
router = APIRouter(prefix='/api/v1', tags=['processing'])
|
||||
|
||||
|
||||
@router.post('/process')
async def process_sync(
    target: UploadFile = File(...),
    source: Optional[List[UploadFile]] = File(None),
    options: Optional[str] = Form(None),
):
    """Synchronous face processing. Returns the result file directly.

    Saves the uploads into a per-request directory, validates the JSON
    ``options`` payload, runs FaceFusion synchronously, and streams the
    output file back with an X-Processing-Time header.

    Raises:
        HTTPException 422: invalid ``options`` payload.
        HTTPException 500: processing failure.
    """
    request_dir = file_manager.create_request_dir()

    try:
        # Parse options
        parsed_options = None
        if options:
            try:
                parsed_options = json.loads(options)
                ProcessingOptions(**parsed_options)  # validate
            # Fix: (json.JSONDecodeError, Exception) was redundant - Exception
            # already subsumes JSONDecodeError.
            except Exception as e:
                raise HTTPException(status_code=422, detail=f'Invalid options: {e}')

        # Save uploads
        target_path = await file_manager.save_upload(target, request_dir)
        source_paths = []
        if source:
            source_paths = await file_manager.save_uploads(source, request_dir)

        output_path = file_manager.generate_output_path(target_path)

        # Build args and process
        args = facefusion_bridge.build_args_from_options(
            source_paths=source_paths,
            target_path=target_path,
            output_path=output_path,
            options=parsed_options,
        )

        start_time = time()
        facefusion_bridge.process_sync(args)
        processing_time = time() - start_time

        logger.info(f'Sync processing completed in {processing_time:.2f}s')

        return FileResponse(
            path=output_path,
            media_type='application/octet-stream',
            filename=target.filename,
            headers={'X-Processing-Time': f'{processing_time:.2f}'},
        )
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f'Processing failed: {e}')
        raise HTTPException(status_code=500, detail=str(e))
    finally:
        # NOTE(review): this runs before FileResponse has streamed the file;
        # assumes generate_output_path() writes OUTSIDE request_dir - confirm,
        # otherwise the result could be deleted before it is sent.
        file_manager.cleanup_directory(request_dir)
|
||||
47
app/routers/processors.py
Normal file
47
app/routers/processors.py
Normal file
@@ -0,0 +1,47 @@
|
||||
import logging
from typing import List, Optional

from fastapi import APIRouter, HTTPException

from app.schemas.system import ModelInfo, ProcessorInfo
from app.services import facefusion_bridge, file_manager
|
||||
|
||||
logger = logging.getLogger(__name__)

# Processor/model discovery endpoints live under /api/v1.
router = APIRouter(prefix='/api/v1', tags=['processors'])
|
||||
|
||||
|
||||
@router.get('/processors', response_model=List[ProcessorInfo])
async def list_processors():
    """List available processors and their models."""
    try:
        names = facefusion_bridge.get_available_processors()
        # Model lists are not resolved here; each entry carries an empty list.
        return [ProcessorInfo(name=processor_name, models=[]) for processor_name in names]
    except Exception as e:
        logger.error(f'Failed to list processors: {e}')
        raise HTTPException(status_code=500, detail=str(e))
|
||||
|
||||
|
||||
@router.get('/models', response_model=List[ModelInfo])
async def list_models():
    """List downloaded model files."""
    entries = []
    for model_name, model_path, model_size in file_manager.list_model_files():
        entries.append(ModelInfo(name=model_name, path=model_path, size_bytes=model_size))
    return entries
|
||||
|
||||
|
||||
@router.post('/models/download')
async def download_models(processors: Optional[List[str]] = None):
    """Trigger model download for specified processors.

    Fix: the parameter was annotated ``List[str] = None`` - a None default on
    a non-optional annotation; it is now correctly Optional[List[str]].

    Raises:
        HTTPException 500: when any model fails to download.
    """
    try:
        success = facefusion_bridge.force_download_models(processors)
        if success:
            return {'status': 'ok', 'message': 'Models downloaded successfully'}
        raise HTTPException(status_code=500, detail='Some models failed to download')
    except HTTPException:
        raise
    except Exception as e:
        logger.error(f'Model download failed: {e}')
        raise HTTPException(status_code=500, detail=str(e))
|
||||
56
app/routers/system.py
Normal file
56
app/routers/system.py
Normal file
@@ -0,0 +1,56 @@
|
||||
import logging
|
||||
import os
|
||||
|
||||
import psutil
|
||||
|
||||
from fastapi import APIRouter
|
||||
|
||||
from app.schemas.system import GpuDevice, HealthResponse, SystemInfoResponse
|
||||
from app.services import facefusion_bridge
|
||||
|
||||
logger = logging.getLogger(__name__)

# Health and diagnostics endpoints live under /api/v1.
router = APIRouter(prefix='/api/v1', tags=['system'])
|
||||
|
||||
|
||||
@router.get('/health', response_model=HealthResponse)
async def health_check():
    """Liveness probe; returns the default HealthResponse (status 'ok')."""
    return HealthResponse()
|
||||
|
||||
|
||||
@router.get('/system', response_model=SystemInfoResponse)
async def system_info():
    """Report execution providers, detected GPUs, CPU count and memory stats."""
    memory = psutil.virtual_memory()

    return SystemInfoResponse(
        execution_providers=facefusion_bridge.get_execution_providers(),
        gpu_devices=_detect_gpu_devices(),
        cpu_count=os.cpu_count(),
        memory_total=memory.total,
        memory_available=memory.available,
    )
|
||||
|
||||
|
||||
def _detect_gpu_devices():
|
||||
devices = []
|
||||
try:
|
||||
import subprocess
|
||||
result = subprocess.run(
|
||||
['nvidia-smi', '--query-gpu=index,name,memory.total,memory.used', '--format=csv,noheader,nounits'],
|
||||
capture_output=True, text=True, timeout=5,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
for line in result.stdout.strip().split('\n'):
|
||||
parts = [p.strip() for p in line.split(',')]
|
||||
if len(parts) >= 4:
|
||||
devices.append(GpuDevice(
|
||||
id=int(parts[0]),
|
||||
name=parts[1],
|
||||
memory_total=int(float(parts[2])) * 1024 * 1024,
|
||||
memory_used=int(float(parts[3])) * 1024 * 1024,
|
||||
))
|
||||
except (FileNotFoundError, subprocess.TimeoutExpired):
|
||||
pass
|
||||
return devices
|
||||
0
app/schemas/__init__.py
Normal file
0
app/schemas/__init__.py
Normal file
31
app/schemas/jobs.py
Normal file
31
app/schemas/jobs.py
Normal file
@@ -0,0 +1,31 @@
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from typing import Optional
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class JobStatus(str, Enum):
    """Lifecycle states of an async processing job."""
    pending = 'pending'
    processing = 'processing'
    completed = 'completed'
    failed = 'failed'
    cancelled = 'cancelled'
|
||||
|
||||
|
||||
class JobCreateResponse(BaseModel):
    """Response body for POST /api/v1/jobs."""
    job_id: str
    status: JobStatus
|
||||
|
||||
|
||||
class JobStatusResponse(BaseModel):
    """Response body for GET /api/v1/jobs/{job_id}."""
    job_id: str
    status: JobStatus
    created_at: datetime
    updated_at: Optional[datetime] = None
    error: Optional[str] = None
|
||||
|
||||
|
||||
class JobDeleteResponse(BaseModel):
    """Response body for DELETE /api/v1/jobs/{job_id}."""
    job_id: str
    deleted: bool
|
||||
114
app/schemas/process.py
Normal file
114
app/schemas/process.py
Normal file
@@ -0,0 +1,114 @@
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class FaceSwapperOptions(BaseModel):
    """Options for the face_swapper processor."""
    model: str = 'hyperswap_1a_256'
    pixel_boost: Optional[str] = None
    weight: float = 0.5
|
||||
|
||||
|
||||
class FaceEnhancerOptions(BaseModel):
    """Options for the face_enhancer processor."""
    model: str = 'gfpgan_1.4'
    blend: int = 80
    weight: float = 0.5
|
||||
|
||||
|
||||
class FaceEditorOptions(BaseModel):
    """Options for the face_editor processor; all adjustments optional."""
    model: str = 'live_portrait'
    eyebrow_direction: Optional[float] = None
    eye_gaze_horizontal: Optional[float] = None
    eye_gaze_vertical: Optional[float] = None
    eye_open_ratio: Optional[float] = None
    lip_open_ratio: Optional[float] = None
    mouth_grim: Optional[float] = None
    mouth_pout: Optional[float] = None
    mouth_purse: Optional[float] = None
    mouth_smile: Optional[float] = None
    mouth_position_horizontal: Optional[float] = None
    mouth_position_vertical: Optional[float] = None
    head_pitch: Optional[float] = None
    head_yaw: Optional[float] = None
    head_roll: Optional[float] = None
|
||||
|
||||
|
||||
class LipSyncerOptions(BaseModel):
    """Options for the lip_syncer processor."""
    model: str = 'wav2lip_96'
|
||||
|
||||
|
||||
class AgeModifierOptions(BaseModel):
    """Options for the age_modifier processor."""
    model: str = 'styleganex_age'
    direction: int = 0
|
||||
|
||||
|
||||
class ExpressionRestorerOptions(BaseModel):
    """Options for the expression_restorer processor."""
    model: str = 'live_portrait'
    factor: int = 80
|
||||
|
||||
|
||||
class FrameEnhancerOptions(BaseModel):
    """Options for the frame_enhancer processor."""
    model: str = 'span_kendata_1x'
    blend: int = 80
|
||||
|
||||
|
||||
class FrameColorizerOptions(BaseModel):
    """Options for the frame_colorizer processor."""
    model: str = 'ddcolor'
    blend: int = 80
    size: str = '256x256'
|
||||
|
||||
|
||||
class BackgroundRemoverOptions(BaseModel):
    """Options for the background_remover processor."""
    model: str = 'isnet_general_use'
|
||||
|
||||
|
||||
class FaceDetectorOptions(BaseModel):
    """Face detection settings applied before any processor runs."""
    model: str = 'yolo_face'
    size: str = '640x640'
    score: float = 0.5
|
||||
|
||||
|
||||
class FaceSelectorOptions(BaseModel):
    """Controls which detected faces are processed."""
    mode: str = 'reference'
    order: str = 'large-small'
    gender: Optional[str] = None
    race: Optional[str] = None
    age_start: Optional[int] = None
    age_end: Optional[int] = None
|
||||
|
||||
|
||||
class OutputOptions(BaseModel):
    """Output encoding settings for image, video and audio results."""
    image_quality: int = 80
    image_scale: float = 1.0
    video_encoder: Optional[str] = None
    video_preset: str = 'veryfast'
    video_quality: int = 80
    video_scale: float = 1.0
    video_fps: Optional[float] = None
    audio_encoder: Optional[str] = None
    audio_quality: int = 80
    audio_volume: int = 100
|
||||
|
||||
|
||||
class ProcessingOptions(BaseModel):
    """Top-level request options: which processors to run plus per-processor,
    detector/selector, output and execution overrides."""
    processors: List[str] = Field(default_factory=lambda: ['face_swapper'])
    face_swapper: Optional[FaceSwapperOptions] = None
    face_enhancer: Optional[FaceEnhancerOptions] = None
    face_editor: Optional[FaceEditorOptions] = None
    lip_syncer: Optional[LipSyncerOptions] = None
    age_modifier: Optional[AgeModifierOptions] = None
    expression_restorer: Optional[ExpressionRestorerOptions] = None
    frame_enhancer: Optional[FrameEnhancerOptions] = None
    frame_colorizer: Optional[FrameColorizerOptions] = None
    background_remover: Optional[BackgroundRemoverOptions] = None
    face_detector: Optional[FaceDetectorOptions] = None
    face_selector: Optional[FaceSelectorOptions] = None
    output: Optional[OutputOptions] = None
    execution_providers: Optional[List[str]] = None
    execution_thread_count: Optional[int] = None
    video_memory_strategy: Optional[str] = None
|
||||
|
||||
|
||||
class ProcessingResponse(BaseModel):
    """Metadata about a completed synchronous processing run."""
    output_path: str
    processing_time: float
|
||||
33
app/schemas/system.py
Normal file
33
app/schemas/system.py
Normal file
@@ -0,0 +1,33 @@
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from pydantic import BaseModel
|
||||
|
||||
|
||||
class HealthResponse(BaseModel):
    """Response body for GET /api/v1/health."""
    status: str = 'ok'
|
||||
|
||||
|
||||
class GpuDevice(BaseModel):
    """A single detected GPU; memory fields are in bytes."""
    id: int
    name: str
    memory_total: Optional[int] = None
    memory_used: Optional[int] = None
|
||||
|
||||
|
||||
class SystemInfoResponse(BaseModel):
    """Response body for GET /api/v1/system; memory fields are in bytes."""
    execution_providers: List[str]
    gpu_devices: List[GpuDevice]
    cpu_count: Optional[int] = None
    memory_total: Optional[int] = None
    memory_available: Optional[int] = None
|
||||
|
||||
|
||||
class ProcessorInfo(BaseModel):
    """A FaceFusion processor and its known model names."""
    name: str
    models: List[str]
|
||||
|
||||
|
||||
class ModelInfo(BaseModel):
    """A downloaded model file on disk."""
    name: str
    path: str
    size_bytes: int
|
||||
0
app/services/__init__.py
Normal file
0
app/services/__init__.py
Normal file
459
app/services/facefusion_bridge.py
Normal file
459
app/services/facefusion_bridge.py
Normal file
@@ -0,0 +1,459 @@
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
from typing import Any, Dict, List, Optional
|
||||
|
||||
from app.config import settings
|
||||
|
||||
logger = logging.getLogger(__name__)

# One-time initialization flag for the bridge (set by initialize()).
_initialized = False
# NOTE(review): _lock appears intended to serialize FaceFusion processing
# (per the commit message); its acquisition site is not visible in this chunk
# - confirm against process_sync().
_lock = threading.Lock()
|
||||
|
||||
# All job_keys registered by program.py
# These mirror the keys FaceFusion's CLI registers with job_store, so the API
# can replicate its side effects without running the CLI.
JOB_KEYS = [
    'config_path', 'temp_path', 'jobs_path',
    'source_pattern', 'target_pattern', 'output_pattern',
    'download_providers', 'download_scope',
    'execution_device_ids', 'execution_providers', 'execution_thread_count',
    'video_memory_strategy', 'system_memory_limit',
    'log_level', 'halt_on_error',
]
|
||||
|
||||
# All step_keys registered by program.py + processor modules
# Per-step (per-run) keys; kept in the same grouping/order as FaceFusion's
# own registration so drift against the submodule is easy to spot on review.
STEP_KEYS = [
    # paths
    'source_paths', 'target_path', 'output_path',
    # face detector
    'face_detector_model', 'face_detector_size', 'face_detector_margin',
    'face_detector_angles', 'face_detector_score',
    # face landmarker
    'face_landmarker_model', 'face_landmarker_score',
    # face selector
    'face_selector_mode', 'face_selector_order', 'face_selector_gender',
    'face_selector_race', 'face_selector_age_start', 'face_selector_age_end',
    'reference_face_position', 'reference_face_distance', 'reference_frame_number',
    # face masker
    'face_occluder_model', 'face_parser_model', 'face_mask_types',
    'face_mask_areas', 'face_mask_regions', 'face_mask_blur', 'face_mask_padding',
    # voice extractor
    'voice_extractor_model',
    # frame extraction
    'trim_frame_start', 'trim_frame_end', 'temp_frame_format', 'keep_temp',
    # output creation
    'output_image_quality', 'output_image_scale', 'output_audio_encoder',
    'output_audio_quality', 'output_audio_volume', 'output_video_encoder',
    'output_video_preset', 'output_video_quality', 'output_video_scale',
    'output_video_fps',
    # processors
    'processors',
    # processor-specific: face_swapper
    'face_swapper_model', 'face_swapper_pixel_boost', 'face_swapper_weight',
    # processor-specific: face_enhancer
    'face_enhancer_model', 'face_enhancer_blend', 'face_enhancer_weight',
    # processor-specific: face_editor
    'face_editor_model', 'face_editor_eyebrow_direction',
    'face_editor_eye_gaze_horizontal', 'face_editor_eye_gaze_vertical',
    'face_editor_eye_open_ratio', 'face_editor_lip_open_ratio',
    'face_editor_mouth_grim', 'face_editor_mouth_pout', 'face_editor_mouth_purse',
    'face_editor_mouth_smile', 'face_editor_mouth_position_horizontal',
    'face_editor_mouth_position_vertical', 'face_editor_head_pitch',
    'face_editor_head_yaw', 'face_editor_head_roll',
    # processor-specific: lip_syncer
    'lip_syncer_model', 'lip_syncer_weight',
    # processor-specific: age_modifier
    'age_modifier_model', 'age_modifier_direction',
    # processor-specific: expression_restorer
    'expression_restorer_model', 'expression_restorer_factor', 'expression_restorer_areas',
    # processor-specific: frame_enhancer
    'frame_enhancer_model', 'frame_enhancer_blend',
    # processor-specific: frame_colorizer
    'frame_colorizer_model', 'frame_colorizer_blend', 'frame_colorizer_size',
    # processor-specific: background_remover
    'background_remover_model', 'background_remover_color',
    # processor-specific: deep_swapper
    'deep_swapper_model', 'deep_swapper_morph',
    # processor-specific: face_debugger
    'face_debugger_items',
]
|
||||
|
||||
|
||||
def _find_submodule_root() -> str:
|
||||
"""Find the FaceFusion submodule root directory.
|
||||
|
||||
In development: {project_root}/facefusion/
|
||||
In Docker: /app/facefusion-src/
|
||||
"""
|
||||
project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
|
||||
candidates = [
|
||||
os.path.join(project_root, 'facefusion'), # dev: project_root/facefusion/
|
||||
'/app/facefusion-src', # Docker
|
||||
]
|
||||
for candidate in candidates:
|
||||
if os.path.isdir(os.path.join(candidate, 'facefusion')):
|
||||
return candidate
|
||||
raise RuntimeError('FaceFusion submodule not found')
|
||||
|
||||
|
||||
def _setup_sys_path() -> None:
    """Prepend the FaceFusion submodule root to sys.path exactly once."""
    root = _find_submodule_root()
    if root in sys.path:
        return
    sys.path.insert(0, root)
|
||||
|
||||
|
||||
def _find_config_path() -> str:
    """Find facefusion.ini - check submodule root first, then project root."""
    project_root = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))

    for base in (_find_submodule_root(), project_root):
        candidate = os.path.join(base, 'facefusion.ini')
        if os.path.isfile(candidate):
            return candidate

    # Fall back to the project-root location even if the file is absent.
    return os.path.join(project_root, 'facefusion.ini')
|
||||
|
||||
|
||||
def _setup_models_symlink() -> None:
    """Point the submodule's .assets/models directory at the shared models dir.

    FaceFusion resolves models under {submodule}/.assets/models; the API keeps
    them in settings.models_dir so they persist outside the submodule tree.
    """
    submodule_root = _find_submodule_root()
    assets_dir = os.path.join(submodule_root, '.assets')
    os.makedirs(assets_dir, exist_ok=True)

    models_link = os.path.join(assets_dir, 'models')
    models_target = settings.models_dir
    os.makedirs(models_target, exist_ok=True)

    if os.path.islink(models_link):
        # Already linked to the right place - nothing to do.
        if os.readlink(models_link) == models_target:
            return
        # Stale link pointing elsewhere: remove so it can be re-created below.
        os.unlink(models_link)
    elif os.path.isdir(models_link):
        # Real dir exists - leave it alone in dev mode
        return

    # NOTE(review): a regular FILE at models_link would make os.symlink raise
    # FileExistsError - confirm that case cannot occur in practice.
    os.symlink(models_target, models_link)
    logger.info(f'Symlinked {models_link} -> {models_target}')
|
||||
|
||||
|
||||
def initialize() -> None:
    """One-time FaceFusion bridge setup.

    Configures sys.path and the models symlink, registers job/step keys,
    seeds state_manager with the API defaults, and initializes the FaceFusion
    job system and logger. Replicates program.py's side effects so headless
    runs work without the CLI.

    Raises:
        RuntimeError: if the FaceFusion job system cannot be initialized.
    """
    global _initialized
    if _initialized:
        return
    # NOTE(review): this check-then-set is not guarded by _lock; safe only as
    # long as initialize() is invoked once from the startup hook - confirm.

    _setup_sys_path()
    _setup_models_symlink()

    # Imported lazily: sys.path must include the submodule root first.
    from facefusion import state_manager
    from facefusion.jobs import job_manager, job_store

    # Register all keys (replicating program.py side-effects)
    job_store.register_job_keys(JOB_KEYS)
    job_store.register_step_keys(STEP_KEYS)

    # Initialize state with API defaults
    defaults = _build_defaults()
    for key, value in defaults.items():
        state_manager.init_item(key, value)

    # Initialize job system
    os.makedirs(settings.jobs_dir, exist_ok=True)
    if not job_manager.init_jobs(settings.jobs_dir):
        raise RuntimeError('Failed to initialize FaceFusion job system')

    # Initialize logger
    from facefusion import logger as ff_logger
    ff_logger.init(settings.log_level)

    _initialized = True
    logger.info('FaceFusion bridge initialized')
|
||||
|
||||
|
||||
def _build_defaults() -> Dict[str, Any]:
    """Build the full default FaceFusion state/args dict.

    Every key FaceFusion's apply_args path may read must be present, even
    keys the API never exposes (UI, benchmark, jobs), otherwise lookups fail.
    Values come from service settings where configurable, and mirror the
    submodule's own defaults otherwise.
    """
    return {
        'command': 'headless-run',
        'config_path': _find_config_path(),
        'temp_path': settings.temp_dir,
        'jobs_path': settings.jobs_dir,
        'source_paths': None,
        'target_path': None,
        'output_path': None,
        'source_pattern': None,
        'target_pattern': None,
        'output_pattern': None,
        # face detector
        'face_detector_model': settings.face_detector_model,
        'face_detector_size': '640x640',
        'face_detector_margin': [0, 0, 0, 0],
        'face_detector_angles': [0],
        'face_detector_score': 0.5,
        # face landmarker
        'face_landmarker_model': '2dfan4',
        'face_landmarker_score': 0.5,
        # face selector
        'face_selector_mode': 'reference',
        'face_selector_order': 'large-small',
        'face_selector_age_start': None,
        'face_selector_age_end': None,
        'face_selector_gender': None,
        'face_selector_race': None,
        'reference_face_position': 0,
        'reference_face_distance': 0.3,
        'reference_frame_number': 0,
        # face masker
        'face_occluder_model': 'xseg_1',
        'face_parser_model': 'bisenet_resnet_34',
        'face_mask_types': ['box'],
        'face_mask_areas': ['upper-face', 'lower-face', 'mouth'],
        'face_mask_regions': ['skin', 'left-eyebrow', 'right-eyebrow', 'left-eye', 'right-eye',
                              'glasses', 'nose', 'mouth', 'upper-lip', 'lower-lip'],
        'face_mask_blur': 0.3,
        'face_mask_padding': [0, 0, 0, 0],
        # voice extractor
        'voice_extractor_model': 'kim_vocal_2',
        # frame extraction
        'trim_frame_start': None,
        'trim_frame_end': None,
        'temp_frame_format': 'png',
        'keep_temp': False,
        # output creation
        'output_image_quality': 80,
        'output_image_scale': 1.0,
        'output_audio_encoder': None,
        'output_audio_quality': 80,
        'output_audio_volume': 100,
        'output_video_encoder': None,
        'output_video_preset': 'veryfast',
        'output_video_quality': 80,
        'output_video_scale': 1.0,
        'output_video_fps': None,
        # processors
        'processors': ['face_swapper'],
        # uis (not used in API but needed by apply_args)
        'open_browser': False,
        'ui_layouts': None,
        'ui_workflow': 'instant_runner',
        # execution
        'execution_device_ids': [0],
        'execution_providers': settings.get_execution_providers(),
        'execution_thread_count': settings.execution_thread_count,
        # download
        'download_providers': settings.get_download_providers(),
        'download_scope': settings.download_scope,
        # memory
        'video_memory_strategy': settings.video_memory_strategy,
        'system_memory_limit': 0,
        # misc
        'log_level': settings.log_level,
        'halt_on_error': False,
        # benchmark (not used but apply_args references them)
        'benchmark_mode': None,
        'benchmark_resolutions': None,
        'benchmark_cycle_count': None,
        # jobs
        'job_id': None,
        'job_status': None,
        'step_index': None,
        # processor-specific defaults
        'face_swapper_model': 'hyperswap_1a_256',
        'face_swapper_pixel_boost': None,
        'face_swapper_weight': 0.5,
        'face_enhancer_model': 'gfpgan_1.4',
        'face_enhancer_blend': 80,
        'face_enhancer_weight': 0.5,
        'face_editor_model': 'live_portrait',
        'face_editor_eyebrow_direction': None,
        'face_editor_eye_gaze_horizontal': None,
        'face_editor_eye_gaze_vertical': None,
        'face_editor_eye_open_ratio': None,
        'face_editor_lip_open_ratio': None,
        'face_editor_mouth_grim': None,
        'face_editor_mouth_pout': None,
        'face_editor_mouth_purse': None,
        'face_editor_mouth_smile': None,
        'face_editor_mouth_position_horizontal': None,
        'face_editor_mouth_position_vertical': None,
        'face_editor_head_pitch': None,
        'face_editor_head_yaw': None,
        'face_editor_head_roll': None,
        'lip_syncer_model': 'wav2lip_96',
        'lip_syncer_weight': None,
        'age_modifier_model': 'styleganex_age',
        'age_modifier_direction': 0,
        'expression_restorer_model': 'live_portrait',
        'expression_restorer_factor': 80,
        'expression_restorer_areas': None,
        'frame_enhancer_model': 'span_kendata_1x',
        'frame_enhancer_blend': 80,
        'frame_colorizer_model': 'ddcolor',
        'frame_colorizer_blend': 80,
        'frame_colorizer_size': '256x256',
        'background_remover_model': 'isnet_general_use',
        'background_remover_color': None,
        'deep_swapper_model': None,
        'deep_swapper_morph': None,
        'face_debugger_items': None,
    }
|
||||
|
||||
|
||||
def build_args_from_options(
    source_paths: List[str],
    target_path: str,
    output_path: str,
    options: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
    """Assemble a complete FaceFusion args dict for one processing request.

    Starts from the bridge defaults, injects the mandatory I/O paths, then
    layers any caller-supplied overrides from *options* on top. Unknown keys
    in *options* are ignored; only whitelisted fields are copied through.
    """
    args = dict(_build_defaults())
    args['source_paths'] = source_paths
    args['target_path'] = target_path
    args['output_path'] = output_path

    if not options:
        return args

    def _apply(section: Optional[Dict[str, Any]], prefix: str, keys) -> None:
        # Copy each present key of an option section into args under the
        # FaceFusion naming scheme '<prefix>_<key>'.
        if not section:
            return
        for key in keys:
            if key in section:
                args[f'{prefix}_{key}'] = section[key]

    if 'processors' in options:
        args['processors'] = options['processors']

    # Face detector options ('model' maps to 'face_detector_model', etc.)
    _apply(options.get('face_detector'), 'face_detector', ('model', 'size', 'score'))

    # Face selector options
    _apply(options.get('face_selector'), 'face_selector',
           ('mode', 'order', 'gender', 'race', 'age_start', 'age_end'))

    # Output options
    _apply(options.get('output'), 'output',
           ('image_quality', 'image_scale', 'video_encoder', 'video_preset',
            'video_quality', 'video_scale', 'video_fps', 'audio_encoder',
            'audio_quality', 'audio_volume'))

    # Execution overrides live at the top level of options, not in a section.
    for key in ('execution_providers', 'execution_thread_count', 'video_memory_strategy'):
        if key in options:
            args[key] = options[key]

    # Per-processor options: section name doubles as the args-key prefix.
    processor_fields = {
        'face_swapper': ('model', 'pixel_boost', 'weight'),
        'face_enhancer': ('model', 'blend', 'weight'),
        'face_editor': (
            'model', 'eyebrow_direction', 'eye_gaze_horizontal', 'eye_gaze_vertical',
            'eye_open_ratio', 'lip_open_ratio', 'mouth_grim', 'mouth_pout',
            'mouth_purse', 'mouth_smile', 'mouth_position_horizontal',
            'mouth_position_vertical', 'head_pitch', 'head_yaw', 'head_roll',
        ),
        'lip_syncer': ('model', 'weight'),
        'age_modifier': ('model', 'direction'),
        'expression_restorer': ('model', 'factor', 'areas'),
        'frame_enhancer': ('model', 'blend'),
        'frame_colorizer': ('model', 'blend', 'size'),
        'background_remover': ('model', 'color'),
        'deep_swapper': ('model', 'morph'),
        'face_debugger': ('items',),
    }
    for processor_name, fields in processor_fields.items():
        _apply(options.get(processor_name), processor_name, fields)

    return args
|
||||
|
||||
|
||||
def process_sync(args: Dict[str, Any]) -> bool:
    """Run FaceFusion processing synchronously.

    Acquires the module-level ``_lock`` itself to serialize access to
    FaceFusion's global state. Callers must NOT already hold ``_lock`` —
    the previous docstring said the opposite, which would deadlock on a
    non-reentrant lock. (NOTE(review): confirm ``_lock`` is a plain
    ``threading.Lock`` and not an ``RLock``.)

    :param args: fully-built FaceFusion args dict (see build_args_from_options).
    :returns: True on success.
    :raises RuntimeError: if the internal job cannot be created, populated,
        or submitted, or if processing fails.
    """
    with _lock:
        from facefusion import state_manager
        from facefusion.args import apply_args, reduce_step_args
        from facefusion.core import process_step
        from facefusion.jobs import job_helper, job_manager, job_runner

        # Push the request args into FaceFusion's global state.
        apply_args(args, state_manager.set_item)

        # Wrap the single request into a one-step internal FaceFusion job.
        job_id = job_helper.suggest_job_id('api')
        step_args = reduce_step_args(args)

        if not job_manager.create_job(job_id):
            raise RuntimeError(f'Failed to create job {job_id}')
        if not job_manager.add_step(job_id, step_args):
            raise RuntimeError(f'Failed to add step to job {job_id}')
        if not job_manager.submit_job(job_id):
            raise RuntimeError(f'Failed to submit job {job_id}')

        if not job_runner.run_job(job_id, process_step):
            raise RuntimeError(f'Job {job_id} failed')
        return True
|
||||
|
||||
|
||||
def get_available_processors() -> List[str]:
    """List the names of every FaceFusion processor module found on disk."""
    _setup_sys_path()
    from facefusion.filesystem import get_file_name, resolve_file_paths
    module_paths = resolve_file_paths('facefusion/processors/modules')
    return [get_file_name(path) for path in module_paths]
|
||||
|
||||
|
||||
def get_execution_providers() -> List[str]:
    """Report which ONNX execution providers are usable on this host."""
    _setup_sys_path()
    from facefusion.execution import get_available_execution_providers
    providers = get_available_execution_providers()
    return providers
|
||||
|
||||
|
||||
def force_download_models(processors: Optional[List[str]] = None) -> bool:
    """Pre-fetch model files for the given processors (all when *processors* is None).

    Downloads hashes before sources for each model and bails out with False on
    the first failed download; returns True when everything is present.
    Serialized behind the module lock like all other FaceFusion state access.
    """
    with _lock:
        from facefusion import content_analyser, face_classifier, face_detector, face_landmarker, face_masker, face_recognizer, state_manager, voice_extractor
        from facefusion.download import conditional_download_hashes, conditional_download_sources
        from facefusion.filesystem import get_file_name, resolve_file_paths
        from facefusion.processors.core import get_processors_modules

        # Shared analysis/detection modules whose models every run needs.
        common_modules = [
            content_analyser, face_classifier, face_detector,
            face_landmarker, face_masker, face_recognizer, voice_extractor,
        ]

        if processors is None:
            processor_names = [get_file_name(path) for path in resolve_file_paths('facefusion/processors/modules')]
        else:
            processor_names = processors

        # Hoisted out of the loop: state is stable while we hold the lock.
        download_scope = state_manager.get_item('download_scope')

        for module in common_modules + get_processors_modules(processor_names):
            if not hasattr(module, 'create_static_model_set'):
                continue
            for model in module.create_static_model_set(download_scope).values():
                hashes = model.get('hashes')
                sources = model.get('sources')
                # Models missing either set are skipped, matching upstream behavior.
                if not (hashes and sources):
                    continue
                if not conditional_download_hashes(hashes):
                    return False
                if not conditional_download_sources(sources):
                    return False
        return True
|
||||
|
||||
|
||||
def stop_processing() -> None:
    """Ask FaceFusion's process manager to abort the run in progress."""
    _setup_sys_path()
    from facefusion import process_manager as _pm
    _pm.stop()
|
||||
75
app/services/file_manager.py
Normal file
75
app/services/file_manager.py
Normal file
@@ -0,0 +1,75 @@
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import uuid
|
||||
from typing import List, Tuple
|
||||
|
||||
from fastapi import UploadFile
|
||||
|
||||
from app.config import settings
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def ensure_directories() -> None:
    """Create every data directory the service writes to, if missing."""
    required = (
        settings.upload_dir,
        settings.output_dir,
        settings.models_dir,
        settings.temp_dir,
        settings.jobs_dir,
    )
    for directory in required:
        os.makedirs(directory, exist_ok=True)
|
||||
|
||||
|
||||
def create_request_dir() -> str:
    """Make a fresh, uniquely-named per-request upload directory and return its path."""
    new_dir = os.path.join(settings.upload_dir, str(uuid.uuid4()))
    os.makedirs(new_dir, exist_ok=True)
    return new_dir
|
||||
|
||||
|
||||
async def save_upload(file: UploadFile, directory: str) -> str:
    """Stream one uploaded file into *directory* under a random name.

    Only the extension of the client-supplied filename survives; the rest is
    replaced by a UUID so uploads cannot influence the stored path.
    """
    extension = os.path.splitext(file.filename or '')[1]
    destination = os.path.join(directory, f'{uuid.uuid4()}{extension}')

    chunk_size = 1024 * 1024  # read in 1 MiB chunks to bound memory use
    with open(destination, 'wb') as out:
        while True:
            chunk = await file.read(chunk_size)
            if not chunk:
                break
            out.write(chunk)

    return destination
|
||||
|
||||
|
||||
async def save_uploads(files: List[UploadFile], directory: str) -> List[str]:
    """Persist each uploaded file sequentially, returning stored paths in input order."""
    return [await save_upload(upload, directory) for upload in files]
|
||||
|
||||
|
||||
def generate_output_path(target_path: str) -> str:
    """Pick a unique output path in the outputs dir, keeping the target's extension.

    Falls back to '.png' when the target path carries no extension.
    """
    extension = os.path.splitext(target_path)[1] or '.png'
    return os.path.join(settings.output_dir, f'{uuid.uuid4()}{extension}')
|
||||
|
||||
|
||||
def cleanup_directory(directory: str) -> None:
    """Recursively delete *directory*; missing paths and removal errors are ignored."""
    if not os.path.isdir(directory):
        return
    shutil.rmtree(directory, ignore_errors=True)
|
||||
|
||||
|
||||
def cleanup_file(filepath: str) -> None:
    """Delete a single regular file if present; missing paths and directories are left alone."""
    if not os.path.isfile(filepath):
        return
    os.remove(filepath)
|
||||
|
||||
|
||||
def list_model_files() -> List[Tuple[str, str, int]]:
    """Return (name, path, size_bytes) for every .onnx file in the models dir.

    Yields an empty list when the models directory does not exist yet.
    Entries are sorted by filename.
    """
    base = settings.models_dir
    if not os.path.isdir(base):
        return []

    entries: List[Tuple[str, str, int]] = []
    for name in sorted(os.listdir(base)):
        if not name.endswith('.onnx'):
            continue
        full_path = os.path.join(base, name)
        entries.append((name, full_path, os.path.getsize(full_path)))
    return entries
|
||||
115
app/services/worker.py
Normal file
115
app/services/worker.py
Normal file
@@ -0,0 +1,115 @@
|
||||
import logging
|
||||
import queue
|
||||
import threading
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from enum import Enum
|
||||
from typing import Any, Callable, Dict, List, Optional
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ApiJobStatus(str, Enum):
    """Lifecycle states of an API-level job as tracked by the worker queue."""
    pending = 'pending'        # accepted and waiting in the queue
    processing = 'processing'  # currently being run by the worker thread
    completed = 'completed'    # finished without raising
    failed = 'failed'          # raised during processing; see ApiJob.error
    cancelled = 'cancelled'    # cancelled via delete_job before/while running
|
||||
|
||||
|
||||
@dataclass
class ApiJob:
    """In-memory record of one API job and its processing outcome."""
    job_id: str  # unique identifier assigned by the API layer
    status: ApiJobStatus = ApiJobStatus.pending
    created_at: datetime = field(default_factory=datetime.now)  # naive local time of submission
    updated_at: Optional[datetime] = None  # time of last status transition; None until first change
    error: Optional[str] = None  # failure message, set only when status is failed
    output_path: Optional[str] = None  # result location, set on completion
    args: Dict[str, Any] = field(default_factory=dict)  # FaceFusion args the job runs with
|
||||
|
||||
|
||||
class WorkerQueue:
    """Single-threaded FIFO queue that serializes FaceFusion job processing.

    Jobs are tracked in memory only. A daemon worker thread pulls job ids off
    an internal queue and runs them one at a time through the processing
    callable supplied to start().
    """

    def __init__(self) -> None:
        self._queue: queue.Queue[str] = queue.Queue()
        self._jobs: Dict[str, ApiJob] = {}
        self._lock = threading.Lock()
        self._thread: Optional[threading.Thread] = None
        self._running = False
        self._process_fn: Optional[Callable[[Dict[str, Any]], bool]] = None

    def start(self, process_fn: Callable[[Dict[str, Any]], bool]) -> None:
        """Launch the daemon worker thread, using *process_fn* to run each job."""
        self._process_fn = process_fn
        self._running = True
        self._thread = threading.Thread(target=self._worker_loop, daemon=True, name='ff-worker')
        self._thread.start()
        logger.info('Worker thread started')

    def stop(self) -> None:
        """Signal the worker thread to exit and wait briefly for it.

        The empty-string sentinel unblocks a worker idling on the queue. A
        worker busy inside process_fn may outlive the 5s join timeout; it is
        a daemon thread, so it will not block interpreter shutdown.
        """
        self._running = False
        self._queue.put('')  # sentinel to unblock the blocking get()
        if self._thread:
            self._thread.join(timeout=5)
        logger.info('Worker thread stopped')

    def submit(self, job_id: str, args: Dict[str, Any]) -> ApiJob:
        """Register a new pending job and enqueue it for processing."""
        job = ApiJob(job_id=job_id, args=args)
        with self._lock:
            self._jobs[job_id] = job
        self._queue.put(job_id)
        return job

    def get_job(self, job_id: str) -> Optional[ApiJob]:
        """Return the job record for *job_id*, or None if unknown."""
        with self._lock:
            return self._jobs.get(job_id)

    def delete_job(self, job_id: str) -> bool:
        """Cancel (if necessary) and forget a job. Returns False when unknown.

        A mid-processing job is asked to stop via the FaceFusion bridge; the
        worker loop keeps its own reference to the record, so popping it here
        only removes it from client-visible lookups.
        """
        with self._lock:
            job = self._jobs.get(job_id)
            if not job:
                return False
            if job.status == ApiJobStatus.processing:
                job.status = ApiJobStatus.cancelled
                # Imported lazily to avoid a circular import at module load.
                from app.services.facefusion_bridge import stop_processing
                stop_processing()
            elif job.status == ApiJobStatus.pending:
                job.status = ApiJobStatus.cancelled
            self._jobs.pop(job_id, None)
            return True

    def list_jobs(self) -> List[ApiJob]:
        """Return a snapshot list of all known job records."""
        with self._lock:
            return list(self._jobs.values())

    def _worker_loop(self) -> None:
        """Daemon loop: pull job ids off the queue and run them one at a time."""
        while self._running:
            try:
                job_id = self._queue.get(timeout=1)
            except queue.Empty:
                continue  # periodic wakeup so _running is re-checked

            if not job_id:  # sentinel from stop()
                break

            with self._lock:
                job = self._jobs.get(job_id)
                if not job or job.status == ApiJobStatus.cancelled:
                    continue  # deleted or cancelled while queued
                job.status = ApiJobStatus.processing
                job.updated_at = datetime.now()

            try:
                # process_fn returns a success flag; previously a falsy
                # result was silently recorded as completed. Treat it as a
                # failure instead.
                if not self._process_fn(job.args):
                    raise RuntimeError('processing function reported failure')
                with self._lock:
                    job.status = ApiJobStatus.completed
                    job.updated_at = datetime.now()
                    job.output_path = job.args.get('output_path')
                logger.info('Job %s completed', job_id)
            except Exception as e:
                with self._lock:
                    job.status = ApiJobStatus.failed
                    job.updated_at = datetime.now()
                    job.error = str(e)
                logger.error('Job %s failed: %s', job_id, e)
|
||||
|
||||
|
||||
# Module-level singleton instance of the worker queue.
worker_queue = WorkerQueue()
|
||||
Reference in New Issue
Block a user