fix: align Freepik API paths with OpenAPI spec
Some checks failed
Build and Push Docker Image / build (push) Failing after 9s
The original implementation used guessed endpoint paths that don't match
the actual Freepik API. Key fixes based on their OpenAPI spec:
- Task polling is per-endpoint (e.g. GET /v1/ai/text-to-image/flux-dev/{task-id})
not a generic /v1/ai/tasks/{id}. freepik_client now returns TaskResult
with status_path, and task_tracker polls using that path.
- Fixed endpoint paths: flux-pro -> flux-pro-v1-1, upscale -> image-upscaler,
relight -> image-relight, style-transfer -> image-style-transfer,
expand -> image-expand/flux-pro, inpaint -> ideogram-image-edit,
remove-background -> beta/remove-background, classifier -> classifier/image,
audio-isolate -> audio-isolation, icon -> text-to-icon
- Fixed video paths: kling -> kling-o1-pro with kling-o1 status path,
minimax -> minimax-hailuo-02-1080p, seedance -> seedance-pro-1080p
- Fixed request schemas to match actual API params (e.g. scale_factor
not scale, reference_image not style_reference, image_url for bg removal)
- Fixed response parsing: status is uppercase (COMPLETED not completed),
results in data.generated[] array, classifier returns [{class_name, probability}]
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -14,19 +14,19 @@ from app.schemas.image_editing import (
|
||||
UpscalePrecisionRequest,
|
||||
)
|
||||
from app.services import freepik_client, task_tracker
|
||||
from app.services.freepik_client import TaskResult, _extract_task_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix='/api/v1/edit', tags=['image-editing'])
|
||||
|
||||
|
||||
async def _submit_and_respond(result: dict, sync: bool, metadata: dict) -> TaskResponse | TaskDetail:
|
||||
data = result.get('data', result)
|
||||
freepik_task_id = str(data.get('task_id') or data.get('id', ''))
|
||||
async def _submit_and_respond(result: TaskResult, sync: bool, metadata: dict) -> TaskResponse | TaskDetail:
|
||||
freepik_task_id = _extract_task_id(result.data)
|
||||
if not freepik_task_id:
|
||||
raise HTTPException(status_code=502, detail='No task_id in Freepik response')
|
||||
|
||||
internal_id = task_tracker.submit(freepik_task_id, metadata)
|
||||
internal_id = task_tracker.submit(freepik_task_id, result.status_path, metadata)
|
||||
|
||||
if not sync:
|
||||
return TaskResponse(
|
||||
@@ -63,9 +63,10 @@ async def upscale_creative(request: UpscaleCreativeRequest, sync: bool = Query(F
|
||||
result = await freepik_client.upscale_creative(
|
||||
image=request.image,
|
||||
prompt=request.prompt,
|
||||
scale=request.scale,
|
||||
scale_factor=request.scale_factor,
|
||||
creativity=request.creativity,
|
||||
resemblance=request.resemblance,
|
||||
optimized_for=request.optimized_for,
|
||||
)
|
||||
return await _submit_and_respond(result, sync, {'operation': 'upscale-creative'})
|
||||
|
||||
@@ -74,7 +75,7 @@ async def upscale_creative(request: UpscaleCreativeRequest, sync: bool = Query(F
|
||||
async def upscale_precision(request: UpscalePrecisionRequest, sync: bool = Query(False)):
|
||||
result = await freepik_client.upscale_precision(
|
||||
image=request.image,
|
||||
scale=request.scale,
|
||||
scale_factor=request.scale_factor,
|
||||
)
|
||||
return await _submit_and_respond(result, sync, {'operation': 'upscale-precision'})
|
||||
|
||||
@@ -84,8 +85,8 @@ async def relight(request: RelightRequest, sync: bool = Query(False)):
|
||||
result = await freepik_client.relight_image(
|
||||
image=request.image,
|
||||
prompt=request.prompt,
|
||||
light_source=request.light_source,
|
||||
intensity=request.intensity,
|
||||
transfer_light_from_reference_image=request.transfer_light_from_reference_image,
|
||||
light_transfer_strength=request.light_transfer_strength,
|
||||
)
|
||||
return await _submit_and_respond(result, sync, {'operation': 'relight'})
|
||||
|
||||
@@ -94,8 +95,10 @@ async def relight(request: RelightRequest, sync: bool = Query(False)):
|
||||
async def style_transfer(request: StyleTransferRequest, sync: bool = Query(False)):
|
||||
result = await freepik_client.style_transfer(
|
||||
image=request.image,
|
||||
style_reference=request.style_reference,
|
||||
strength=request.strength,
|
||||
reference_image=request.reference_image,
|
||||
prompt=request.prompt,
|
||||
style_strength=request.style_strength,
|
||||
structure_strength=request.structure_strength,
|
||||
)
|
||||
return await _submit_and_respond(result, sync, {'operation': 'style-transfer'})
|
||||
|
||||
@@ -105,7 +108,6 @@ async def expand(request: ExpandRequest, sync: bool = Query(False)):
|
||||
result = await freepik_client.expand_image(
|
||||
image=request.image,
|
||||
prompt=request.prompt,
|
||||
direction=request.direction,
|
||||
)
|
||||
return await _submit_and_respond(result, sync, {'operation': 'expand'})
|
||||
|
||||
|
||||
@@ -11,7 +11,8 @@ from app.schemas.image_generation import (
|
||||
MysticRequest,
|
||||
SeedreamRequest,
|
||||
)
|
||||
from app.services import freepik_client, task_tracker
|
||||
from app.services import task_tracker
|
||||
from app.services.freepik_client import TaskResult, _extract_task_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
@@ -19,17 +20,16 @@ router = APIRouter(prefix='/api/v1/generate/image', tags=['image-generation'])
|
||||
|
||||
|
||||
async def _submit_and_respond(
|
||||
result: dict,
|
||||
result: TaskResult,
|
||||
sync: bool,
|
||||
metadata: dict | None = None,
|
||||
) -> TaskResponse | TaskDetail:
|
||||
"""Extract task_id from Freepik response, track it, optionally wait."""
|
||||
data = result.get('data', result)
|
||||
freepik_task_id = str(data.get('task_id') or data.get('id', ''))
|
||||
freepik_task_id = _extract_task_id(result.data)
|
||||
if not freepik_task_id:
|
||||
raise HTTPException(status_code=502, detail='No task_id in Freepik response')
|
||||
|
||||
internal_id = task_tracker.submit(freepik_task_id, metadata)
|
||||
internal_id = task_tracker.submit(freepik_task_id, result.status_path, metadata)
|
||||
|
||||
if not sync:
|
||||
return TaskResponse(
|
||||
@@ -64,24 +64,27 @@ async def _submit_and_respond(
|
||||
|
||||
@router.post('/mystic', response_model=TaskResponse)
|
||||
async def generate_mystic(request: MysticRequest, sync: bool = Query(False)):
|
||||
from app.services import freepik_client
|
||||
result = await freepik_client.generate_mystic(
|
||||
prompt=request.prompt,
|
||||
negative_prompt=request.negative_prompt,
|
||||
resolution=request.resolution,
|
||||
styling=request.styling,
|
||||
aspect_ratio=request.aspect_ratio,
|
||||
model=request.model,
|
||||
seed=request.seed,
|
||||
num_images=request.num_images,
|
||||
styling=request.styling,
|
||||
structure_reference=request.structure_reference,
|
||||
style_reference=request.style_reference,
|
||||
)
|
||||
return await _submit_and_respond(result, sync, {'model': 'mystic'})
|
||||
|
||||
|
||||
@router.post('/flux-dev', response_model=TaskResponse)
|
||||
async def generate_flux_dev(request: FluxDevRequest, sync: bool = Query(False)):
|
||||
from app.services import freepik_client
|
||||
result = await freepik_client.generate_flux_dev(
|
||||
prompt=request.prompt,
|
||||
image=request.image,
|
||||
guidance_scale=request.guidance_scale,
|
||||
num_images=request.num_images,
|
||||
aspect_ratio=request.aspect_ratio,
|
||||
styling=request.styling,
|
||||
seed=request.seed,
|
||||
)
|
||||
return await _submit_and_respond(result, sync, {'model': 'flux-dev'})
|
||||
@@ -89,10 +92,11 @@ async def generate_flux_dev(request: FluxDevRequest, sync: bool = Query(False)):
|
||||
|
||||
@router.post('/flux-pro', response_model=TaskResponse)
|
||||
async def generate_flux_pro(request: FluxProRequest, sync: bool = Query(False)):
|
||||
from app.services import freepik_client
|
||||
result = await freepik_client.generate_flux_pro(
|
||||
prompt=request.prompt,
|
||||
image=request.image,
|
||||
guidance_scale=request.guidance_scale,
|
||||
aspect_ratio=request.aspect_ratio,
|
||||
styling=request.styling,
|
||||
seed=request.seed,
|
||||
)
|
||||
return await _submit_and_respond(result, sync, {'model': 'flux-pro'})
|
||||
@@ -100,11 +104,10 @@ async def generate_flux_pro(request: FluxProRequest, sync: bool = Query(False)):
|
||||
|
||||
@router.post('/seedream', response_model=TaskResponse)
|
||||
async def generate_seedream(request: SeedreamRequest, sync: bool = Query(False)):
|
||||
from app.services import freepik_client
|
||||
result = await freepik_client.generate_seedream(
|
||||
prompt=request.prompt,
|
||||
image=request.image,
|
||||
aspect_ratio=request.aspect_ratio,
|
||||
num_images=request.num_images,
|
||||
seed=request.seed,
|
||||
)
|
||||
return await _submit_and_respond(result, sync, {'model': 'seedream'})
|
||||
|
||||
@@ -1,30 +1,28 @@
|
||||
import base64
|
||||
import logging
|
||||
from datetime import datetime, timezone
|
||||
|
||||
from fastapi import APIRouter, HTTPException
|
||||
|
||||
from app.schemas.common import TaskResponse, TaskStatus
|
||||
from app.schemas.utilities import ClassificationResponse, IconRequest
|
||||
from app.schemas.utilities import (
|
||||
ClassificationResponse,
|
||||
IconRequest,
|
||||
RemoveBackgroundRequest,
|
||||
RemoveBackgroundResponse,
|
||||
)
|
||||
from app.services import freepik_client, task_tracker
|
||||
from app.services.freepik_client import _extract_task_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix='/api/v1/util', tags=['utilities'])
|
||||
|
||||
|
||||
@router.post('/remove-background')
|
||||
async def remove_background(request: dict):
|
||||
"""Remove background from an image. Returns processed image as base64."""
|
||||
image = request.get('image')
|
||||
if not image:
|
||||
raise HTTPException(status_code=400, detail='image field is required')
|
||||
|
||||
result_bytes = await freepik_client.remove_background(image)
|
||||
return {
|
||||
'image': base64.b64encode(result_bytes).decode(),
|
||||
'content_type': 'image/png',
|
||||
}
|
||||
@router.post('/remove-background', response_model=RemoveBackgroundResponse)
|
||||
async def remove_background(request: RemoveBackgroundRequest):
|
||||
"""Remove background from an image. Takes a URL, returns result URLs."""
|
||||
result = await freepik_client.remove_background(request.image_url)
|
||||
return RemoveBackgroundResponse(**result)
|
||||
|
||||
|
||||
@router.post('/classify', response_model=ClassificationResponse)
|
||||
@@ -35,12 +33,7 @@ async def classify_image(request: dict):
|
||||
raise HTTPException(status_code=400, detail='image field is required')
|
||||
|
||||
result = await freepik_client.classify_image(image)
|
||||
data = result.get('data', result)
|
||||
return ClassificationResponse(
|
||||
is_ai_generated=data.get('is_ai_generated', False),
|
||||
ai_probability=data.get('ai_probability', 0.0),
|
||||
human_probability=data.get('human_probability', 0.0),
|
||||
)
|
||||
return result
|
||||
|
||||
|
||||
@router.post('/audio-isolate', response_model=TaskResponse)
|
||||
@@ -51,12 +44,11 @@ async def audio_isolate(request: dict):
|
||||
raise HTTPException(status_code=400, detail='audio field is required')
|
||||
|
||||
result = await freepik_client.isolate_audio(audio)
|
||||
data = result.get('data', result)
|
||||
freepik_task_id = str(data.get('task_id') or data.get('id', ''))
|
||||
freepik_task_id = _extract_task_id(result.data)
|
||||
if not freepik_task_id:
|
||||
raise HTTPException(status_code=502, detail='No task_id in Freepik response')
|
||||
|
||||
internal_id = task_tracker.submit(freepik_task_id, {'operation': 'audio-isolate'})
|
||||
internal_id = task_tracker.submit(freepik_task_id, result.status_path, {'operation': 'audio-isolate'})
|
||||
return TaskResponse(
|
||||
task_id=internal_id,
|
||||
status=TaskStatus.pending,
|
||||
@@ -76,12 +68,11 @@ async def generate_icon(request: IconRequest):
|
||||
shape=request.shape,
|
||||
style=request.style,
|
||||
)
|
||||
data = result.get('data', result)
|
||||
freepik_task_id = str(data.get('task_id') or data.get('id', ''))
|
||||
freepik_task_id = _extract_task_id(result.data)
|
||||
if not freepik_task_id:
|
||||
raise HTTPException(status_code=502, detail='No task_id in Freepik response')
|
||||
|
||||
internal_id = task_tracker.submit(freepik_task_id, {'operation': 'icon'})
|
||||
internal_id = task_tracker.submit(freepik_task_id, result.status_path, {'operation': 'icon'})
|
||||
return TaskResponse(
|
||||
task_id=internal_id,
|
||||
status=TaskStatus.pending,
|
||||
|
||||
@@ -6,30 +6,26 @@ from fastapi import APIRouter, HTTPException
|
||||
from app.schemas.common import TaskResponse, TaskStatus
|
||||
from app.schemas.video_generation import KlingRequest, MinimaxRequest, SeedanceRequest
|
||||
from app.services import freepik_client, task_tracker
|
||||
from app.services.freepik_client import _extract_task_id
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
router = APIRouter(prefix='/api/v1/generate/video', tags=['video-generation'])
|
||||
|
||||
|
||||
def _extract_task_id(result: dict) -> str:
|
||||
data = result.get('data', result)
|
||||
freepik_task_id = str(data.get('task_id') or data.get('id', ''))
|
||||
if not freepik_task_id:
|
||||
raise HTTPException(status_code=502, detail='No task_id in Freepik response')
|
||||
return freepik_task_id
|
||||
|
||||
|
||||
@router.post('/kling', response_model=TaskResponse)
|
||||
async def generate_video_kling(request: KlingRequest):
|
||||
result = await freepik_client.generate_video_kling(
|
||||
image=request.image,
|
||||
first_frame=request.first_frame,
|
||||
last_frame=request.last_frame,
|
||||
prompt=request.prompt,
|
||||
duration=request.duration,
|
||||
aspect_ratio=request.aspect_ratio,
|
||||
)
|
||||
freepik_id = _extract_task_id(result)
|
||||
internal_id = task_tracker.submit(freepik_id, {'model': 'kling'})
|
||||
freepik_id = _extract_task_id(result.data)
|
||||
if not freepik_id:
|
||||
raise HTTPException(status_code=502, detail='No task_id in Freepik response')
|
||||
internal_id = task_tracker.submit(freepik_id, result.status_path, {'model': 'kling'})
|
||||
return TaskResponse(
|
||||
task_id=internal_id,
|
||||
status=TaskStatus.pending,
|
||||
@@ -42,10 +38,12 @@ async def generate_video_minimax(request: MinimaxRequest):
|
||||
result = await freepik_client.generate_video_minimax(
|
||||
prompt=request.prompt,
|
||||
first_frame_image=request.first_frame_image,
|
||||
subject_reference=request.subject_reference,
|
||||
last_frame_image=request.last_frame_image,
|
||||
)
|
||||
freepik_id = _extract_task_id(result)
|
||||
internal_id = task_tracker.submit(freepik_id, {'model': 'minimax'})
|
||||
freepik_id = _extract_task_id(result.data)
|
||||
if not freepik_id:
|
||||
raise HTTPException(status_code=502, detail='No task_id in Freepik response')
|
||||
internal_id = task_tracker.submit(freepik_id, result.status_path, {'model': 'minimax'})
|
||||
return TaskResponse(
|
||||
task_id=internal_id,
|
||||
status=TaskStatus.pending,
|
||||
@@ -59,10 +57,11 @@ async def generate_video_seedance(request: SeedanceRequest):
|
||||
prompt=request.prompt,
|
||||
image=request.image,
|
||||
duration=request.duration,
|
||||
resolution=request.resolution,
|
||||
)
|
||||
freepik_id = _extract_task_id(result)
|
||||
internal_id = task_tracker.submit(freepik_id, {'model': 'seedance'})
|
||||
freepik_id = _extract_task_id(result.data)
|
||||
if not freepik_id:
|
||||
raise HTTPException(status_code=502, detail='No task_id in Freepik response')
|
||||
internal_id = task_tracker.submit(freepik_id, result.status_path, {'model': 'seedance'})
|
||||
return TaskResponse(
|
||||
task_id=internal_id,
|
||||
status=TaskStatus.pending,
|
||||
|
||||
@@ -6,36 +6,43 @@ from pydantic import BaseModel, Field
|
||||
class UpscaleCreativeRequest(BaseModel):
|
||||
image: str = Field(..., description='Base64-encoded image')
|
||||
prompt: Optional[str] = None
|
||||
scale: Optional[int] = Field(None, ge=2, le=4)
|
||||
creativity: Optional[float] = Field(None, ge=0.0, le=1.0)
|
||||
resemblance: Optional[float] = Field(None, ge=0.0, le=1.0)
|
||||
scale_factor: Optional[str] = Field(None, description='2x, 4x, 8x, or 16x')
|
||||
creativity: Optional[int] = Field(None, ge=-10, le=10)
|
||||
resemblance: Optional[int] = Field(None, ge=-10, le=10)
|
||||
optimized_for: Optional[str] = Field(
|
||||
None,
|
||||
description='standard, soft_portraits, hard_portraits, art_n_illustration, etc.',
|
||||
)
|
||||
|
||||
|
||||
class UpscalePrecisionRequest(BaseModel):
|
||||
image: str = Field(..., description='Base64-encoded image')
|
||||
scale: Optional[int] = Field(None, ge=2, le=4)
|
||||
scale_factor: Optional[str] = Field(None, description='2x or 4x')
|
||||
|
||||
|
||||
class RelightRequest(BaseModel):
|
||||
image: str = Field(..., description='Base64-encoded image')
|
||||
image: str = Field(..., description='Base64 or URL of image')
|
||||
prompt: Optional[str] = None
|
||||
light_source: Optional[str] = None
|
||||
intensity: Optional[float] = Field(None, ge=0.0, le=1.0)
|
||||
transfer_light_from_reference_image: Optional[str] = Field(
|
||||
None, description='Base64 or URL of reference image for light transfer',
|
||||
)
|
||||
light_transfer_strength: Optional[int] = Field(None, ge=0, le=100)
|
||||
|
||||
|
||||
class StyleTransferRequest(BaseModel):
|
||||
image: str = Field(..., description='Base64-encoded image')
|
||||
style_reference: str = Field(..., description='Base64-encoded style reference image')
|
||||
strength: Optional[float] = Field(None, ge=0.0, le=1.0)
|
||||
image: str = Field(..., description='Base64 or URL of image')
|
||||
reference_image: str = Field(..., description='Base64 or URL of style reference image')
|
||||
prompt: Optional[str] = None
|
||||
style_strength: Optional[int] = Field(None, ge=0, le=100)
|
||||
structure_strength: Optional[int] = Field(None, ge=0, le=100)
|
||||
|
||||
|
||||
class ExpandRequest(BaseModel):
|
||||
image: str = Field(..., description='Base64-encoded image')
|
||||
image: str = Field(..., description='Base64 or URL of image')
|
||||
prompt: Optional[str] = None
|
||||
direction: Optional[str] = Field(None, description='Expansion direction')
|
||||
|
||||
|
||||
class InpaintRequest(BaseModel):
|
||||
image: str = Field(..., description='Base64-encoded image')
|
||||
mask: str = Field(..., description='Base64-encoded mask image')
|
||||
image: str = Field(..., description='Base64 or URL of image')
|
||||
mask: str = Field(..., description='Base64 or URL of mask image')
|
||||
prompt: str = Field(..., min_length=1)
|
||||
|
||||
@@ -5,31 +5,30 @@ from pydantic import BaseModel, Field
|
||||
|
||||
class MysticRequest(BaseModel):
|
||||
prompt: str = Field(..., min_length=1, max_length=4000)
|
||||
negative_prompt: Optional[str] = None
|
||||
resolution: Optional[str] = None
|
||||
resolution: Optional[str] = Field(None, description='1k, 2k, or 4k')
|
||||
aspect_ratio: Optional[str] = Field(None, description='e.g. square_1_1, widescreen_16_9')
|
||||
model: Optional[str] = Field(None, description='realism, fluid, zen, flexible, super_real, editorial_portraits')
|
||||
seed: Optional[int] = Field(None, ge=1, le=4294967295)
|
||||
styling: Optional[dict] = None
|
||||
seed: Optional[int] = None
|
||||
num_images: Optional[int] = Field(None, ge=1, le=4)
|
||||
structure_reference: Optional[str] = Field(None, description='Base64 image for structure reference')
|
||||
style_reference: Optional[str] = Field(None, description='Base64 image for style reference')
|
||||
|
||||
|
||||
class FluxDevRequest(BaseModel):
|
||||
prompt: str = Field(..., min_length=1, max_length=4000)
|
||||
image: Optional[str] = Field(None, description='Base64-encoded image for img2img')
|
||||
guidance_scale: Optional[float] = Field(None, ge=1.0, le=20.0)
|
||||
num_images: Optional[int] = Field(None, ge=1, le=4)
|
||||
seed: Optional[int] = None
|
||||
aspect_ratio: Optional[str] = Field(None, description='e.g. square_1_1, widescreen_16_9')
|
||||
styling: Optional[dict] = None
|
||||
seed: Optional[int] = Field(None, ge=1, le=4294967295)
|
||||
|
||||
|
||||
class FluxProRequest(BaseModel):
|
||||
prompt: str = Field(..., min_length=1, max_length=4000)
|
||||
image: Optional[str] = Field(None, description='Base64-encoded image for img2img')
|
||||
guidance_scale: Optional[float] = Field(None, ge=1.0, le=20.0)
|
||||
seed: Optional[int] = None
|
||||
aspect_ratio: Optional[str] = Field(None, description='e.g. square_1_1, widescreen_16_9')
|
||||
styling: Optional[dict] = None
|
||||
seed: Optional[int] = Field(None, ge=1, le=4294967295)
|
||||
|
||||
|
||||
class SeedreamRequest(BaseModel):
|
||||
prompt: str = Field(..., min_length=1, max_length=4000)
|
||||
image: Optional[str] = Field(None, description='Base64-encoded image for img2img')
|
||||
aspect_ratio: Optional[str] = None
|
||||
num_images: Optional[int] = Field(None, ge=1, le=4)
|
||||
seed: Optional[int] = None
|
||||
aspect_ratio: Optional[str] = Field(None, description='e.g. square_1_1, widescreen_16_9')
|
||||
seed: Optional[int] = Field(None, ge=1, le=4294967295)
|
||||
|
||||
@@ -3,10 +3,24 @@ from typing import Optional
|
||||
from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class ClassificationResult(BaseModel):
|
||||
class_name: str
|
||||
probability: float
|
||||
|
||||
|
||||
class ClassificationResponse(BaseModel):
|
||||
is_ai_generated: bool
|
||||
ai_probability: float
|
||||
human_probability: float
|
||||
data: list[ClassificationResult]
|
||||
|
||||
|
||||
class RemoveBackgroundRequest(BaseModel):
|
||||
image_url: str = Field(..., description='URL of image to remove background from')
|
||||
|
||||
|
||||
class RemoveBackgroundResponse(BaseModel):
|
||||
original: Optional[str] = None
|
||||
high_resolution: Optional[str] = None
|
||||
preview: Optional[str] = None
|
||||
url: Optional[str] = None
|
||||
|
||||
|
||||
class IconRequest(BaseModel):
|
||||
|
||||
@@ -4,20 +4,20 @@ from pydantic import BaseModel, Field
|
||||
|
||||
|
||||
class KlingRequest(BaseModel):
|
||||
image: str = Field(..., description='Base64-encoded image')
|
||||
prompt: Optional[str] = None
|
||||
duration: Optional[str] = Field(None, description='5 or 10 seconds')
|
||||
aspect_ratio: Optional[str] = None
|
||||
first_frame: Optional[str] = Field(None, description='Base64 or URL of first frame image')
|
||||
last_frame: Optional[str] = Field(None, description='Base64 or URL of last frame image')
|
||||
prompt: Optional[str] = Field(None, max_length=2500)
|
||||
duration: Optional[int] = Field(None, description='5 or 10 seconds')
|
||||
aspect_ratio: Optional[str] = Field(None, description='16:9, 9:16, or 1:1')
|
||||
|
||||
|
||||
class MinimaxRequest(BaseModel):
|
||||
prompt: str = Field(..., min_length=1, max_length=4000)
|
||||
first_frame_image: Optional[str] = Field(None, description='Base64-encoded image')
|
||||
subject_reference: Optional[str] = Field(None, description='Base64-encoded reference image')
|
||||
first_frame_image: Optional[str] = Field(None, description='Base64 or URL of first frame')
|
||||
last_frame_image: Optional[str] = Field(None, description='Base64 or URL of last frame')
|
||||
|
||||
|
||||
class SeedanceRequest(BaseModel):
|
||||
prompt: str = Field(..., min_length=1, max_length=4000)
|
||||
image: Optional[str] = Field(None, description='Base64-encoded image')
|
||||
duration: Optional[str] = None
|
||||
resolution: Optional[str] = None
|
||||
prompt: str = Field(..., min_length=1, max_length=2000)
|
||||
image: Optional[str] = Field(None, description='Base64 or URL of input image')
|
||||
duration: Optional[str] = Field(None, description='5 or 10 seconds')
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Optional
|
||||
|
||||
import httpx
|
||||
@@ -11,6 +12,13 @@ logger = logging.getLogger(__name__)
|
||||
_client: Optional[httpx.AsyncClient] = None
|
||||
|
||||
|
||||
@dataclass
class TaskResult:
    """Wraps a Freepik API response with the status polling path."""
    # Raw JSON body returned by the submit endpoint; the helper
    # _extract_task_id reads the task identifier from data['data']['task_id'].
    data: dict
    # Per-endpoint GET path used to poll this task's status, built by each
    # generate_* function as '<submit path>/<task_id>' (Freepik polling is
    # per-endpoint, not a generic /v1/ai/tasks/{id}).
    status_path: str
|
||||
|
||||
|
||||
def get_client() -> httpx.AsyncClient:
|
||||
if _client is None:
|
||||
raise RuntimeError('Freepik client not initialized')
|
||||
@@ -80,268 +88,325 @@ async def _request(
|
||||
)
|
||||
|
||||
|
||||
async def _request_raw(
    method: str,
    path: str,
    *,
    json: Optional[dict] = None,
    data: Optional[dict] = None,
    files: Optional[dict] = None,
    max_retries: int = 3,
) -> bytes:
    """Make a request and return raw bytes (for binary responses).

    Retries on HTTP 429 with the server's Retry-After delay (falling back to
    exponential backoff of 2**attempt seconds); any other error status raises
    via ``response.raise_for_status()``.

    Args:
        method: HTTP method name (e.g. 'POST').
        path: Request path relative to the shared client's base URL.
        json: Optional JSON body.
        data: Optional form-encoded body.
        files: Optional multipart file payload.
        max_retries: Number of attempts before giving up on rate limits.

    Returns:
        The raw response body bytes.

    Raises:
        httpx.HTTPStatusError: On a non-429 error status, or after all
            retries are exhausted by 429 responses.
        RuntimeError: If the shared client has not been initialized.
    """
    client = get_client()
    for attempt in range(max_retries):
        # Build kwargs only for the payload types actually provided so we
        # don't send conflicting body parameters to httpx.
        kwargs: dict[str, Any] = {}
        if json is not None:
            kwargs['json'] = json
        if data is not None:
            kwargs['data'] = data
        if files is not None:
            kwargs['files'] = files

        response = await client.request(method, path, **kwargs)

        if response.status_code == 429:
            # Honor Retry-After when present; otherwise back off 1s, 2s, 4s...
            # NOTE(review): int() assumes a delta-seconds header — an
            # HTTP-date Retry-After would raise ValueError; confirm Freepik
            # only sends the numeric form.
            retry_after = int(response.headers.get('Retry-After', 2 ** attempt))
            logger.warning(f'Rate limited, retrying in {retry_after}s (attempt {attempt + 1})')
            await asyncio.sleep(retry_after)
            continue

        response.raise_for_status()
        return response.content

    # All attempts were rate-limited; surface the last 429 response.
    # NOTE(review): if max_retries were ever 0, `response` would be unbound
    # here — current callers use the default of 3.
    raise httpx.HTTPStatusError(
        'Rate limit exceeded after retries',
        request=response.request,
        response=response,
    )
|
||||
|
||||
|
||||
def _strip_none(d: dict) -> dict:
|
||||
"""Remove None values from a dict for clean API payloads."""
|
||||
return {k: v for k, v in d.items() if v is not None}
|
||||
|
||||
|
||||
def _extract_task_id(result: dict) -> str:
|
||||
"""Extract task_id from Freepik response (always under data.task_id)."""
|
||||
data = result.get('data', result)
|
||||
if isinstance(data, dict):
|
||||
return str(data.get('task_id', ''))
|
||||
return ''
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Image generation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_MYSTIC_PATH = '/v1/ai/mystic'
|
||||
|
||||
|
||||
async def generate_mystic(
|
||||
prompt: str,
|
||||
negative_prompt: Optional[str] = None,
|
||||
resolution: Optional[str] = None,
|
||||
styling: Optional[dict] = None,
|
||||
aspect_ratio: Optional[str] = None,
|
||||
model: Optional[str] = None,
|
||||
seed: Optional[int] = None,
|
||||
num_images: Optional[int] = None,
|
||||
) -> dict:
|
||||
styling: Optional[dict] = None,
|
||||
structure_reference: Optional[str] = None,
|
||||
style_reference: Optional[str] = None,
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'prompt': prompt,
|
||||
'negative_prompt': negative_prompt,
|
||||
'resolution': resolution,
|
||||
'styling': styling,
|
||||
'aspect_ratio': aspect_ratio,
|
||||
'model': model,
|
||||
'seed': seed,
|
||||
'num_images': num_images,
|
||||
'styling': styling,
|
||||
'structure_reference': structure_reference,
|
||||
'style_reference': style_reference,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/mystic', json=payload)
|
||||
result = await _request('POST', _MYSTIC_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_MYSTIC_PATH}/{task_id}')
|
||||
|
||||
|
||||
_FLUX_DEV_PATH = '/v1/ai/text-to-image/flux-dev'
|
||||
|
||||
|
||||
async def generate_flux_dev(
|
||||
prompt: str,
|
||||
image: Optional[str] = None,
|
||||
guidance_scale: Optional[float] = None,
|
||||
num_images: Optional[int] = None,
|
||||
aspect_ratio: Optional[str] = None,
|
||||
styling: Optional[dict] = None,
|
||||
seed: Optional[int] = None,
|
||||
) -> dict:
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'prompt': prompt,
|
||||
'image': image,
|
||||
'guidance_scale': guidance_scale,
|
||||
'num_images': num_images,
|
||||
'aspect_ratio': aspect_ratio,
|
||||
'styling': styling,
|
||||
'seed': seed,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/text-to-image/flux-dev', json=payload)
|
||||
result = await _request('POST', _FLUX_DEV_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_FLUX_DEV_PATH}/{task_id}')
|
||||
|
||||
|
||||
_FLUX_PRO_PATH = '/v1/ai/text-to-image/flux-pro-v1-1'
|
||||
|
||||
|
||||
async def generate_flux_pro(
|
||||
prompt: str,
|
||||
image: Optional[str] = None,
|
||||
guidance_scale: Optional[float] = None,
|
||||
aspect_ratio: Optional[str] = None,
|
||||
styling: Optional[dict] = None,
|
||||
seed: Optional[int] = None,
|
||||
) -> dict:
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'prompt': prompt,
|
||||
'image': image,
|
||||
'guidance_scale': guidance_scale,
|
||||
'aspect_ratio': aspect_ratio,
|
||||
'styling': styling,
|
||||
'seed': seed,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/text-to-image/flux-pro-1.1', json=payload)
|
||||
result = await _request('POST', _FLUX_PRO_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_FLUX_PRO_PATH}/{task_id}')
|
||||
|
||||
|
||||
_SEEDREAM_PATH = '/v1/ai/text-to-image/seedream'
|
||||
|
||||
|
||||
async def generate_seedream(
|
||||
prompt: str,
|
||||
image: Optional[str] = None,
|
||||
aspect_ratio: Optional[str] = None,
|
||||
num_images: Optional[int] = None,
|
||||
seed: Optional[int] = None,
|
||||
) -> dict:
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'prompt': prompt,
|
||||
'image': image,
|
||||
'aspect_ratio': aspect_ratio,
|
||||
'num_images': num_images,
|
||||
'seed': seed,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/text-to-image/seedream', json=payload)
|
||||
result = await _request('POST', _SEEDREAM_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_SEEDREAM_PATH}/{task_id}')
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Video generation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_KLING_O1_PRO_PATH = '/v1/ai/image-to-video/kling-o1-pro'
|
||||
_KLING_O1_STATUS_PATH = '/v1/ai/image-to-video/kling-o1'
|
||||
|
||||
|
||||
async def generate_video_kling(
|
||||
image: str,
|
||||
first_frame: Optional[str] = None,
|
||||
last_frame: Optional[str] = None,
|
||||
prompt: Optional[str] = None,
|
||||
duration: Optional[str] = None,
|
||||
duration: Optional[int] = None,
|
||||
aspect_ratio: Optional[str] = None,
|
||||
) -> dict:
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'image': image,
|
||||
'first_frame': first_frame,
|
||||
'last_frame': last_frame,
|
||||
'prompt': prompt,
|
||||
'duration': duration,
|
||||
'aspect_ratio': aspect_ratio,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/image-to-video/kling', json=payload)
|
||||
result = await _request('POST', _KLING_O1_PRO_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_KLING_O1_STATUS_PATH}/{task_id}')
|
||||
|
||||
|
||||
_MINIMAX_1080P_PATH = '/v1/ai/image-to-video/minimax-hailuo-02-1080p'
|
||||
|
||||
|
||||
async def generate_video_minimax(
|
||||
prompt: str,
|
||||
first_frame_image: Optional[str] = None,
|
||||
subject_reference: Optional[str] = None,
|
||||
) -> dict:
|
||||
last_frame_image: Optional[str] = None,
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'prompt': prompt,
|
||||
'first_frame_image': first_frame_image,
|
||||
'subject_reference': subject_reference,
|
||||
'last_frame_image': last_frame_image,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/image-to-video/minimax', json=payload)
|
||||
result = await _request('POST', _MINIMAX_1080P_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_MINIMAX_1080P_PATH}/{task_id}')
|
||||
|
||||
|
||||
_SEEDANCE_PRO_1080P_PATH = '/v1/ai/image-to-video/seedance-pro-1080p'
|
||||
|
||||
|
||||
async def generate_video_seedance(
|
||||
prompt: str,
|
||||
image: Optional[str] = None,
|
||||
duration: Optional[str] = None,
|
||||
resolution: Optional[str] = None,
|
||||
) -> dict:
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'prompt': prompt,
|
||||
'image': image,
|
||||
'duration': duration,
|
||||
'resolution': resolution,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/image-to-video/seedance', json=payload)
|
||||
result = await _request('POST', _SEEDANCE_PRO_1080P_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_SEEDANCE_PRO_1080P_PATH}/{task_id}')
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Image editing
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_UPSCALER_PATH = '/v1/ai/image-upscaler'
|
||||
|
||||
|
||||
async def upscale_creative(
|
||||
image: str,
|
||||
prompt: Optional[str] = None,
|
||||
scale: Optional[int] = None,
|
||||
creativity: Optional[float] = None,
|
||||
resemblance: Optional[float] = None,
|
||||
) -> dict:
|
||||
scale_factor: Optional[str] = None,
|
||||
creativity: Optional[int] = None,
|
||||
resemblance: Optional[int] = None,
|
||||
optimized_for: Optional[str] = None,
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'image': image,
|
||||
'prompt': prompt,
|
||||
'scale': scale,
|
||||
'scale_factor': scale_factor,
|
||||
'creativity': creativity,
|
||||
'resemblance': resemblance,
|
||||
'optimized_for': optimized_for,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/upscale/creative', json=payload)
|
||||
result = await _request('POST', _UPSCALER_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_UPSCALER_PATH}/{task_id}')
|
||||
|
||||
|
||||
_UPSCALER_PRECISION_PATH = '/v1/ai/image-upscaler-precision'
|
||||
|
||||
|
||||
async def upscale_precision(
|
||||
image: str,
|
||||
scale: Optional[int] = None,
|
||||
) -> dict:
|
||||
scale_factor: Optional[str] = None,
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'image': image,
|
||||
'scale': scale,
|
||||
'scale_factor': scale_factor,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/upscale/precision', json=payload)
|
||||
result = await _request('POST', _UPSCALER_PRECISION_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_UPSCALER_PRECISION_PATH}/{task_id}')
|
||||
|
||||
|
||||
_RELIGHT_PATH = '/v1/ai/image-relight'
|
||||
|
||||
|
||||
async def relight_image(
|
||||
image: str,
|
||||
prompt: Optional[str] = None,
|
||||
light_source: Optional[str] = None,
|
||||
intensity: Optional[float] = None,
|
||||
) -> dict:
|
||||
transfer_light_from_reference_image: Optional[str] = None,
|
||||
light_transfer_strength: Optional[int] = None,
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'image': image,
|
||||
'prompt': prompt,
|
||||
'light_source': light_source,
|
||||
'intensity': intensity,
|
||||
'transfer_light_from_reference_image': transfer_light_from_reference_image,
|
||||
'light_transfer_strength': light_transfer_strength,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/relight', json=payload)
|
||||
result = await _request('POST', _RELIGHT_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_RELIGHT_PATH}/{task_id}')
|
||||
|
||||
|
||||
_STYLE_TRANSFER_PATH = '/v1/ai/image-style-transfer'
|
||||
|
||||
|
||||
async def style_transfer(
|
||||
image: str,
|
||||
style_reference: str,
|
||||
strength: Optional[float] = None,
|
||||
) -> dict:
|
||||
reference_image: str,
|
||||
prompt: Optional[str] = None,
|
||||
style_strength: Optional[int] = None,
|
||||
structure_strength: Optional[int] = None,
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'image': image,
|
||||
'style_reference': style_reference,
|
||||
'strength': strength,
|
||||
'reference_image': reference_image,
|
||||
'prompt': prompt,
|
||||
'style_strength': style_strength,
|
||||
'structure_strength': structure_strength,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/style-transfer', json=payload)
|
||||
result = await _request('POST', _STYLE_TRANSFER_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_STYLE_TRANSFER_PATH}/{task_id}')
|
||||
|
||||
|
||||
_EXPAND_PATH = '/v1/ai/image-expand/flux-pro'
|
||||
|
||||
|
||||
async def expand_image(
|
||||
image: str,
|
||||
prompt: Optional[str] = None,
|
||||
direction: Optional[str] = None,
|
||||
) -> dict:
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'image': image,
|
||||
'prompt': prompt,
|
||||
'direction': direction,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/expand', json=payload)
|
||||
result = await _request('POST', _EXPAND_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_EXPAND_PATH}/{task_id}')
|
||||
|
||||
|
||||
_INPAINT_PATH = '/v1/ai/ideogram-image-edit'
|
||||
|
||||
|
||||
async def inpaint(
|
||||
image: str,
|
||||
mask: str,
|
||||
prompt: str,
|
||||
) -> dict:
|
||||
) -> TaskResult:
|
||||
payload = {
|
||||
'image': image,
|
||||
'mask': mask,
|
||||
'prompt': prompt,
|
||||
}
|
||||
return await _request('POST', '/v1/ai/inpaint', json=payload)
|
||||
result = await _request('POST', _INPAINT_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_INPAINT_PATH}/{task_id}')
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Utilities
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def remove_background(image: str) -> bytes:
|
||||
return await _request_raw('POST', '/v1/ai/remove-background', json={'image': image})
|
||||
_REMOVE_BG_PATH = '/v1/ai/beta/remove-background'
|
||||
|
||||
|
||||
async def remove_background(image_url: str) -> dict:
|
||||
"""Remove background. Takes an image URL (not base64). Returns URLs."""
|
||||
return await _request(
|
||||
'POST', _REMOVE_BG_PATH,
|
||||
data={'image_url': image_url},
|
||||
)
|
||||
|
||||
|
||||
_CLASSIFIER_PATH = '/v1/ai/classifier/image'
|
||||
|
||||
|
||||
async def classify_image(image: str) -> dict:
|
||||
return await _request('POST', '/v1/ai/classifier', json={'image': image})
|
||||
"""Classify whether image is AI-generated. Returns data: [{class_name, probability}]."""
|
||||
return await _request('POST', _CLASSIFIER_PATH, json={'image': image})
|
||||
|
||||
|
||||
async def isolate_audio(audio: str) -> dict:
|
||||
return await _request('POST', '/v1/ai/audio-isolate', json={'audio': audio})
|
||||
_AUDIO_ISOLATION_PATH = '/v1/ai/audio-isolation'
|
||||
|
||||
|
||||
async def isolate_audio(audio: str) -> TaskResult:
|
||||
result = await _request('POST', _AUDIO_ISOLATION_PATH, json={'audio': audio})
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_AUDIO_ISOLATION_PATH}/{task_id}')
|
||||
|
||||
|
||||
_ICON_PATH = '/v1/ai/text-to-icon'
|
||||
|
||||
|
||||
async def generate_icon(
|
||||
@@ -349,29 +414,32 @@ async def generate_icon(
|
||||
color: Optional[str] = None,
|
||||
shape: Optional[str] = None,
|
||||
style: Optional[str] = None,
|
||||
) -> dict:
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'prompt': prompt,
|
||||
'color': color,
|
||||
'shape': shape,
|
||||
'style': style,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/icon', json=payload)
|
||||
result = await _request('POST', _ICON_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_ICON_PATH}/{task_id}')
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Task management
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def get_task_status(task_id: str) -> dict:
|
||||
return await _request('GET', f'/v1/ai/tasks/{task_id}')
|
||||
async def get_task_status(status_path: str) -> dict:
|
||||
"""Poll a task's status using its per-endpoint GET path."""
|
||||
return await _request('GET', status_path)
|
||||
|
||||
|
||||
async def check_api_key() -> bool:
|
||||
"""Verify the API key is valid by making a lightweight request."""
|
||||
try:
|
||||
client = get_client()
|
||||
response = await client.get('/v1/ai/tasks', params={'limit': 1})
|
||||
response = await client.get('/v1/ai/mystic', params={'per_page': 1})
|
||||
return response.status_code != 401
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
@@ -15,13 +15,21 @@ _tasks: dict[str, dict] = {}
|
||||
_poll_tasks: dict[str, asyncio.Task] = {}
|
||||
|
||||
|
||||
def submit(freepik_task_id: str, metadata: Optional[dict] = None) -> str:
|
||||
"""Register a Freepik task and start background polling."""
|
||||
def submit(freepik_task_id: str, status_path: str, metadata: Optional[dict] = None) -> str:
|
||||
"""Register a Freepik task and start background polling.
|
||||
|
||||
Args:
|
||||
freepik_task_id: The task_id returned by Freepik.
|
||||
status_path: The per-endpoint GET path for polling, e.g.
|
||||
'/v1/ai/text-to-image/flux-dev/{task-id}'.
|
||||
metadata: Optional metadata to attach to the task.
|
||||
"""
|
||||
internal_id = str(uuid.uuid4())
|
||||
now = datetime.now(timezone.utc)
|
||||
_tasks[internal_id] = {
|
||||
'task_id': internal_id,
|
||||
'freepik_task_id': freepik_task_id,
|
||||
'status_path': status_path,
|
||||
'status': TaskStatus.pending,
|
||||
'created_at': now,
|
||||
'updated_at': now,
|
||||
@@ -55,7 +63,6 @@ def list_tasks(
|
||||
def delete_task(task_id: str) -> bool:
|
||||
if task_id not in _tasks:
|
||||
return False
|
||||
# Cancel polling if active
|
||||
poll = _poll_tasks.pop(task_id, None)
|
||||
if poll and not poll.done():
|
||||
poll.cancel()
|
||||
@@ -71,12 +78,12 @@ def active_count() -> int:
|
||||
|
||||
|
||||
async def _poll_loop(internal_id: str):
|
||||
"""Poll Freepik API until the task completes or times out."""
|
||||
"""Poll Freepik API using the per-endpoint status path until done."""
|
||||
task = _tasks.get(internal_id)
|
||||
if not task:
|
||||
return
|
||||
|
||||
freepik_id = task['freepik_task_id']
|
||||
status_path = task['status_path']
|
||||
elapsed = 0
|
||||
|
||||
try:
|
||||
@@ -85,30 +92,26 @@ async def _poll_loop(internal_id: str):
|
||||
elapsed += settings.task_poll_interval_seconds
|
||||
|
||||
try:
|
||||
result = await freepik_client.get_task_status(freepik_id)
|
||||
result = await freepik_client.get_task_status(status_path)
|
||||
except Exception as exc:
|
||||
logger.warning(f'Poll error for {internal_id}: {exc}')
|
||||
continue
|
||||
|
||||
data = result.get('data', result)
|
||||
fp_status = data.get('status', '')
|
||||
fp_status = str(data.get('status', '')).upper()
|
||||
|
||||
task['updated_at'] = datetime.now(timezone.utc)
|
||||
|
||||
if fp_status in ('IN_PROGRESS', 'PROCESSING', 'processing'):
|
||||
if fp_status in ('CREATED', 'IN_PROGRESS', 'PROCESSING'):
|
||||
task['status'] = TaskStatus.processing
|
||||
task['progress'] = data.get('progress')
|
||||
continue
|
||||
|
||||
if fp_status in ('COMPLETED', 'completed', 'done'):
|
||||
if fp_status == 'COMPLETED':
|
||||
task['status'] = TaskStatus.completed
|
||||
task['progress'] = 1.0
|
||||
# Extract result URL from various response shapes
|
||||
result_url = (
|
||||
data.get('result_url')
|
||||
or data.get('output', {}).get('url')
|
||||
or _extract_first_url(data)
|
||||
)
|
||||
# Freepik returns results in data.generated[] (list of URLs)
|
||||
generated = data.get('generated', [])
|
||||
result_url = generated[0] if generated else None
|
||||
task['result_url'] = result_url
|
||||
if result_url:
|
||||
try:
|
||||
@@ -120,7 +123,7 @@ async def _poll_loop(internal_id: str):
|
||||
logger.info(f'Task {internal_id} completed')
|
||||
return
|
||||
|
||||
if fp_status in ('FAILED', 'failed', 'error'):
|
||||
if fp_status == 'FAILED':
|
||||
task['status'] = TaskStatus.failed
|
||||
task['error'] = data.get('error', data.get('message', 'Unknown error'))
|
||||
logger.warning(f'Task {internal_id} failed: {task["error"]}')
|
||||
@@ -141,23 +144,6 @@ async def _poll_loop(internal_id: str):
|
||||
_poll_tasks.pop(internal_id, None)
|
||||
|
||||
|
||||
def _extract_first_url(data: dict) -> Optional[str]:
|
||||
"""Try to extract the first URL from common Freepik response shapes."""
|
||||
# Some endpoints return {"data": {"images": [{"url": "..."}]}}
|
||||
for key in ('images', 'videos', 'results', 'outputs'):
|
||||
items = data.get(key, [])
|
||||
if isinstance(items, list) and items:
|
||||
first = items[0]
|
||||
if isinstance(first, dict) and 'url' in first:
|
||||
return first['url']
|
||||
if isinstance(first, str) and first.startswith('http'):
|
||||
return first
|
||||
# Direct URL field
|
||||
if 'url' in data and isinstance(data['url'], str):
|
||||
return data['url']
|
||||
return None
|
||||
|
||||
|
||||
def handle_webhook_completion(freepik_task_id: str, result_data: dict):
|
||||
"""Called when a webhook notification arrives for a completed task."""
|
||||
for task in _tasks.values():
|
||||
@@ -165,13 +151,8 @@ def handle_webhook_completion(freepik_task_id: str, result_data: dict):
|
||||
task['status'] = TaskStatus.completed
|
||||
task['progress'] = 1.0
|
||||
task['updated_at'] = datetime.now(timezone.utc)
|
||||
result_url = (
|
||||
result_data.get('result_url')
|
||||
or result_data.get('output', {}).get('url')
|
||||
or _extract_first_url(result_data)
|
||||
)
|
||||
task['result_url'] = result_url
|
||||
# Cancel polling since webhook already notified us
|
||||
generated = result_data.get('generated', [])
|
||||
task['result_url'] = generated[0] if generated else None
|
||||
poll = _poll_tasks.pop(task['task_id'], None)
|
||||
if poll and not poll.done():
|
||||
poll.cancel()
|
||||
|
||||
Reference in New Issue
Block a user