Some checks failed
Build and Push Docker Image / build (push) Failing after 9s
The original implementation used guessed endpoint paths that don't match
the actual Freepik API. Key fixes based on their OpenAPI spec:
- Task polling is per-endpoint (e.g. GET /v1/ai/text-to-image/flux-dev/{task-id})
not a generic /v1/ai/tasks/{id}. freepik_client now returns TaskResult
with status_path, and task_tracker polls using that path.
- Fixed endpoint paths: flux-pro -> flux-pro-v1-1, upscale -> image-upscaler,
relight -> image-relight, style-transfer -> image-style-transfer,
expand -> image-expand/flux-pro, inpaint -> ideogram-image-edit,
remove-background -> beta/remove-background, classifier -> classifier/image,
audio-isolate -> audio-isolation, icon -> text-to-icon
- Fixed video paths: kling -> kling-o1-pro with kling-o1 status path,
minimax -> minimax-hailuo-02-1080p, seedance -> seedance-pro-1080p
- Fixed request schemas to match actual API params (e.g. scale_factor
not scale, reference_image not style_reference, image_url for bg removal)
- Fixed response parsing: status is uppercase (COMPLETED not completed),
results in data.generated[] array, classifier returns [{class_name, probability}]
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
446 lines
13 KiB
Python
446 lines
13 KiB
Python
import asyncio
|
|
import logging
|
|
from dataclasses import dataclass
|
|
from typing import Any, Optional
|
|
|
|
import httpx
|
|
|
|
from app.config import settings
|
|
|
|
logger = logging.getLogger(__name__)

# Module-level singleton HTTP client. Created by create_client() at startup,
# torn down by close_client(); all request helpers access it via get_client().
_client: Optional[httpx.AsyncClient] = None
|
|
@dataclass
class TaskResult:
    """Wraps a Freepik API response with the status polling path."""

    # Raw JSON body returned by the task-creation request.
    data: dict
    # Per-endpoint GET path (including task id) used to poll this task's
    # status — Freepik has no generic /v1/ai/tasks/{id} endpoint.
    status_path: str
|
def get_client() -> httpx.AsyncClient:
    """Return the shared AsyncClient.

    Raises:
        RuntimeError: if create_client() has not been called yet.
    """
    if _client is not None:
        return _client
    raise RuntimeError('Freepik client not initialized')
|
async def create_client() -> httpx.AsyncClient:
    """Create and register the shared Freepik AsyncClient.

    Configures the API-key header, JSON Accept header, and a 60s request /
    10s connect timeout against the configured base URL.
    """
    global _client
    default_headers = {
        'x-freepik-api-key': settings.freepik_api_key,
        'Accept': 'application/json',
    }
    request_timeout = httpx.Timeout(60.0, connect=10.0)
    _client = httpx.AsyncClient(
        base_url=settings.freepik_base_url,
        headers=default_headers,
        timeout=request_timeout,
    )
    return _client
|
|
async def close_client():
    """Close and clear the shared client; a no-op when none exists."""
    global _client
    if _client is None:
        return
    await _client.aclose()
    _client = None
|
async def _request(
    method: str,
    path: str,
    *,
    json: Optional[dict] = None,
    data: Optional[dict] = None,
    files: Optional[dict] = None,
    params: Optional[dict] = None,
    max_retries: int = 3,
) -> dict[str, Any]:
    """Make an authenticated request to Freepik API with retry on 429.

    Args:
        method: HTTP method ('GET', 'POST', ...).
        path: API path relative to the client's base_url.
        json, data, files, params: forwarded to httpx when not None.
        max_retries: total attempts before giving up on rate limiting.

    Returns:
        Parsed JSON body, or {'raw': <bytes>} for non-JSON responses.

    Raises:
        ValueError: if max_retries is not positive.
        httpx.HTTPStatusError: on non-2xx responses, or when every attempt
            was rate-limited.
    """
    if max_retries < 1:
        # Without this guard the loop never runs and `response` is unbound
        # in the final raise (UnboundLocalError instead of a clear error).
        raise ValueError('max_retries must be >= 1')
    client = get_client()
    for attempt in range(max_retries):
        kwargs: dict[str, Any] = {}
        if json is not None:
            kwargs['json'] = json
        if data is not None:
            kwargs['data'] = data
        if files is not None:
            kwargs['files'] = files
        if params is not None:
            kwargs['params'] = params

        response = await client.request(method, path, **kwargs)

        if response.status_code == 429:
            # Retry-After may be a delay in seconds OR an HTTP-date
            # (RFC 9110); fall back to exponential backoff when the header
            # is missing or not a plain integer.
            try:
                retry_after = int(response.headers.get('Retry-After', ''))
            except ValueError:
                retry_after = 2 ** attempt
            logger.warning(f'Rate limited, retrying in {retry_after}s (attempt {attempt + 1})')
            await asyncio.sleep(retry_after)
            continue

        response.raise_for_status()
        if response.headers.get('content-type', '').startswith('application/json'):
            return response.json()
        # Non-JSON bodies (e.g. binary downloads) are returned raw.
        return {'raw': response.content}

    raise httpx.HTTPStatusError(
        'Rate limit exceeded after retries',
        request=response.request,
        response=response,
    )
|
def _strip_none(d: dict) -> dict:
|
|
"""Remove None values from a dict for clean API payloads."""
|
|
return {k: v for k, v in d.items() if v is not None}
|
|
|
|
|
|
def _extract_task_id(result: dict) -> str:
|
|
"""Extract task_id from Freepik response (always under data.task_id)."""
|
|
data = result.get('data', result)
|
|
if isinstance(data, dict):
|
|
return str(data.get('task_id', ''))
|
|
return ''
|
|
|
|
|
|
# ---------------------------------------------------------------------------
|
|
# Image generation
|
|
# ---------------------------------------------------------------------------
|
|
|
|
_MYSTIC_PATH = '/v1/ai/mystic'


async def generate_mystic(
    prompt: str,
    resolution: Optional[str] = None,
    aspect_ratio: Optional[str] = None,
    model: Optional[str] = None,
    seed: Optional[int] = None,
    styling: Optional[dict] = None,
    structure_reference: Optional[str] = None,
    style_reference: Optional[str] = None,
) -> TaskResult:
    """Submit a Mystic text-to-image generation task.

    Only non-None arguments are sent. Returns a TaskResult whose
    status_path polls the same endpoint with the created task id.
    """
    body = {
        'prompt': prompt,
        'resolution': resolution,
        'aspect_ratio': aspect_ratio,
        'model': model,
        'seed': seed,
        'styling': styling,
        'structure_reference': structure_reference,
        'style_reference': style_reference,
    }
    response = await _request('POST', _MYSTIC_PATH, json=_strip_none(body))
    return TaskResult(
        data=response,
        status_path=f'{_MYSTIC_PATH}/{_extract_task_id(response)}',
    )
|
_FLUX_DEV_PATH = '/v1/ai/text-to-image/flux-dev'


async def generate_flux_dev(
    prompt: str,
    aspect_ratio: Optional[str] = None,
    styling: Optional[dict] = None,
    seed: Optional[int] = None,
) -> TaskResult:
    """Submit a Flux Dev text-to-image task; None arguments are omitted."""
    body = _strip_none({
        'prompt': prompt,
        'aspect_ratio': aspect_ratio,
        'styling': styling,
        'seed': seed,
    })
    response = await _request('POST', _FLUX_DEV_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_FLUX_DEV_PATH}/{_extract_task_id(response)}',
    )
|
_FLUX_PRO_PATH = '/v1/ai/text-to-image/flux-pro-v1-1'


async def generate_flux_pro(
    prompt: str,
    aspect_ratio: Optional[str] = None,
    styling: Optional[dict] = None,
    seed: Optional[int] = None,
) -> TaskResult:
    """Submit a Flux Pro v1.1 text-to-image task; None arguments are omitted."""
    body = _strip_none({
        'prompt': prompt,
        'aspect_ratio': aspect_ratio,
        'styling': styling,
        'seed': seed,
    })
    response = await _request('POST', _FLUX_PRO_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_FLUX_PRO_PATH}/{_extract_task_id(response)}',
    )
|
_SEEDREAM_PATH = '/v1/ai/text-to-image/seedream'


async def generate_seedream(
    prompt: str,
    aspect_ratio: Optional[str] = None,
    seed: Optional[int] = None,
) -> TaskResult:
    """Submit a Seedream text-to-image task; None arguments are omitted."""
    body = _strip_none({
        'prompt': prompt,
        'aspect_ratio': aspect_ratio,
        'seed': seed,
    })
    response = await _request('POST', _SEEDREAM_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_SEEDREAM_PATH}/{_extract_task_id(response)}',
    )
|
# ---------------------------------------------------------------------------
|
|
# Video generation
|
|
# ---------------------------------------------------------------------------
|
|
|
|
_KLING_O1_PRO_PATH = '/v1/ai/image-to-video/kling-o1-pro'
# NOTE: task creation goes to kling-o1-pro, but status polling uses the
# shared kling-o1 path — the two deliberately differ.
_KLING_O1_STATUS_PATH = '/v1/ai/image-to-video/kling-o1'


async def generate_video_kling(
    first_frame: Optional[str] = None,
    last_frame: Optional[str] = None,
    prompt: Optional[str] = None,
    duration: Optional[int] = None,
    aspect_ratio: Optional[str] = None,
) -> TaskResult:
    """Submit a Kling O1 Pro image-to-video task; None arguments are omitted."""
    body = _strip_none({
        'first_frame': first_frame,
        'last_frame': last_frame,
        'prompt': prompt,
        'duration': duration,
        'aspect_ratio': aspect_ratio,
    })
    response = await _request('POST', _KLING_O1_PRO_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_KLING_O1_STATUS_PATH}/{_extract_task_id(response)}',
    )
|
_MINIMAX_1080P_PATH = '/v1/ai/image-to-video/minimax-hailuo-02-1080p'


async def generate_video_minimax(
    prompt: str,
    first_frame_image: Optional[str] = None,
    last_frame_image: Optional[str] = None,
) -> TaskResult:
    """Submit a MiniMax Hailuo 02 (1080p) video task; None arguments are omitted."""
    body = _strip_none({
        'prompt': prompt,
        'first_frame_image': first_frame_image,
        'last_frame_image': last_frame_image,
    })
    response = await _request('POST', _MINIMAX_1080P_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_MINIMAX_1080P_PATH}/{_extract_task_id(response)}',
    )
|
_SEEDANCE_PRO_1080P_PATH = '/v1/ai/image-to-video/seedance-pro-1080p'


async def generate_video_seedance(
    prompt: str,
    image: Optional[str] = None,
    duration: Optional[str] = None,
) -> TaskResult:
    """Submit a Seedance Pro (1080p) video task; None arguments are omitted."""
    body = _strip_none({
        'prompt': prompt,
        'image': image,
        'duration': duration,
    })
    response = await _request('POST', _SEEDANCE_PRO_1080P_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_SEEDANCE_PRO_1080P_PATH}/{_extract_task_id(response)}',
    )
|
# ---------------------------------------------------------------------------
|
|
# Image editing
|
|
# ---------------------------------------------------------------------------
|
|
|
|
_UPSCALER_PATH = '/v1/ai/image-upscaler'


async def upscale_creative(
    image: str,
    prompt: Optional[str] = None,
    scale_factor: Optional[str] = None,
    creativity: Optional[int] = None,
    resemblance: Optional[int] = None,
    optimized_for: Optional[str] = None,
) -> TaskResult:
    """Submit a creative (generative) upscale task; None arguments are omitted."""
    body = _strip_none({
        'image': image,
        'prompt': prompt,
        'scale_factor': scale_factor,
        'creativity': creativity,
        'resemblance': resemblance,
        'optimized_for': optimized_for,
    })
    response = await _request('POST', _UPSCALER_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_UPSCALER_PATH}/{_extract_task_id(response)}',
    )
|
_UPSCALER_PRECISION_PATH = '/v1/ai/image-upscaler-precision'


async def upscale_precision(
    image: str,
    scale_factor: Optional[str] = None,
) -> TaskResult:
    """Submit a precision (faithful) upscale task; None arguments are omitted."""
    body = _strip_none({
        'image': image,
        'scale_factor': scale_factor,
    })
    response = await _request('POST', _UPSCALER_PRECISION_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_UPSCALER_PRECISION_PATH}/{_extract_task_id(response)}',
    )
|
_RELIGHT_PATH = '/v1/ai/image-relight'


async def relight_image(
    image: str,
    prompt: Optional[str] = None,
    transfer_light_from_reference_image: Optional[str] = None,
    light_transfer_strength: Optional[int] = None,
) -> TaskResult:
    """Submit an image relighting task; None arguments are omitted."""
    body = _strip_none({
        'image': image,
        'prompt': prompt,
        'transfer_light_from_reference_image': transfer_light_from_reference_image,
        'light_transfer_strength': light_transfer_strength,
    })
    response = await _request('POST', _RELIGHT_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_RELIGHT_PATH}/{_extract_task_id(response)}',
    )
|
_STYLE_TRANSFER_PATH = '/v1/ai/image-style-transfer'


async def style_transfer(
    image: str,
    reference_image: str,
    prompt: Optional[str] = None,
    style_strength: Optional[int] = None,
    structure_strength: Optional[int] = None,
) -> TaskResult:
    """Submit a style-transfer task applying *reference_image*'s style to *image*."""
    body = _strip_none({
        'image': image,
        'reference_image': reference_image,
        'prompt': prompt,
        'style_strength': style_strength,
        'structure_strength': structure_strength,
    })
    response = await _request('POST', _STYLE_TRANSFER_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_STYLE_TRANSFER_PATH}/{_extract_task_id(response)}',
    )
|
_EXPAND_PATH = '/v1/ai/image-expand/flux-pro'


async def expand_image(
    image: str,
    prompt: Optional[str] = None,
) -> TaskResult:
    """Submit an outpainting (image expand) task; None arguments are omitted."""
    body = _strip_none({
        'image': image,
        'prompt': prompt,
    })
    response = await _request('POST', _EXPAND_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_EXPAND_PATH}/{_extract_task_id(response)}',
    )
|
_INPAINT_PATH = '/v1/ai/ideogram-image-edit'


async def inpaint(
    image: str,
    mask: str,
    prompt: str,
) -> TaskResult:
    """Submit an inpainting task; all three arguments are required by the API."""
    response = await _request(
        'POST',
        _INPAINT_PATH,
        json={'image': image, 'mask': mask, 'prompt': prompt},
    )
    return TaskResult(
        data=response,
        status_path=f'{_INPAINT_PATH}/{_extract_task_id(response)}',
    )
|
# ---------------------------------------------------------------------------
|
|
# Utilities
|
|
# ---------------------------------------------------------------------------
|
|
|
|
_REMOVE_BG_PATH = '/v1/ai/beta/remove-background'


async def remove_background(image_url: str) -> dict:
    """Remove background. Takes an image URL (not base64). Returns URLs.

    Unlike the task-based endpoints, the response carries the result
    directly, so no TaskResult/status_path is produced. Sent as form
    data (data=), not JSON, per the endpoint's content type.
    """
    form = {'image_url': image_url}
    return await _request('POST', _REMOVE_BG_PATH, data=form)
|
_CLASSIFIER_PATH = '/v1/ai/classifier/image'


async def classify_image(image: str) -> dict:
    """Classify whether image is AI-generated. Returns data: [{class_name, probability}]."""
    payload = {'image': image}
    return await _request('POST', _CLASSIFIER_PATH, json=payload)
|
_AUDIO_ISOLATION_PATH = '/v1/ai/audio-isolation'


async def isolate_audio(audio: str) -> TaskResult:
    """Submit an audio-isolation (voice/noise separation) task."""
    response = await _request('POST', _AUDIO_ISOLATION_PATH, json={'audio': audio})
    return TaskResult(
        data=response,
        status_path=f'{_AUDIO_ISOLATION_PATH}/{_extract_task_id(response)}',
    )
|
_ICON_PATH = '/v1/ai/text-to-icon'


async def generate_icon(
    prompt: str,
    color: Optional[str] = None,
    shape: Optional[str] = None,
    style: Optional[str] = None,
) -> TaskResult:
    """Submit a text-to-icon generation task; None arguments are omitted."""
    body = _strip_none({
        'prompt': prompt,
        'color': color,
        'shape': shape,
        'style': style,
    })
    response = await _request('POST', _ICON_PATH, json=body)
    return TaskResult(
        data=response,
        status_path=f'{_ICON_PATH}/{_extract_task_id(response)}',
    )
|
# ---------------------------------------------------------------------------
|
|
# Task management
|
|
# ---------------------------------------------------------------------------
|
|
|
|
async def get_task_status(status_path: str) -> dict:
    """Poll a task's status using its per-endpoint GET path.

    *status_path* is the TaskResult.status_path returned by the generation
    helpers in this module — each Freepik endpoint exposes its own
    GET /{task-id} route rather than a generic tasks endpoint.
    """
    return await _request('GET', status_path)
|
async def check_api_key() -> bool:
    """Verify the API key is valid by making a lightweight request.

    Any HTTP status other than 401 counts as a usable key; an uninitialized
    client or a transport failure yields False.
    """
    try:
        response = await get_client().get('/v1/ai/mystic', params={'per_page': 1})
    except Exception:
        return False
    return response.status_code != 401