fix: align Freepik API paths with OpenAPI spec
Some checks failed: Build and Push Docker Image / build (push) — failing after 9s
The original implementation used guessed endpoint paths that don't match
the actual Freepik API. Key fixes based on their OpenAPI spec:
- Task polling is per-endpoint (e.g. GET /v1/ai/text-to-image/flux-dev/{task-id})
not a generic /v1/ai/tasks/{id}. freepik_client now returns TaskResult
with status_path, and task_tracker polls using that path.
- Fixed endpoint paths: flux-pro -> flux-pro-v1-1, upscale -> image-upscaler,
relight -> image-relight, style-transfer -> image-style-transfer,
expand -> image-expand/flux-pro, inpaint -> ideogram-image-edit,
remove-background -> beta/remove-background, classifier -> classifier/image,
audio-isolate -> audio-isolation, icon -> text-to-icon
- Fixed video paths: kling -> kling-o1-pro with kling-o1 status path,
minimax -> minimax-hailuo-02-1080p, seedance -> seedance-pro-1080p
- Fixed request schemas to match actual API params (e.g. scale_factor
not scale, reference_image not style_reference, image_url for bg removal)
- Fixed response parsing: status is uppercase (COMPLETED not completed),
results in data.generated[] array, classifier returns [{class_name, probability}]
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -1,5 +1,6 @@
|
||||
import asyncio
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import Any, Optional
|
||||
|
||||
import httpx
|
||||
@@ -11,6 +12,13 @@ logger = logging.getLogger(__name__)
|
||||
_client: Optional[httpx.AsyncClient] = None
|
||||
|
||||
|
||||
@dataclass
class TaskResult:
    """Wraps a Freepik API response with the status polling path."""
    # Raw JSON body returned by the submission request (task_id lives
    # under data.task_id — see _extract_task_id).
    data: dict
    # Per-endpoint GET path used to poll this task's status, e.g.
    # '/v1/ai/text-to-image/flux-dev/{task-id}'.
    status_path: str
|
||||
|
||||
|
||||
def get_client() -> httpx.AsyncClient:
|
||||
if _client is None:
|
||||
raise RuntimeError('Freepik client not initialized')
|
||||
@@ -80,268 +88,325 @@ async def _request(
|
||||
)
|
||||
|
||||
|
||||
async def _request_raw(
    method: str,
    path: str,
    *,
    json: Optional[dict] = None,
    data: Optional[dict] = None,
    files: Optional[dict] = None,
    max_retries: int = 3,
) -> bytes:
    """Make a request and return raw bytes (for binary responses).

    Retries on HTTP 429 honoring the Retry-After header (falling back to
    exponential backoff) for up to ``max_retries`` attempts.

    Raises:
        ValueError: if ``max_retries`` is less than 1.
        httpx.HTTPStatusError: for non-429 error statuses, or when the
            rate limit persists after all retries.
    """
    if max_retries < 1:
        # Guard: with zero attempts the loop never runs and `response`
        # would be unbound at the final raise (NameError).
        raise ValueError('max_retries must be >= 1')
    client = get_client()
    for attempt in range(max_retries):
        kwargs: dict[str, Any] = {}
        if json is not None:
            kwargs['json'] = json
        if data is not None:
            kwargs['data'] = data
        if files is not None:
            kwargs['files'] = files

        response = await client.request(method, path, **kwargs)

        if response.status_code == 429:
            # Prefer the server-provided Retry-After; otherwise back off
            # exponentially (1s, 2s, 4s, ...).
            retry_after = int(response.headers.get('Retry-After', 2 ** attempt))
            # Lazy %-style args keep log formatting off the hot path.
            logger.warning(
                'Rate limited, retrying in %ss (attempt %s)',
                retry_after, attempt + 1,
            )
            await asyncio.sleep(retry_after)
            continue

        response.raise_for_status()
        return response.content

    raise httpx.HTTPStatusError(
        'Rate limit exceeded after retries',
        request=response.request,
        response=response,
    )
|
||||
|
||||
|
||||
def _strip_none(d: dict) -> dict:
|
||||
"""Remove None values from a dict for clean API payloads."""
|
||||
return {k: v for k, v in d.items() if v is not None}
|
||||
|
||||
|
||||
def _extract_task_id(result: dict) -> str:
|
||||
"""Extract task_id from Freepik response (always under data.task_id)."""
|
||||
data = result.get('data', result)
|
||||
if isinstance(data, dict):
|
||||
return str(data.get('task_id', ''))
|
||||
return ''
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Image generation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_MYSTIC_PATH = '/v1/ai/mystic'


async def generate_mystic(
    prompt: str,
    negative_prompt: Optional[str] = None,
    resolution: Optional[str] = None,
    styling: Optional[dict] = None,
    aspect_ratio: Optional[str] = None,
    model: Optional[str] = None,
    seed: Optional[int] = None,
    num_images: Optional[int] = None,
    structure_reference: Optional[str] = None,
    style_reference: Optional[str] = None,
) -> TaskResult:
    """Submit a Mystic text-to-image generation task.

    Returns a TaskResult; poll its status_path (GET /v1/ai/mystic/{task-id})
    until the task completes.
    """
    # `styling` appears exactly once (the merged diff duplicated it).
    payload = _strip_none({
        'prompt': prompt,
        'negative_prompt': negative_prompt,
        'resolution': resolution,
        'styling': styling,
        'aspect_ratio': aspect_ratio,
        'model': model,
        'seed': seed,
        'num_images': num_images,
        'structure_reference': structure_reference,
        'style_reference': style_reference,
    })
    result = await _request('POST', _MYSTIC_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_MYSTIC_PATH}/{task_id}')
|
||||
|
||||
|
||||
_FLUX_DEV_PATH = '/v1/ai/text-to-image/flux-dev'


async def generate_flux_dev(
    prompt: str,
    image: Optional[str] = None,
    guidance_scale: Optional[float] = None,
    num_images: Optional[int] = None,
    aspect_ratio: Optional[str] = None,
    styling: Optional[dict] = None,
    seed: Optional[int] = None,
) -> TaskResult:
    """Submit a Flux-dev text-to-image task.

    Returns a TaskResult; poll its status_path
    (GET /v1/ai/text-to-image/flux-dev/{task-id}).
    """
    payload = _strip_none({
        'prompt': prompt,
        'image': image,
        'guidance_scale': guidance_scale,
        'num_images': num_images,
        'aspect_ratio': aspect_ratio,
        'styling': styling,
        'seed': seed,
    })
    result = await _request('POST', _FLUX_DEV_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_FLUX_DEV_PATH}/{task_id}')
|
||||
|
||||
|
||||
_FLUX_PRO_PATH = '/v1/ai/text-to-image/flux-pro-v1-1'


async def generate_flux_pro(
    prompt: str,
    image: Optional[str] = None,
    guidance_scale: Optional[float] = None,
    aspect_ratio: Optional[str] = None,
    styling: Optional[dict] = None,
    seed: Optional[int] = None,
) -> TaskResult:
    """Submit a Flux Pro 1.1 text-to-image task.

    Uses the spec's 'flux-pro-v1-1' path (the old '/flux-pro-1.1' guess
    does not exist). Poll the returned status_path for completion.
    """
    payload = _strip_none({
        'prompt': prompt,
        'image': image,
        'guidance_scale': guidance_scale,
        'aspect_ratio': aspect_ratio,
        'styling': styling,
        'seed': seed,
    })
    result = await _request('POST', _FLUX_PRO_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_FLUX_PRO_PATH}/{task_id}')
|
||||
|
||||
|
||||
_SEEDREAM_PATH = '/v1/ai/text-to-image/seedream'


async def generate_seedream(
    prompt: str,
    image: Optional[str] = None,
    aspect_ratio: Optional[str] = None,
    num_images: Optional[int] = None,
    seed: Optional[int] = None,
) -> TaskResult:
    """Submit a Seedream text-to-image task; poll the returned status_path."""
    payload = _strip_none({
        'prompt': prompt,
        'image': image,
        'aspect_ratio': aspect_ratio,
        'num_images': num_images,
        'seed': seed,
    })
    result = await _request('POST', _SEEDREAM_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_SEEDREAM_PATH}/{task_id}')
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Video generation
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_KLING_O1_PRO_PATH = '/v1/ai/image-to-video/kling-o1-pro'
_KLING_O1_STATUS_PATH = '/v1/ai/image-to-video/kling-o1'


async def generate_video_kling(
    image: str,
    first_frame: Optional[str] = None,
    last_frame: Optional[str] = None,
    prompt: Optional[str] = None,
    duration: Optional[int] = None,
    aspect_ratio: Optional[str] = None,
) -> TaskResult:
    """Submit a Kling O1 Pro image-to-video task.

    NOTE: submission POSTs to 'kling-o1-pro', but status is polled on the
    shared 'kling-o1' path, per the Freepik OpenAPI spec.
    `duration` is an int per the spec (the merged diff also carried the
    superseded str-typed parameter).
    """
    payload = _strip_none({
        'image': image,
        'first_frame': first_frame,
        'last_frame': last_frame,
        'prompt': prompt,
        'duration': duration,
        'aspect_ratio': aspect_ratio,
    })
    result = await _request('POST', _KLING_O1_PRO_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_KLING_O1_STATUS_PATH}/{task_id}')
|
||||
|
||||
|
||||
_MINIMAX_1080P_PATH = '/v1/ai/image-to-video/minimax-hailuo-02-1080p'


async def generate_video_minimax(
    prompt: str,
    first_frame_image: Optional[str] = None,
    subject_reference: Optional[str] = None,
    last_frame_image: Optional[str] = None,
) -> TaskResult:
    """Submit a MiniMax Hailuo-02 1080p image-to-video task.

    NOTE(review): `subject_reference` came from the pre-fix signature; the
    Hailuo-02 endpoint may not accept it — confirm against the OpenAPI spec.
    Kept for backward compatibility (None values are stripped anyway).
    """
    payload = _strip_none({
        'prompt': prompt,
        'first_frame_image': first_frame_image,
        'subject_reference': subject_reference,
        'last_frame_image': last_frame_image,
    })
    result = await _request('POST', _MINIMAX_1080P_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_MINIMAX_1080P_PATH}/{task_id}')
|
||||
|
||||
|
||||
_SEEDANCE_PRO_1080P_PATH = '/v1/ai/image-to-video/seedance-pro-1080p'


async def generate_video_seedance(
    prompt: str,
    image: Optional[str] = None,
    duration: Optional[str] = None,
    resolution: Optional[str] = None,
) -> TaskResult:
    """Submit a Seedance Pro 1080p image-to-video task; poll status_path."""
    payload = _strip_none({
        'prompt': prompt,
        'image': image,
        'duration': duration,
        'resolution': resolution,
    })
    result = await _request('POST', _SEEDANCE_PRO_1080P_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_SEEDANCE_PRO_1080P_PATH}/{task_id}')
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Image editing
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_UPSCALER_PATH = '/v1/ai/image-upscaler'


async def upscale_creative(
    image: str,
    prompt: Optional[str] = None,
    scale_factor: Optional[str] = None,
    creativity: Optional[int] = None,
    resemblance: Optional[int] = None,
    optimized_for: Optional[str] = None,
) -> TaskResult:
    """Submit a creative image-upscale task.

    Per the spec: the parameter is `scale_factor` (a string like '2x',
    not the old numeric `scale`), and creativity/resemblance are ints.
    Poll the returned status_path for completion.
    """
    payload = _strip_none({
        'image': image,
        'prompt': prompt,
        'scale_factor': scale_factor,
        'creativity': creativity,
        'resemblance': resemblance,
        'optimized_for': optimized_for,
    })
    result = await _request('POST', _UPSCALER_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_UPSCALER_PATH}/{task_id}')
|
||||
|
||||
|
||||
_UPSCALER_PRECISION_PATH = '/v1/ai/image-upscaler-precision'


async def upscale_precision(
    image: str,
    scale_factor: Optional[str] = None,
) -> TaskResult:
    """Submit a precision image-upscale task (`scale_factor`, not `scale`)."""
    payload = _strip_none({
        'image': image,
        'scale_factor': scale_factor,
    })
    result = await _request('POST', _UPSCALER_PRECISION_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_UPSCALER_PRECISION_PATH}/{task_id}')
|
||||
|
||||
|
||||
_RELIGHT_PATH = '/v1/ai/image-relight'


async def relight_image(
    image: str,
    prompt: Optional[str] = None,
    light_source: Optional[str] = None,
    intensity: Optional[float] = None,
    transfer_light_from_reference_image: Optional[str] = None,
    light_transfer_strength: Optional[int] = None,
) -> TaskResult:
    """Submit an image-relight task; poll the returned status_path.

    NOTE(review): `light_source`/`intensity` came from the pre-fix
    signature — confirm the image-relight spec still accepts them.
    They are kept for backward compatibility (None values are stripped).
    """
    payload = _strip_none({
        'image': image,
        'prompt': prompt,
        'light_source': light_source,
        'intensity': intensity,
        'transfer_light_from_reference_image': transfer_light_from_reference_image,
        'light_transfer_strength': light_transfer_strength,
    })
    result = await _request('POST', _RELIGHT_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_RELIGHT_PATH}/{task_id}')
|
||||
|
||||
|
||||
_STYLE_TRANSFER_PATH = '/v1/ai/image-style-transfer'


async def style_transfer(
    image: str,
    reference_image: str,
    prompt: Optional[str] = None,
    style_strength: Optional[int] = None,
    structure_strength: Optional[int] = None,
) -> TaskResult:
    """Submit an image style-transfer task.

    Per the spec the style source is `reference_image` (the old
    `style_reference`/`strength` names do not exist on this endpoint).
    Poll the returned status_path for completion.
    """
    payload = _strip_none({
        'image': image,
        'reference_image': reference_image,
        'prompt': prompt,
        'style_strength': style_strength,
        'structure_strength': structure_strength,
    })
    result = await _request('POST', _STYLE_TRANSFER_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_STYLE_TRANSFER_PATH}/{task_id}')
|
||||
|
||||
|
||||
_EXPAND_PATH = '/v1/ai/image-expand/flux-pro'


async def expand_image(
    image: str,
    prompt: Optional[str] = None,
    direction: Optional[str] = None,
) -> TaskResult:
    """Submit an image-expand (outpainting) task; poll status_path."""
    payload = _strip_none({
        'image': image,
        'prompt': prompt,
        'direction': direction,
    })
    result = await _request('POST', _EXPAND_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_EXPAND_PATH}/{task_id}')
|
||||
|
||||
|
||||
_INPAINT_PATH = '/v1/ai/ideogram-image-edit'


async def inpaint(
    image: str,
    mask: str,
    prompt: str,
) -> TaskResult:
    """Submit an inpainting task (Ideogram image-edit endpoint).

    All three arguments are required, so no _strip_none is needed.
    Poll the returned status_path for completion.
    """
    payload = {
        'image': image,
        'mask': mask,
        'prompt': prompt,
    }
    result = await _request('POST', _INPAINT_PATH, json=payload)
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_INPAINT_PATH}/{task_id}')
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Utilities
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
_REMOVE_BG_PATH = '/v1/ai/beta/remove-background'


async def remove_background(image_url: str) -> dict:
    """Remove an image's background.

    Takes an image URL (not base64) and returns JSON with result URLs.
    The beta endpoint accepts form-encoded input, hence `data=` rather
    than `json=`.
    """
    return await _request(
        'POST', _REMOVE_BG_PATH,
        data={'image_url': image_url},
    )
|
||||
|
||||
|
||||
_CLASSIFIER_PATH = '/v1/ai/classifier/image'


async def classify_image(image: str) -> dict:
    """Classify whether an image is AI-generated.

    The response's data is a list of {class_name, probability} entries
    (synchronous — no task polling needed).
    """
    return await _request('POST', _CLASSIFIER_PATH, json={'image': image})
|
||||
|
||||
|
||||
_AUDIO_ISOLATION_PATH = '/v1/ai/audio-isolation'


async def isolate_audio(audio: str) -> TaskResult:
    """Submit an audio-isolation task; poll the returned status_path."""
    result = await _request('POST', _AUDIO_ISOLATION_PATH, json={'audio': audio})
    task_id = _extract_task_id(result)
    return TaskResult(data=result, status_path=f'{_AUDIO_ISOLATION_PATH}/{task_id}')
|
||||
|
||||
|
||||
_ICON_PATH = '/v1/ai/text-to-icon'
|
||||
|
||||
|
||||
async def generate_icon(
|
||||
@@ -349,29 +414,32 @@ async def generate_icon(
|
||||
color: Optional[str] = None,
|
||||
shape: Optional[str] = None,
|
||||
style: Optional[str] = None,
|
||||
) -> dict:
|
||||
) -> TaskResult:
|
||||
payload = _strip_none({
|
||||
'prompt': prompt,
|
||||
'color': color,
|
||||
'shape': shape,
|
||||
'style': style,
|
||||
})
|
||||
return await _request('POST', '/v1/ai/icon', json=payload)
|
||||
result = await _request('POST', _ICON_PATH, json=payload)
|
||||
task_id = _extract_task_id(result)
|
||||
return TaskResult(data=result, status_path=f'{_ICON_PATH}/{task_id}')
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Task management
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
async def get_task_status(status_path: str) -> dict:
    """Poll a task's status using its per-endpoint GET path.

    Freepik has no generic /v1/ai/tasks/{id} endpoint — each submission
    returns a TaskResult whose status_path must be polled instead.
    """
    return await _request('GET', status_path)
|
||||
|
||||
|
||||
async def check_api_key() -> bool:
    """Verify the API key is valid by making a lightweight request.

    Any status other than 401 (including 4xx validation responses)
    means the key was accepted by the API.
    """
    try:
        client = get_client()
        # GET /v1/ai/mystic with per_page=1 — presumably a cheap listing
        # call that requires auth; confirm against the OpenAPI spec.
        response = await client.get('/v1/ai/mystic', params={'per_page': 1})
        return response.status_code != 401
    except Exception:
        # Network/transport failure — report the key as unusable rather
        # than propagate during startup.
        return False
|
||||
|
||||
Reference in New Issue
Block a user