Files
freepik/freepik_cli/api/models.py
T
valknar f24d138ab4 feat: initial Freepik AI CLI
Sophisticated Python CLI for generating and manipulating images and
video via the Freepik API, built with typer + rich.

Commands:
- generate-image: text-to-image with 8 models (flux-2-pro, mystic, seedream, etc.)
- generate-video: image-to-video with 7 models (kling, minimax, runway, etc.)
- generate-icon: text-to-icon in solid/outline/color/flat/sticker styles
- upscale-image: 3 modes (precision-v2, precision, creative) + 2x/4x scale
- upscale-video: standard/turbo modes
- expand-image: outpainting with per-side pixel offsets
- relight: AI-controlled relighting (Premium)
- style-transfer: artistic style application (Premium)
- describe-image: reverse-engineer an image into a prompt
- config set/get/show/reset: configuration management

Features: Rich Live polling panel, exponential backoff, --wait/--no-wait,
auto-timestamped output filenames, streaming download with progress bar,
FREEPIK_API_KEY env var support, venv-based setup.

Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
2026-04-08 10:56:45 +02:00

179 lines
6.4 KiB
Python

"""Enums, endpoint maps, and response normalization utilities."""
from __future__ import annotations
from enum import Enum
from typing import Any
# ---------------------------------------------------------------------------
# Model enums
# ---------------------------------------------------------------------------
class ImageModel(str, Enum):
    """Text-to-image model identifiers accepted on the CLI.

    Values are the wire-format slugs used to key the endpoint maps below.
    """

    MYSTIC = "mystic"
    FLUX_KONTEXT_PRO = "flux-kontext-pro"
    FLUX_2_PRO = "flux-2-pro"
    FLUX_2_TURBO = "flux-2-turbo"
    FLUX_PRO_1_1 = "flux-pro-1.1"
    SEEDREAM_V4 = "seedream-v4"
    SEEDREAM_V4_5 = "seedream-v4-5"
    IDEOGRAM_V2 = "ideogram-v2"
class VideoModel(str, Enum):
    """Image-to-video model identifiers accepted on the CLI.

    Values are the wire-format slugs used to key the endpoint maps below.
    """

    KLING_O1_PRO = "kling-o1-pro"
    KLING_O1_STD = "kling-o1-std"
    KLING_ELEMENTS_PRO = "kling-elements-pro"
    KLING_ELEMENTS_STD = "kling-elements-std"
    MINIMAX_HAILUO = "minimax-hailuo"
    WAN_2_5 = "wan-2.5"
    RUNWAY_GEN4 = "runway-gen4"
class UpscaleMode(str, Enum):
    """Image upscaler variants; each maps to a distinct endpoint below."""

    CREATIVE = "creative"
    PRECISION = "precision"
    PRECISION_V2 = "precision-v2"
class VideoUpscaleMode(str, Enum):
    """Video upscaler variants; turbo posts to a sub-path of standard."""

    STANDARD = "standard"
    TURBO = "turbo"
class AspectRatio(str, Enum):
    """Supported output aspect ratios (value is the literal "W:H" string)."""

    LANDSCAPE = "16:9"
    PORTRAIT = "9:16"
    SQUARE = "1:1"
    CLASSIC = "4:3"
    WIDE = "21:9"
class IconStyle(str, Enum):
    """Styles accepted by the text-to-icon command."""

    SOLID = "solid"
    OUTLINE = "outline"
    COLOR = "color"
    FLAT = "flat"
    STICKER = "sticker"
# ---------------------------------------------------------------------------
# Endpoint maps
# ---------------------------------------------------------------------------
# POST paths that create a text-to-image task, one per model.
# NOTE: FLUX_PRO_1_1's path uses "v1-1" while the enum value is "1.1" —
# the slug and the URL segment intentionally differ for that model.
IMAGE_POST_ENDPOINTS: dict[ImageModel, str] = {
    ImageModel.MYSTIC: "/v1/ai/mystic",
    ImageModel.FLUX_KONTEXT_PRO: "/v1/ai/text-to-image/flux-kontext-pro",
    ImageModel.FLUX_2_PRO: "/v1/ai/text-to-image/flux-2-pro",
    ImageModel.FLUX_2_TURBO: "/v1/ai/text-to-image/flux-2-turbo",
    ImageModel.FLUX_PRO_1_1: "/v1/ai/text-to-image/flux-pro-v1-1",
    ImageModel.SEEDREAM_V4: "/v1/ai/text-to-image/seedream-v4",
    ImageModel.SEEDREAM_V4_5: "/v1/ai/text-to-image/seedream-v4-5",
    ImageModel.IDEOGRAM_V2: "/v1/ai/text-to-image/ideogram-v2",
}
# GET paths for polling an image task; format with .format(task_id=...).
# Each entry is its POST path from IMAGE_POST_ENDPOINTS plus "/{task_id}".
IMAGE_STATUS_ENDPOINTS: dict[ImageModel, str] = {
    ImageModel.MYSTIC: "/v1/ai/mystic/{task_id}",
    ImageModel.FLUX_KONTEXT_PRO: "/v1/ai/text-to-image/flux-kontext-pro/{task_id}",
    ImageModel.FLUX_2_PRO: "/v1/ai/text-to-image/flux-2-pro/{task_id}",
    ImageModel.FLUX_2_TURBO: "/v1/ai/text-to-image/flux-2-turbo/{task_id}",
    ImageModel.FLUX_PRO_1_1: "/v1/ai/text-to-image/flux-pro-v1-1/{task_id}",
    ImageModel.SEEDREAM_V4: "/v1/ai/text-to-image/seedream-v4/{task_id}",
    ImageModel.SEEDREAM_V4_5: "/v1/ai/text-to-image/seedream-v4-5/{task_id}",
    ImageModel.IDEOGRAM_V2: "/v1/ai/text-to-image/ideogram-v2/{task_id}",
}
# POST paths that create an image-to-video task, one per model.
# Minimax maps to a resolution-specific path (1080p); wan-2.5's dot becomes
# a dash in the URL segment.
VIDEO_POST_ENDPOINTS: dict[VideoModel, str] = {
    VideoModel.KLING_O1_PRO: "/v1/ai/image-to-video/kling-o1-pro",
    VideoModel.KLING_O1_STD: "/v1/ai/image-to-video/kling-o1-std",
    VideoModel.KLING_ELEMENTS_PRO: "/v1/ai/image-to-video/kling-elements-pro",
    VideoModel.KLING_ELEMENTS_STD: "/v1/ai/image-to-video/kling-elements-std",
    VideoModel.MINIMAX_HAILUO: "/v1/ai/image-to-video/minimax-hailuo-02-1080p",
    VideoModel.WAN_2_5: "/v1/ai/image-to-video/wan-2-5",
    VideoModel.RUNWAY_GEN4: "/v1/ai/image-to-video/runway-gen4",
}
# GET paths for polling a video task; format with .format(task_id=...).
# NOTE(review): unlike every other model pair, both kling-o1 variants poll
# the shared "/kling-o1/{task_id}" path rather than their pro/std POST
# paths — presumably the API exposes one status route for both tiers;
# confirm against the Freepik API docs.
VIDEO_STATUS_ENDPOINTS: dict[VideoModel, str] = {
    VideoModel.KLING_O1_PRO: "/v1/ai/image-to-video/kling-o1/{task_id}",
    VideoModel.KLING_O1_STD: "/v1/ai/image-to-video/kling-o1/{task_id}",
    VideoModel.KLING_ELEMENTS_PRO: "/v1/ai/image-to-video/kling-elements-pro/{task_id}",
    VideoModel.KLING_ELEMENTS_STD: "/v1/ai/image-to-video/kling-elements-std/{task_id}",
    VideoModel.MINIMAX_HAILUO: "/v1/ai/image-to-video/minimax-hailuo-02-1080p/{task_id}",
    VideoModel.WAN_2_5: "/v1/ai/image-to-video/wan-2-5/{task_id}",
    VideoModel.RUNWAY_GEN4: "/v1/ai/image-to-video/runway-gen4/{task_id}",
}
# POST paths that create an image-upscale task, one per mode.
UPSCALE_POST_ENDPOINTS: dict[UpscaleMode, str] = {
    UpscaleMode.CREATIVE: "/v1/ai/image-upscaler",
    UpscaleMode.PRECISION: "/v1/ai/image-upscaler-precision",
    UpscaleMode.PRECISION_V2: "/v1/ai/image-upscaler-precision-v2",
}
# GET paths for polling an image-upscale task; format with .format(task_id=...).
UPSCALE_STATUS_ENDPOINTS: dict[UpscaleMode, str] = {
    UpscaleMode.CREATIVE: "/v1/ai/image-upscaler/{task_id}",
    UpscaleMode.PRECISION: "/v1/ai/image-upscaler-precision/{task_id}",
    UpscaleMode.PRECISION_V2: "/v1/ai/image-upscaler-precision-v2/{task_id}",
}
# POST paths that create a video-upscale task; turbo is a sub-path.
VIDEO_UPSCALE_POST_ENDPOINTS: dict[VideoUpscaleMode, str] = {
    VideoUpscaleMode.STANDARD: "/v1/ai/video-upscaler",
    VideoUpscaleMode.TURBO: "/v1/ai/video-upscaler/turbo",
}
# Single polling path shared by both video-upscale modes.
VIDEO_UPSCALE_STATUS_ENDPOINT = "/v1/ai/video-upscaler/{task_id}"
# ---------------------------------------------------------------------------
# Response normalization helpers
# ---------------------------------------------------------------------------
def get_task_id(raw: dict[str, Any]) -> str:
    """Extract the task identifier from any Freepik response shape.

    Responses may nest the payload under a ``"data"`` key or put
    ``task_id``/``id`` at the top level; both layouts are accepted.

    Args:
        raw: Decoded JSON response body.

    Returns:
        The task id coerced to ``str`` (some endpoints return ints).

    Raises:
        ValueError: If no ``task_id``/``id`` field is present.
    """
    data = raw.get("data", raw)
    # Guard against "data" being present but null / a list: without this,
    # data.get raises AttributeError instead of the intended fallback.
    if not isinstance(data, dict):
        data = raw
    task_id = (
        data.get("task_id")
        or data.get("id")
        or raw.get("task_id")
        or raw.get("id")
    )
    if not task_id:
        raise ValueError(f"No task_id found in response: {raw}")
    return str(task_id)
def get_status(raw: dict[str, Any]) -> str:
    """Extract a normalized status string from any Freepik response shape.

    Looks under ``"data"`` first, then at the top level, defaulting to
    ``"PENDING"`` when no status field exists. The value is upper-cased
    and spaces become underscores (e.g. ``"in progress"`` -> ``"IN_PROGRESS"``).

    Args:
        raw: Decoded JSON response body.

    Returns:
        Upper-cased, underscore-separated status token.
    """
    data = raw.get("data", raw)
    # "data" can be null or a list in some responses; fall back to top level
    # instead of crashing on data.get.
    if not isinstance(data, dict):
        data = raw
    status = data.get("status") or raw.get("status") or "PENDING"
    # str() guards against a numeric status code slipping through.
    return str(status).upper().replace(" ", "_")
def get_output_urls(raw: dict[str, Any]) -> list[str]:
    """Extract all output file URLs from a completed task response."""
    data = raw.get("data", raw)

    def _url_of(entry: Any) -> Any:
        # Pull a URL out of one result entry; dict entries may store it
        # under "url", "download_url", or "src".
        if isinstance(entry, dict):
            return entry.get("url") or entry.get("download_url") or entry.get("src")
        if isinstance(entry, str):
            return entry
        return None

    # Probe the common container keys in order of likelihood; a key that
    # exists but yields no URL falls through to the next candidate.
    for candidate in ("generated", "output", "outputs", "result", "results", "images", "videos"):
        payload = data.get(candidate)
        if payload is None:
            continue
        if isinstance(payload, str):
            # A bare string is taken verbatim as the single URL.
            return [payload]
        if isinstance(payload, dict):
            single = _url_of(payload)
            if single:
                return [single]
        elif isinstance(payload, list) and payload:
            collected = [u for u in map(_url_of, payload) if u]
            if collected:
                return collected

    # Fallback: some responses put a single url at the payload's top level.
    return [data["url"]] if "url" in data else []