feat: initial Freepik AI CLI
Sophisticated Python CLI for generating and manipulating images and video via the Freepik API, built with typer + rich. Commands: - generate-image: text-to-image with 8 models (flux-2-pro, mystic, seedream, etc.) - generate-video: image-to-video with 7 models (kling, minimax, runway, etc.) - generate-icon: text-to-icon in solid/outline/color/flat/sticker styles - upscale-image: 3 modes (precision-v2, precision, creative) + 2x/4x scale - upscale-video: standard/turbo modes - expand-image: outpainting with per-side pixel offsets - relight: AI-controlled relighting (Premium) - style-transfer: artistic style application (Premium) - describe-image: reverse-engineer an image into a prompt - config set/get/show/reset: configuration management Features: Rich Live polling panel, exponential backoff, --wait/--no-wait, auto-timestamped output filenames, streaming download with progress bar, FREEPIK_API_KEY env var support, venv-based setup. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
@@ -0,0 +1,109 @@
|
||||
"""Freepik HTTP client with authentication, error handling, and download support."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Optional
|
||||
|
||||
import httpx
|
||||
|
||||
from freepik_cli import __version__
|
||||
|
||||
# Root URL for every Freepik API request.
BASE_URL = "https://api.freepik.com"

# Default per-request timeout, in seconds.
DEFAULT_TIMEOUT = 60.0
|
||||
|
||||
|
||||
class FreepikAPIError(Exception):
    """Raised when the Freepik API returns an error response.

    Attributes:
        status_code: HTTP status code of the failed response, if known.
        raw: decoded JSON error body (empty dict when unavailable).
    """

    def __init__(self, message: str, status_code: Optional[int] = None, raw: Optional[dict] = None):
        super().__init__(message)
        self.status_code = status_code
        self.raw = raw or {}

    @classmethod
    def from_response(cls, response: httpx.Response) -> "FreepikAPIError":
        """Build an error from an HTTP response, extracting the best available message.

        Tolerates all observed error shapes: {"message": ...}, {"error": {...}},
        {"errors": [{...}]}, a non-JSON body, or a JSON body that is not an object.
        """
        try:
            body = response.json()
        except Exception:
            body = {}
        # The API may return a bare string or list; normalize to a dict so the
        # lookups below cannot raise.
        if not isinstance(body, dict):
            body = {}

        error = body.get("error")
        errors = body.get("errors")
        first_error = errors[0] if isinstance(errors, list) and errors else None

        message = (
            body.get("message")
            or (error.get("message") if isinstance(error, dict) else None)
            or (first_error.get("message") if isinstance(first_error, dict) else None)
            or f"HTTP {response.status_code}"
        )

        # Actionable guidance for the most common failure codes.
        hints = {
            401: "Check your API key — set FREEPIK_API_KEY or use --api-key.",
            403: "Your plan may not support this feature. Check your Freepik subscription.",
            422: "Invalid request parameters. Check the options you provided.",
            429: "Rate limit exceeded. Please wait before retrying.",
        }
        hint = hints.get(response.status_code)
        if hint:
            message = f"{message}\n\nHint: {hint}"

        return cls(message, status_code=response.status_code, raw=body)
|
||||
|
||||
|
||||
class FreepikClient:
    """Thin synchronous HTTP wrapper around the Freepik API.

    Owns a single httpx.Client (connection pool + default headers) and
    translates every transport or HTTP error into FreepikAPIError.
    Usable as a context manager; call close() otherwise.
    """

    def __init__(
        self,
        api_key: str,
        base_url: str = BASE_URL,
        timeout: float = DEFAULT_TIMEOUT,
    ) -> None:
        self._client = httpx.Client(
            base_url=base_url,
            headers={
                "x-freepik-api-key": api_key,
                "Content-Type": "application/json",
                "Accept": "application/json",
                "User-Agent": f"freepik-cli/{__version__}",
            },
            timeout=httpx.Timeout(timeout),
        )

    def _send(self, method: str, path: str, **kwargs: Any) -> dict[str, Any]:
        """Issue one request and translate httpx errors into FreepikAPIError.

        Raises:
            FreepikAPIError: on any non-2xx status or network failure.
        """
        try:
            response = self._client.request(method, path, **kwargs)
            response.raise_for_status()
            return response.json()
        except httpx.HTTPStatusError as exc:
            raise FreepikAPIError.from_response(exc.response) from exc
        except httpx.RequestError as exc:
            raise FreepikAPIError(f"Network error: {exc}") from exc

    def post(self, path: str, json: dict[str, Any]) -> dict[str, Any]:
        """POST a JSON body and return the decoded JSON response."""
        return self._send("POST", path, json=json)

    def post_multipart(self, path: str, data: dict[str, Any], files: dict[str, Any]) -> dict[str, Any]:
        """POST with multipart/form-data (for file uploads)."""
        # Drop the default JSON Content-Type so httpx can set the multipart
        # boundary header itself.
        headers = {k: v for k, v in self._client.headers.items() if k.lower() != "content-type"}
        return self._send("POST", path, data=data, files=files, headers=headers)

    def get(self, path: str, params: dict[str, Any] | None = None) -> dict[str, Any]:
        """GET with optional query parameters and return the decoded JSON response."""
        return self._send("GET", path, params=params)

    def __enter__(self) -> "FreepikClient":
        return self

    def __exit__(self, *args: Any) -> None:
        self.close()

    def close(self) -> None:
        """Release the underlying connection pool."""
        self._client.close()
|
||||
@@ -0,0 +1,98 @@
|
||||
"""Image editing API methods: expand, relight, style-transfer, icons."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Optional, Tuple
|
||||
|
||||
from freepik_cli.api.client import FreepikClient
|
||||
from freepik_cli.api.models import IconStyle, get_output_urls, get_status, get_task_id
|
||||
|
||||
|
||||
class EditAPI:
    """Image editing endpoints: icon generation, relighting, style transfer."""

    def __init__(self, client: FreepikClient) -> None:
        self._client = client

    # -- Icon generation ------------------------------------------------

    def generate_icon(
        self,
        prompt: str,
        style: IconStyle = IconStyle.COLOR,
        num_inference_steps: int = 30,
        guidance_scale: float = 7.5,
        seed: Optional[int] = None,
    ) -> str:
        """Submit a text-to-icon task. Returns task_id."""
        body: dict[str, Any] = {
            "prompt": prompt,
            "style": style.value,
            "num_inference_steps": num_inference_steps,
            "guidance_scale": guidance_scale,
        }
        if seed is not None:
            body["seed"] = seed
        return get_task_id(self._client.post("/v1/ai/text-to-icon", json=body))

    def icon_status(self, task_id: str) -> Tuple[str, dict[str, Any]]:
        """Poll a text-to-icon task. Returns (status, raw_response)."""
        response = self._client.get(f"/v1/ai/text-to-icon/{task_id}")
        return get_status(response), response

    def render_icon(self, task_id: str, fmt: str = "png") -> str:
        """Get the download URL for a completed icon in PNG or SVG format."""
        response = self._client.post(f"/v1/ai/text-to-icon/{task_id}/render/{fmt}", json={})
        payload = response.get("data", response)
        return payload.get("url") or payload.get("download_url") or ""

    # -- Relight --------------------------------------------------------

    def relight_submit(
        self,
        image_b64: str,
        prompt: Optional[str] = None,
        style: Optional[str] = None,
    ) -> str:
        """Submit a relighting task. Returns task_id."""
        body: dict[str, Any] = {"image": image_b64}
        if prompt:
            body["prompt"] = prompt
        if style:
            body["style"] = style
        return get_task_id(self._client.post("/v1/ai/image-relight", json=body))

    def relight_status(self, task_id: str) -> Tuple[str, dict[str, Any]]:
        """Poll a relighting task. Returns (status, raw_response)."""
        response = self._client.get(f"/v1/ai/image-relight/{task_id}")
        return get_status(response), response

    # -- Style transfer -------------------------------------------------

    def style_transfer_submit(
        self,
        content_image_b64: str,
        style_image_b64: str,
        strength: Optional[float] = None,
    ) -> str:
        """Submit a style-transfer task. Returns task_id."""
        body: dict[str, Any] = {
            "content_image": content_image_b64,
            "style_image": style_image_b64,
        }
        if strength is not None:
            body["strength"] = strength
        return get_task_id(self._client.post("/v1/ai/image-style-transfer", json=body))

    def style_transfer_status(self, task_id: str) -> Tuple[str, dict[str, Any]]:
        """Poll a style-transfer task. Returns (status, raw_response)."""
        response = self._client.get(f"/v1/ai/image-style-transfer/{task_id}")
        return get_status(response), response

    def get_output_urls(self, raw: dict[str, Any]) -> list[str]:
        """Extract output file URLs from a completed task response."""
        return get_output_urls(raw)
|
||||
@@ -0,0 +1,105 @@
|
||||
"""Image generation and analysis API methods."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Optional, Tuple
|
||||
|
||||
from freepik_cli.api.client import FreepikClient
|
||||
from freepik_cli.api.models import (
|
||||
IMAGE_POST_ENDPOINTS,
|
||||
IMAGE_STATUS_ENDPOINTS,
|
||||
ImageModel,
|
||||
get_output_urls,
|
||||
get_status,
|
||||
get_task_id,
|
||||
)
|
||||
|
||||
|
||||
class ImageAPI:
    """Image generation, description (image-to-prompt), and expansion endpoints."""

    # Expand (outpainting) endpoints, shared by submit and status polling.
    # Unknown model names fall back to flux-pro.
    _EXPAND_ENDPOINTS: dict[str, str] = {
        "flux-pro": "/v1/ai/image-expand/flux-pro",
        "ideogram": "/v1/ai/image-expand/ideogram",
        "seedream-v4-5": "/v1/ai/image-expand/seedream-v4-5",
    }
    _EXPAND_DEFAULT = "/v1/ai/image-expand/flux-pro"

    def __init__(self, client: FreepikClient) -> None:
        self._client = client

    def generate(self, model: ImageModel, payload: dict[str, Any]) -> str:
        """Submit a generation task. Returns task_id."""
        endpoint = IMAGE_POST_ENDPOINTS[model]
        raw = self._client.post(endpoint, json=payload)
        return get_task_id(raw)

    def get_status(self, model: ImageModel, task_id: str) -> Tuple[str, dict[str, Any]]:
        """Poll status. Returns (status_str, raw_response)."""
        endpoint = IMAGE_STATUS_ENDPOINTS[model].format(task_id=task_id)
        raw = self._client.get(endpoint)
        return get_status(raw), raw

    def get_output_urls(self, raw: dict[str, Any]) -> list[str]:
        """Extract all output file URLs from a completed task response."""
        return get_output_urls(raw)

    # ------------------------------------------------------------------
    # Image-to-prompt (describe)
    # ------------------------------------------------------------------

    def describe_submit(self, image_b64: str) -> str:
        """Submit image-to-prompt task. Returns task_id."""
        raw = self._client.post("/v1/ai/image-to-prompt", json={"image": image_b64})
        return get_task_id(raw)

    def describe_status(self, task_id: str) -> Tuple[str, dict[str, Any]]:
        """Poll an image-to-prompt task. Returns (status_str, raw_response)."""
        raw = self._client.get(f"/v1/ai/image-to-prompt/{task_id}")
        return get_status(raw), raw

    def get_prompt_text(self, raw: dict[str, Any]) -> str:
        """Extract generated prompt text from a completed describe response.

        Tries the common key names in order; tolerates a non-dict "result".
        Returns "" when no text is found.
        """
        data = raw.get("data", raw)
        result = data.get("result")
        nested = result.get("prompt", "") if isinstance(result, dict) else ""
        return (
            data.get("prompt")
            or data.get("description")
            or data.get("text")
            or nested
            or ""
        )

    # ------------------------------------------------------------------
    # Image expansion (outpainting)
    # ------------------------------------------------------------------

    def expand_submit(
        self,
        model: str,
        image_b64: str,
        left: int = 0,
        right: int = 0,
        top: int = 0,
        bottom: int = 0,
        prompt: Optional[str] = None,
        seed: Optional[int] = None,
    ) -> str:
        """Submit an outpainting task with per-side pixel offsets. Returns task_id."""
        payload: dict[str, Any] = {
            "image": image_b64,
            "left": left,
            "right": right,
            "top": top,
            "bottom": bottom,
        }
        if prompt:
            payload["prompt"] = prompt
        if seed is not None:
            payload["seed"] = seed

        endpoint = self._EXPAND_ENDPOINTS.get(model, self._EXPAND_DEFAULT)
        raw = self._client.post(endpoint, json=payload)
        return get_task_id(raw)

    def expand_status(self, model: str, task_id: str) -> Tuple[str, dict[str, Any]]:
        """Poll an outpainting task. Returns (status_str, raw_response)."""
        base = self._EXPAND_ENDPOINTS.get(model, self._EXPAND_DEFAULT)
        raw = self._client.get(f"{base}/{task_id}")
        return get_status(raw), raw
|
||||
@@ -0,0 +1,178 @@
|
||||
"""Enums, endpoint maps, and response normalization utilities."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from enum import Enum
|
||||
from typing import Any
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Model enums
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
class ImageModel(str, Enum):
    """Supported text-to-image models; keys into IMAGE_POST_ENDPOINTS / IMAGE_STATUS_ENDPOINTS."""

    MYSTIC = "mystic"
    FLUX_KONTEXT_PRO = "flux-kontext-pro"
    FLUX_2_PRO = "flux-2-pro"
    FLUX_2_TURBO = "flux-2-turbo"
    FLUX_PRO_1_1 = "flux-pro-1.1"
    SEEDREAM_V4 = "seedream-v4"
    SEEDREAM_V4_5 = "seedream-v4-5"
    IDEOGRAM_V2 = "ideogram-v2"
|
||||
|
||||
|
||||
class VideoModel(str, Enum):
    """Supported image-to-video models; keys into VIDEO_POST_ENDPOINTS / VIDEO_STATUS_ENDPOINTS."""

    KLING_O1_PRO = "kling-o1-pro"
    KLING_O1_STD = "kling-o1-std"
    KLING_ELEMENTS_PRO = "kling-elements-pro"
    KLING_ELEMENTS_STD = "kling-elements-std"
    MINIMAX_HAILUO = "minimax-hailuo"
    WAN_2_5 = "wan-2.5"
    RUNWAY_GEN4 = "runway-gen4"
|
||||
|
||||
|
||||
class UpscaleMode(str, Enum):
    """Image upscaling pipelines; keys into UPSCALE_POST_ENDPOINTS / UPSCALE_STATUS_ENDPOINTS."""

    CREATIVE = "creative"
    PRECISION = "precision"
    PRECISION_V2 = "precision-v2"
|
||||
|
||||
|
||||
class VideoUpscaleMode(str, Enum):
    """Video upscaling pipelines; keys into VIDEO_UPSCALE_POST_ENDPOINTS."""

    STANDARD = "standard"
    TURBO = "turbo"
|
||||
|
||||
|
||||
class AspectRatio(str, Enum):
    """Named aspect-ratio string constants (e.g. "16:9")."""

    LANDSCAPE = "16:9"
    PORTRAIT = "9:16"
    SQUARE = "1:1"
    CLASSIC = "4:3"
    WIDE = "21:9"
|
||||
|
||||
|
||||
class IconStyle(str, Enum):
    """Icon rendering styles accepted by the text-to-icon endpoint."""

    SOLID = "solid"
    OUTLINE = "outline"
    COLOR = "color"
    FLAT = "flat"
    STICKER = "sticker"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Endpoint maps
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
# POST endpoints that submit a text-to-image generation task, per model.
IMAGE_POST_ENDPOINTS: dict[ImageModel, str] = {
    ImageModel.MYSTIC: "/v1/ai/mystic",
    ImageModel.FLUX_KONTEXT_PRO: "/v1/ai/text-to-image/flux-kontext-pro",
    ImageModel.FLUX_2_PRO: "/v1/ai/text-to-image/flux-2-pro",
    ImageModel.FLUX_2_TURBO: "/v1/ai/text-to-image/flux-2-turbo",
    ImageModel.FLUX_PRO_1_1: "/v1/ai/text-to-image/flux-pro-v1-1",
    ImageModel.SEEDREAM_V4: "/v1/ai/text-to-image/seedream-v4",
    ImageModel.SEEDREAM_V4_5: "/v1/ai/text-to-image/seedream-v4-5",
    ImageModel.IDEOGRAM_V2: "/v1/ai/text-to-image/ideogram-v2",
}

# GET endpoints (with a {task_id} placeholder) for polling image task status.
IMAGE_STATUS_ENDPOINTS: dict[ImageModel, str] = {
    ImageModel.MYSTIC: "/v1/ai/mystic/{task_id}",
    ImageModel.FLUX_KONTEXT_PRO: "/v1/ai/text-to-image/flux-kontext-pro/{task_id}",
    ImageModel.FLUX_2_PRO: "/v1/ai/text-to-image/flux-2-pro/{task_id}",
    ImageModel.FLUX_2_TURBO: "/v1/ai/text-to-image/flux-2-turbo/{task_id}",
    ImageModel.FLUX_PRO_1_1: "/v1/ai/text-to-image/flux-pro-v1-1/{task_id}",
    ImageModel.SEEDREAM_V4: "/v1/ai/text-to-image/seedream-v4/{task_id}",
    ImageModel.SEEDREAM_V4_5: "/v1/ai/text-to-image/seedream-v4-5/{task_id}",
    ImageModel.IDEOGRAM_V2: "/v1/ai/text-to-image/ideogram-v2/{task_id}",
}

# POST endpoints that submit an image-to-video generation task, per model.
VIDEO_POST_ENDPOINTS: dict[VideoModel, str] = {
    VideoModel.KLING_O1_PRO: "/v1/ai/image-to-video/kling-o1-pro",
    VideoModel.KLING_O1_STD: "/v1/ai/image-to-video/kling-o1-std",
    VideoModel.KLING_ELEMENTS_PRO: "/v1/ai/image-to-video/kling-elements-pro",
    VideoModel.KLING_ELEMENTS_STD: "/v1/ai/image-to-video/kling-elements-std",
    VideoModel.MINIMAX_HAILUO: "/v1/ai/image-to-video/minimax-hailuo-02-1080p",
    VideoModel.WAN_2_5: "/v1/ai/image-to-video/wan-2-5",
    VideoModel.RUNWAY_GEN4: "/v1/ai/image-to-video/runway-gen4",
}

# GET endpoints (with a {task_id} placeholder) for polling video task status.
# NOTE: both kling-o1 variants poll the same shared /kling-o1/{task_id} path.
VIDEO_STATUS_ENDPOINTS: dict[VideoModel, str] = {
    VideoModel.KLING_O1_PRO: "/v1/ai/image-to-video/kling-o1/{task_id}",
    VideoModel.KLING_O1_STD: "/v1/ai/image-to-video/kling-o1/{task_id}",
    VideoModel.KLING_ELEMENTS_PRO: "/v1/ai/image-to-video/kling-elements-pro/{task_id}",
    VideoModel.KLING_ELEMENTS_STD: "/v1/ai/image-to-video/kling-elements-std/{task_id}",
    VideoModel.MINIMAX_HAILUO: "/v1/ai/image-to-video/minimax-hailuo-02-1080p/{task_id}",
    VideoModel.WAN_2_5: "/v1/ai/image-to-video/wan-2-5/{task_id}",
    VideoModel.RUNWAY_GEN4: "/v1/ai/image-to-video/runway-gen4/{task_id}",
}

# POST endpoints that submit an image upscale task, per mode.
UPSCALE_POST_ENDPOINTS: dict[UpscaleMode, str] = {
    UpscaleMode.CREATIVE: "/v1/ai/image-upscaler",
    UpscaleMode.PRECISION: "/v1/ai/image-upscaler-precision",
    UpscaleMode.PRECISION_V2: "/v1/ai/image-upscaler-precision-v2",
}

# GET endpoints (with a {task_id} placeholder) for polling image-upscale status.
UPSCALE_STATUS_ENDPOINTS: dict[UpscaleMode, str] = {
    UpscaleMode.CREATIVE: "/v1/ai/image-upscaler/{task_id}",
    UpscaleMode.PRECISION: "/v1/ai/image-upscaler-precision/{task_id}",
    UpscaleMode.PRECISION_V2: "/v1/ai/image-upscaler-precision-v2/{task_id}",
}

# POST endpoints that submit a video upscale task, per mode.
VIDEO_UPSCALE_POST_ENDPOINTS: dict[VideoUpscaleMode, str] = {
    VideoUpscaleMode.STANDARD: "/v1/ai/video-upscaler",
    VideoUpscaleMode.TURBO: "/v1/ai/video-upscaler/turbo",
}

# Single status-polling endpoint shared by all video-upscale modes.
VIDEO_UPSCALE_STATUS_ENDPOINT = "/v1/ai/video-upscaler/{task_id}"
|
||||
|
||||
|
||||
# ---------------------------------------------------------------------------
|
||||
# Response normalization helpers
|
||||
# ---------------------------------------------------------------------------
|
||||
|
||||
def get_task_id(raw: dict[str, Any]) -> str:
    """Extract the task identifier from any response shape.

    Looks under the "data" envelope first, then the top level, accepting
    either "task_id" or "id". Raises ValueError when none is present.
    """
    data = raw.get("data", raw)
    for candidate in (data.get("task_id"), data.get("id"), raw.get("task_id"), raw.get("id")):
        if candidate:
            return str(candidate)
    raise ValueError(f"No task_id found in response: {raw}")
|
||||
|
||||
|
||||
def get_status(raw: dict[str, Any]) -> str:
    """Return the task status, uppercased with spaces replaced by underscores.

    Checks the "data" envelope first, then the top level; defaults to
    "PENDING" when no status field is present.
    """
    payload = raw.get("data", raw)
    value = payload.get("status") or raw.get("status") or "PENDING"
    return value.upper().replace(" ", "_")
|
||||
|
||||
|
||||
def get_output_urls(raw: dict[str, Any]) -> list[str]:
    """Extract all output file URLs from a completed task response.

    Probes the common result keys in order of likelihood; each may hold a
    list of entries (dicts with url/download_url/src, or plain strings),
    a single dict, or a bare string. Falls back to a top-level "url" field,
    and returns [] when nothing is found.
    """
    data = raw.get("data", raw)

    for key in ("generated", "output", "outputs", "result", "results", "images", "videos"):
        items = data.get(key)
        if items is None:
            continue
        if isinstance(items, str):
            return [items]
        if isinstance(items, dict):
            candidate = items.get("url") or items.get("download_url") or items.get("src")
            if candidate:
                return [candidate]
            continue
        if isinstance(items, list) and items:
            collected = []
            for entry in items:
                if isinstance(entry, str):
                    collected.append(entry)
                elif isinstance(entry, dict):
                    found = entry.get("url") or entry.get("download_url") or entry.get("src")
                    if found:
                        collected.append(found)
            if collected:
                return collected
            # No usable URLs under this key — keep probing the others.

    # Fallback: top-level url field.
    if "url" in data:
        return [data["url"]]

    return []
|
||||
@@ -0,0 +1,88 @@
|
||||
"""Image and video upscaling API methods."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Optional, Tuple
|
||||
|
||||
from freepik_cli.api.client import FreepikClient
|
||||
from freepik_cli.api.models import (
|
||||
UPSCALE_POST_ENDPOINTS,
|
||||
UPSCALE_STATUS_ENDPOINTS,
|
||||
VIDEO_UPSCALE_POST_ENDPOINTS,
|
||||
VIDEO_UPSCALE_STATUS_ENDPOINT,
|
||||
UpscaleMode,
|
||||
VideoUpscaleMode,
|
||||
get_output_urls,
|
||||
get_status,
|
||||
get_task_id,
|
||||
)
|
||||
|
||||
|
||||
class UpscaleAPI:
    """Image and video upscaling endpoints."""

    def __init__(self, client: FreepikClient) -> None:
        self._client = client

    # -- Image upscaling ------------------------------------------------

    def upscale_image(
        self,
        mode: UpscaleMode,
        image_b64: str,
        scale_factor: Optional[str] = None,
        creativity: Optional[int] = None,
        prompt: Optional[str] = None,
        seed: Optional[int] = None,
    ) -> str:
        """Submit an image upscale task. Returns task_id."""
        body: dict[str, Any] = {"image": image_b64}

        if scale_factor:
            # "2x"/"4x" → integer factor; unparseable input falls back to 2.
            digits = scale_factor.rstrip("xX")
            try:
                body["scale_factor"] = int(digits)
            except ValueError:
                body["scale_factor"] = 2

        # Creativity and prompt only apply to the creative pipeline.
        if mode == UpscaleMode.CREATIVE:
            if creativity is not None:
                body["creativity"] = creativity
            if prompt:
                body["prompt"] = prompt

        if seed is not None:
            body["seed"] = seed

        response = self._client.post(UPSCALE_POST_ENDPOINTS[mode], json=body)
        return get_task_id(response)

    def upscale_image_status(self, mode: UpscaleMode, task_id: str) -> Tuple[str, dict[str, Any]]:
        """Poll an image upscale task. Returns (status, raw_response)."""
        response = self._client.get(UPSCALE_STATUS_ENDPOINTS[mode].format(task_id=task_id))
        return get_status(response), response

    # -- Video upscaling ------------------------------------------------

    def upscale_video(
        self,
        mode: VideoUpscaleMode,
        video_b64: str,
    ) -> str:
        """Submit a video upscale task. Returns task_id."""
        response = self._client.post(VIDEO_UPSCALE_POST_ENDPOINTS[mode], json={"video": video_b64})
        return get_task_id(response)

    def upscale_video_status(self, task_id: str) -> Tuple[str, dict[str, Any]]:
        """Poll a video upscale task. Returns (status, raw_response)."""
        response = self._client.get(VIDEO_UPSCALE_STATUS_ENDPOINT.format(task_id=task_id))
        return get_status(response), response

    def get_output_urls(self, raw: dict[str, Any]) -> list[str]:
        """Extract output file URLs from a completed task response."""
        return get_output_urls(raw)
|
||||
@@ -0,0 +1,55 @@
|
||||
"""Video generation API methods."""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import Any, Optional, Tuple
|
||||
|
||||
from freepik_cli.api.client import FreepikClient
|
||||
from freepik_cli.api.models import (
|
||||
VIDEO_POST_ENDPOINTS,
|
||||
VIDEO_STATUS_ENDPOINTS,
|
||||
VideoModel,
|
||||
get_output_urls,
|
||||
get_status,
|
||||
get_task_id,
|
||||
)
|
||||
|
||||
|
||||
class VideoAPI:
    """Image-to-video generation endpoints."""

    def __init__(self, client: FreepikClient) -> None:
        self._client = client

    def generate(
        self,
        model: VideoModel,
        image_b64: str,
        prompt: Optional[str] = None,
        duration: int = 5,
        aspect_ratio: str = "16:9",
        seed: Optional[int] = None,
    ) -> str:
        """Submit an image-to-video task. Returns task_id."""
        body: dict[str, Any] = {"image": image_b64}
        # Falsy values (empty prompt, zero duration, empty ratio) are omitted
        # so the API applies its own defaults.
        if prompt:
            body["prompt"] = prompt
        if duration:
            body["duration"] = duration
        if aspect_ratio:
            body["aspect_ratio"] = aspect_ratio
        if seed is not None:
            body["seed"] = seed

        response = self._client.post(VIDEO_POST_ENDPOINTS[model], json=body)
        return get_task_id(response)

    def get_status(self, model: VideoModel, task_id: str) -> Tuple[str, dict[str, Any]]:
        """Poll status. Returns (status_str, raw_response)."""
        response = self._client.get(VIDEO_STATUS_ENDPOINTS[model].format(task_id=task_id))
        return get_status(response), response

    def get_output_urls(self, raw: dict[str, Any]) -> list[str]:
        """Extract output file URLs from a completed task response."""
        return get_output_urls(raw)
|
||||
Reference in New Issue
Block a user