Files
comfyui-extras/example_workflows/animatediff-video-production-v1.json
Sebastian Krüger d6dfdd72e2 feat: initial ComfyUI workflow templates extension
- 40 workflow templates (text-to-image, image-to-video, image-to-image,
  text-to-music, upscaling, advanced, nsfw)
- Color-coded placeholder preview images using Pillow
- Template-only extension (no custom nodes)
- Preview generation script for future workflow additions

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-26 09:16:45 +01:00

249 lines
7.1 KiB
JSON

{
"last_node_id": 10,
"last_link_id": 12,
"nodes": [
{
"id": 1,
"type": "CheckpointLoaderSimple",
"pos": [50, 100],
"size": {"0": 350, "1": 100},
"flags": {},
"order": 0,
"mode": 0,
"properties": {"Node name for S&R": "CheckpointLoaderSimple"},
"widgets_values": ["v1-5-pruned-emaonly.safetensors"],
"title": "SD 1.5 Checkpoint Loader",
"outputs": [
{"name": "MODEL", "type": "MODEL", "links": [1], "slot_index": 0},
{"name": "CLIP", "type": "CLIP", "links": [2, 3], "slot_index": 1},
{"name": "VAE", "type": "VAE", "links": [4], "slot_index": 2}
]
},
{
"id": 2,
"type": "ADE_LoadAnimateDiffModel",
"pos": [50, 300],
"size": {"0": 350, "1": 100},
"flags": {},
"order": 1,
"mode": 0,
"properties": {"Node name for S&R": "ADE_LoadAnimateDiffModel"},
"widgets_values": ["mm_sd_v15_v2.ckpt"],
"title": "Load AnimateDiff Motion Module",
"outputs": [
{"name": "MOTION_MODEL", "type": "MOTION_MODEL_ADE", "links": [5], "slot_index": 0}
]
},
{
"id": 3,
"type": "ADE_ApplyAnimateDiffModelSimple",
"pos": [450, 300],
"size": {"0": 315, "1": 100},
"flags": {},
"order": 2,
"mode": 0,
"properties": {"Node name for S&R": "ADE_ApplyAnimateDiffModelSimple"},
"inputs": [
{"name": "motion_model", "type": "MOTION_MODEL_ADE", "link": 5}
],
"outputs": [
{"name": "M_MODELS", "type": "M_MODELS", "links": [6], "slot_index": 0}
]
},
{
"id": 4,
"type": "ADE_UseEvolvedSampling",
"pos": [800, 100],
"size": {"0": 315, "1": 100},
"flags": {},
"order": 3,
"mode": 0,
"properties": {"Node name for S&R": "ADE_UseEvolvedSampling"},
"widgets_values": ["sqrt_linear (AnimateDiff)"],
"inputs": [
{"name": "model", "type": "MODEL", "link": 1},
{"name": "m_models", "type": "M_MODELS", "link": 6}
],
"outputs": [
{"name": "MODEL", "type": "MODEL", "links": [7], "slot_index": 0}
]
},
{
"id": 5,
"type": "CLIPTextEncode",
"pos": [450, 500],
"size": {"0": 400, "1": 200},
"flags": {},
"order": 4,
"mode": 0,
"properties": {"Node name for S&R": "CLIPTextEncode"},
"widgets_values": ["A person walking through a forest, cinematic movement"],
"title": "API Video Prompt",
"inputs": [
{"name": "clip", "type": "CLIP", "link": 2}
],
"outputs": [
{"name": "CONDITIONING", "type": "CONDITIONING", "links": [8], "slot_index": 0}
]
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [450, 750],
"size": {"0": 400, "1": 200},
"flags": {},
"order": 5,
"mode": 0,
"properties": {"Node name for S&R": "CLIPTextEncode"},
"widgets_values": ["static, blurry, low quality"],
"title": "API Negative Prompt",
"inputs": [
{"name": "clip", "type": "CLIP", "link": 3}
],
"outputs": [
{"name": "CONDITIONING", "type": "CONDITIONING", "links": [9], "slot_index": 0}
]
},
{
"id": 7,
"type": "EmptyLatentImage",
"pos": [800, 300],
"size": {"0": 315, "1": 100},
"flags": {},
"order": 6,
"mode": 0,
"properties": {"Node name for S&R": "EmptyLatentImage"},
"widgets_values": [512, 512, 16],
"title": "API Latent Config (16 frames)",
"outputs": [
{"name": "LATENT", "type": "LATENT", "links": [10], "slot_index": 0}
]
},
{
"id": 8,
"type": "KSamplerAdvanced",
"pos": [1150, 100],
"size": {"0": 315, "1": 474},
"flags": {},
"order": 7,
"mode": 0,
"properties": {"Node name for S&R": "KSamplerAdvanced"},
"widgets_values": ["enable", 42, "fixed", 20, 8.0, "euler", "normal", 0, 10000, "disable"],
"title": "AnimateDiff Sampler",
"inputs": [
{"name": "model", "type": "MODEL", "link": 7},
{"name": "positive", "type": "CONDITIONING", "link": 8},
{"name": "negative", "type": "CONDITIONING", "link": 9},
{"name": "latent_image", "type": "LATENT", "link": 10}
],
"outputs": [
{"name": "LATENT", "type": "LATENT", "links": [11], "slot_index": 0}
]
},
{
"id": 9,
"type": "VAEDecode",
"pos": [1500, 100],
"size": {"0": 315, "1": 100},
"flags": {},
"order": 8,
"mode": 0,
"properties": {"Node name for S&R": "VAEDecode"},
"title": "VAE Decode Video",
"inputs": [
{"name": "samples", "type": "LATENT", "link": 11},
{"name": "vae", "type": "VAE", "link": 4}
],
"outputs": [
{"name": "IMAGE", "type": "IMAGE", "links": [12], "slot_index": 0}
]
},
{
"id": 10,
"type": "VHS_VideoCombine",
"pos": [1800, 100],
"size": {"0": 315, "1": 100},
"flags": {},
"order": 9,
"mode": 0,
"properties": {"Node name for S&R": "VHS_VideoCombine"},
"widgets_values": [8, 0, "animatediff_output", "video/h264-mp4", false, true, "yuv420p", 19, true, false],
"title": "Combine Frames",
"inputs": [
{"name": "images", "type": "IMAGE", "link": 12}
]
}
],
"links": [
[1, 1, 0, 4, 0, "MODEL"],
[2, 1, 1, 5, 0, "CLIP"],
[3, 1, 1, 6, 0, "CLIP"],
[4, 1, 2, 9, 1, "VAE"],
[5, 2, 0, 3, 0, "MOTION_MODEL_ADE"],
[6, 3, 0, 4, 1, "M_MODELS"],
[7, 4, 0, 8, 0, "MODEL"],
[8, 5, 0, 8, 1, "CONDITIONING"],
[9, 6, 0, 8, 2, "CONDITIONING"],
[10, 7, 0, 8, 3, "LATENT"],
[11, 8, 0, 9, 0, "LATENT"],
[12, 9, 0, 10, 0, "IMAGE"]
],
"extra": {
"workflow_info": {
"name": "AnimateDiff Video Production",
"version": "1.0.0",
"description": "Text-to-video generation using AnimateDiff. Create animated sequences from text prompts with natural motion.",
"category": "advanced",
"tags": [
"animatediff",
"text-to-video",
"animation",
"advanced",
"production"
],
"requirements": {
"models": [
"stable-diffusion-v1-5",
"animatediff-motion-module-v15"
],
"custom_nodes": [
"ComfyUI-AnimateDiff-Evolved",
"ComfyUI-VideoHelperSuite"
],
"vram_min": "16GB"
},
"parameters": {
"prompt": {
"node_id": 5,
"type": "string",
"required": true,
"description": "Describe action and movement"
},
"frames": {
"node_id": 7,
"type": "integer",
"default": 16,
"description": "Number of frames (8-32)"
},
"fps": {
"node_id": 10,
"type": "integer",
"default": 8,
"description": "Output frame rate in frames per second"
}
},
"performance": {
"avg_generation_time": "60-90 seconds",
"vram_usage": "~16-20GB",
"output": "16 frames (~2s @ 8fps)"
},
"use_cases": [
"Text-to-video animation",
"Character animations",
"Motion graphics",
"Animated storyboards"
]
}
},
"version": 0.4
}