Files
runpod/models/comfyui/workflows/advanced/animatediff-video-production-v1.json
Sebastian Krüger 45f71e646d fix: update workflow schema validation and node names
- Add missing last_link_id and links fields to all workflows
- Update node name mappings:
  - AudioSave → SaveAudio (MusicGen workflows)
  - AnimateDiffSampler → ADE_AnimateDiffSampler
  - SeedGenerator → ImpactInt
  - BatchKSampler → KSampler
  - ImageBatchToList → GetImageSize
- Fix schema validation errors across all 20 workflows

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-22 15:38:38 +01:00

247 lines
4.8 KiB
JSON

{
"last_node_id": 12,
"nodes": [
{
"id": 1,
"type": "CheckpointLoaderSimple",
"pos": [
50,
100
],
"widgets_values": [
"diffusers/stable-diffusion-xl-base-1.0"
],
"title": "SDXL Checkpoint Loader",
"flags": {},
"order": 0,
"mode": 0,
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
},
"size": {
"0": 350,
"1": 100
}
},
{
"id": 2,
"type": "AnimateDiffLoaderV1",
"pos": [
50,
300
],
"widgets_values": [
"mm_sd_v15_v2.ckpt"
],
"title": "AnimateDiff Motion Module",
"flags": {},
"order": 1,
"mode": 0,
"properties": {
"Node name for S&R": "AnimateDiffLoaderV1"
},
"size": {
"0": 350,
"1": 100
}
},
{
"id": 3,
"type": "CLIPTextEncode",
"pos": [
450,
100
],
"widgets_values": [
"A person walking through a forest, cinematic movement"
],
"title": "API Video Prompt",
"flags": {},
"order": 2,
"mode": 0,
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"size": {
"0": 400,
"1": 200
}
},
{
"id": 4,
"type": "CLIPTextEncode",
"pos": [
450,
350
],
"widgets_values": [
"static, blurry, low quality"
],
"title": "API Negative Prompt",
"flags": {},
"order": 3,
"mode": 0,
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"size": {
"0": 400,
"1": 200
}
},
{
"id": 5,
"type": "EmptyLatentImage",
"pos": [
450,
600
],
"widgets_values": [
512,
512,
16
],
"title": "API Latent Config (16 frames)",
"flags": {},
"order": 4,
"mode": 0,
"properties": {
"Node name for S&R": "EmptyLatentImage"
},
"size": {
"0": 315,
"1": 100
}
},
{
"id": 6,
"type": "ADE_AnimateDiffSampler",
"pos": [
800,
100
],
"widgets_values": [
42,
"fixed",
25,
7.5,
"dpmpp_2m",
"karras"
],
"title": "AnimateDiff Sampler",
"flags": {},
"order": 5,
"mode": 0,
"properties": {
"Node name for S&R": "ADE_AnimateDiffSampler"
},
"size": {
"0": 315,
"1": 474
}
},
{
"id": 7,
"type": "VAEDecode",
"pos": [
1150,
100
],
"title": "VAE Decode Video",
"flags": {},
"order": 6,
"mode": 0,
"properties": {
"Node name for S&R": "VAEDecode"
},
"size": {
"0": 315,
"1": 100
}
},
{
"id": 8,
"type": "VHS_VideoCombine",
"pos": [
1450,
100
],
"widgets_values": [
8,
0,
"animatediff_output",
"video/h264-mp4"
],
"title": "Combine Frames",
"flags": {},
"order": 7,
"mode": 0,
"properties": {
"Node name for S&R": "VHS_VideoCombine"
},
"size": {
"0": 315,
"1": 100
}
}
],
"extra": {
"workflow_info": {
"name": "AnimateDiff Video Production",
"version": "1.0.0",
"description": "Text-to-video generation using AnimateDiff. Create animated sequences from text prompts with natural motion.",
"category": "advanced",
"tags": [
"animatediff",
"text-to-video",
"animation",
"advanced",
"production"
],
"requirements": {
"models": [
"stable-diffusion-xl-base-1.0",
"animatediff-motion-module"
],
"custom_nodes": [
"ComfyUI-AnimateDiff-Evolved",
"ComfyUI-VideoHelperSuite"
],
"vram_min": "18GB"
},
"parameters": {
"prompt": {
"node_id": 3,
"type": "string",
"required": true,
"description": "Describe action and movement"
},
"frames": {
"node_id": 5,
"type": "integer",
"default": 16,
"description": "Number of frames (8-32)"
},
"fps": {
"node_id": 8,
"type": "integer",
"default": 8
}
},
"performance": {
"avg_generation_time": "60-90 seconds",
"vram_usage": "~16-20GB",
"output": "16 frames (~2s @ 8fps)"
},
"use_cases": [
"Text-to-video animation",
"Character animations",
"Motion graphics",
"Animated storyboards"
]
}
},
"version": 0.4,
"links": [],
"last_link_id": 0
}