fix: complete ComfyUI workflow schema validation

Fix all 20 production workflows to comply with ComfyUI schema requirements:
- Add missing 'flags', 'order', 'mode', 'properties', 'size' fields to all nodes
- Update deprecated node names:
  - AnimateDiffLoader → AnimateDiffLoaderV1
  - VHSVideoCombine → VHS_VideoCombine
  - IPAdapterApply → IPAdapter
  - IPAdapterApplyFace → IPAdapterFaceID
- Remove deprecated nodes: PreviewVideo, SaveVideo
- Add fix_workflows.py script for future maintenance (a sketch follows this list)
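
A minimal sketch of what fix_workflows.py could look like, reconstructed only from the bullet points above; the committed script may differ. The workflows/ directory path and the fallback size value are assumptions, and link cleanup for removed nodes is omitted.

```python
#!/usr/bin/env python3
"""Hypothetical sketch of fix_workflows.py -- the committed script may differ."""
import json
from pathlib import Path

WORKFLOW_DIR = Path("workflows")  # assumed location of the production workflow JSON files
RENAMES = {
    "AnimateDiffLoader": "AnimateDiffLoaderV1",
    "VHSVideoCombine": "VHS_VideoCombine",
    "IPAdapterApply": "IPAdapter",
    "IPAdapterApplyFace": "IPAdapterFaceID",
}
DEPRECATED = {"PreviewVideo", "SaveVideo"}


def fix_workflow(path: Path) -> None:
    data = json.loads(path.read_text())
    kept = []
    for order, node in enumerate(data.get("nodes", [])):
        if node.get("type") in DEPRECATED:
            continue  # drop deprecated nodes (link cleanup omitted in this sketch)
        node["type"] = RENAMES.get(node["type"], node["type"])
        node.setdefault("flags", {})
        node.setdefault("order", order)
        node.setdefault("mode", 0)
        node.setdefault("size", {"0": 315, "1": 100})  # assumed fallback size
        node.setdefault("properties", {})["Node name for S&R"] = node["type"]
        kept.append(node)
    data["nodes"] = kept
    path.write_text(json.dumps(data, indent=2) + "\n")


if __name__ == "__main__":
    for wf in sorted(WORKFLOW_DIR.glob("*.json")):
        fix_workflow(wf)
        print(f"fixed {wf.name}")
```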

Changes:
- 16 workflows updated with complete schema
- 4 workflows (FLUX, SD3.5) were already valid
- All workflows now pass zod schema validation (an illustrative check is sketched below)
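
The actual validation is done with zod on the TypeScript side; purely as an illustration of what "complete schema" means in this commit, the following standalone check mirrors the per-node fields visible in the diff using Python's jsonschema package. The field list is an assumption reconstructed from this diff, not the project's real zod definition.

```python
# Illustrative only: a jsonschema mirror of the node shape seen in this diff.
# The project's real validator is a zod schema in TypeScript.
import json
import sys

from jsonschema import validate, ValidationError

NODE_SCHEMA = {
    "type": "object",
    "required": ["id", "type", "pos", "size", "flags", "order", "mode", "properties"],
    "properties": {
        "id": {"type": "integer"},
        "type": {"type": "string"},
        "flags": {"type": "object"},
        "order": {"type": "integer"},
        "mode": {"type": "integer"},
        "properties": {"type": "object"},
    },
}

WORKFLOW_SCHEMA = {
    "type": "object",
    "required": ["last_node_id", "last_link_id", "nodes", "links", "version"],
    "properties": {"nodes": {"type": "array", "items": NODE_SCHEMA}},
}

if __name__ == "__main__":
    for path in sys.argv[1:]:
        with open(path) as fh:
            data = json.load(fh)
        try:
            validate(instance=data, schema=WORKFLOW_SCHEMA)
            print(f"OK   {path}")
        except ValidationError as exc:
            print(f"FAIL {path}: {exc.message}")
```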

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-22 15:30:08 +01:00
parent 19d82108b0
commit 2213ed3c85
17 changed files with 3727 additions and 300 deletions

@@ -2,17 +2,256 @@
"last_node_id": 15,
"last_link_id": 20,
"nodes": [
{"id": 1, "type": "CheckpointLoaderSimple", "pos": [50, 100], "size": {"0": 350, "1": 100}, "widgets_values": ["diffusers/stable-diffusion-xl-base-1.0"], "title": "SDXL Base Checkpoint Loader"},
{"id": 2, "type": "LoadImage", "pos": [50, 300], "size": [315, 314], "widgets_values": ["face_reference.png", "image"], "title": "API Face Reference Input"},
{"id": 3, "type": "IPAdapterUnifiedLoader", "pos": [450, 100], "size": {"0": 315, "1": 78}, "widgets_values": ["FACE"], "title": "IP-Adapter Face Loader"},
{"id": 4, "type": "IPAdapterApplyFace", "pos": [800, 100], "size": {"0": 315, "1": 258}, "widgets_values": [0.85, 0.0, "original", 0.0, 1.0, true], "title": "Apply IP-Adapter Face"},
{"id": 5, "type": "CLIPTextEncode", "pos": [450, 400], "size": {"0": 400, "1": 200}, "widgets_values": ["A professional portrait, studio lighting, detailed face"], "title": "API Positive Prompt"},
{"id": 6, "type": "CLIPTextEncode", "pos": [450, 650], "size": {"0": 400, "1": 200}, "widgets_values": ["blurry, distorted face, low quality"], "title": "API Negative Prompt"},
{"id": 7, "type": "EmptyLatentImage", "pos": [800, 450], "size": {"0": 315, "1": 106}, "widgets_values": [1024, 1024, 1], "title": "API Latent Image Config"},
{"id": 8, "type": "KSampler", "pos": [1170, 100], "size": {"0": 315, "1": 474}, "widgets_values": [42, "fixed", 30, 6.5, "dpmpp_2m", "karras", 1], "title": "Sampler with Face"},
{"id": 9, "type": "VAEDecode", "pos": [1540, 100], "size": {"0": 210, "1": 46}, "title": "VAE Decode"},
{"id": 10, "type": "PreviewImage", "pos": [1800, 100], "size": {"0": 400, "1": 400}, "title": "Preview Output"},
{"id": 11, "type": "SaveImage", "pos": [1800, 550], "size": {"0": 400, "1": 100}, "widgets_values": ["ipadapter_face_output"], "title": "API Image Output"}
{
"id": 1,
"type": "CheckpointLoaderSimple",
"pos": [
50,
100
],
"size": {
"0": 350,
"1": 100
},
"widgets_values": [
"diffusers/stable-diffusion-xl-base-1.0"
],
"title": "SDXL Base Checkpoint Loader",
"flags": {},
"order": 0,
"mode": 0,
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
}
},
{
"id": 2,
"type": "LoadImage",
"pos": [
50,
300
],
"size": [
315,
314
],
"widgets_values": [
"face_reference.png",
"image"
],
"title": "API Face Reference Input",
"flags": {},
"order": 1,
"mode": 0,
"properties": {
"Node name for S&R": "LoadImage"
}
},
{
"id": 3,
"type": "IPAdapterUnifiedLoader",
"pos": [
450,
100
],
"size": {
"0": 315,
"1": 78
},
"widgets_values": [
"FACE"
],
"title": "IP-Adapter Face Loader",
"flags": {},
"order": 2,
"mode": 0,
"properties": {
"Node name for S&R": "IPAdapterUnifiedLoader"
}
},
{
"id": 4,
"type": "IPAdapterFaceID",
"pos": [
800,
100
],
"size": {
"0": 315,
"1": 258
},
"widgets_values": [
0.85,
0.0,
"original",
0.0,
1.0,
true
],
"title": "Apply IP-Adapter Face",
"flags": {},
"order": 3,
"mode": 0,
"properties": {
"Node name for S&R": "IPAdapterFaceID"
}
},
{
"id": 5,
"type": "CLIPTextEncode",
"pos": [
450,
400
],
"size": {
"0": 400,
"1": 200
},
"widgets_values": [
"A professional portrait, studio lighting, detailed face"
],
"title": "API Positive Prompt",
"flags": {},
"order": 4,
"mode": 0,
"properties": {
"Node name for S&R": "CLIPTextEncode"
}
},
{
"id": 6,
"type": "CLIPTextEncode",
"pos": [
450,
650
],
"size": {
"0": 400,
"1": 200
},
"widgets_values": [
"blurry, distorted face, low quality"
],
"title": "API Negative Prompt",
"flags": {},
"order": 5,
"mode": 0,
"properties": {
"Node name for S&R": "CLIPTextEncode"
}
},
{
"id": 7,
"type": "EmptyLatentImage",
"pos": [
800,
450
],
"size": {
"0": 315,
"1": 106
},
"widgets_values": [
1024,
1024,
1
],
"title": "API Latent Image Config",
"flags": {},
"order": 6,
"mode": 0,
"properties": {
"Node name for S&R": "EmptyLatentImage"
}
},
{
"id": 8,
"type": "KSampler",
"pos": [
1170,
100
],
"size": {
"0": 315,
"1": 474
},
"widgets_values": [
42,
"fixed",
30,
6.5,
"dpmpp_2m",
"karras",
1
],
"title": "Sampler with Face",
"flags": {},
"order": 7,
"mode": 0,
"properties": {
"Node name for S&R": "KSampler"
}
},
{
"id": 9,
"type": "VAEDecode",
"pos": [
1540,
100
],
"size": {
"0": 210,
"1": 46
},
"title": "VAE Decode",
"flags": {},
"order": 8,
"mode": 0,
"properties": {
"Node name for S&R": "VAEDecode"
}
},
{
"id": 10,
"type": "PreviewImage",
"pos": [
1800,
100
],
"size": {
"0": 400,
"1": 400
},
"title": "Preview Output",
"flags": {},
"order": 9,
"mode": 0,
"properties": {
"Node name for S&R": "PreviewImage"
}
},
{
"id": 11,
"type": "SaveImage",
"pos": [
1800,
550
],
"size": {
"0": 400,
"1": 100
},
"widgets_values": [
"ipadapter_face_output"
],
"title": "API Image Output",
"flags": {},
"order": 10,
"mode": 0,
"properties": {
"Node name for S&R": "SaveImage"
}
}
],
"links": [],
"extra": {
@@ -21,15 +260,50 @@
"version": "1.0.0",
"description": "Face-focused generation using IP-Adapter Face model. Transfer facial features from reference to generate new portraits or perform face swaps.",
"category": "image-to-image",
"tags": ["ipadapter", "face", "portrait", "i2i", "production"],
"requirements": {"models": ["stable-diffusion-xl-base-1.0", "ip-adapter-face"], "custom_nodes": ["ComfyUI_IPAdapter_plus"], "vram_min": "16GB"},
"parameters": {
"face_image": {"node_id": 2, "type": "image", "required": true, "description": "Reference face image"},
"prompt": {"node_id": 5, "type": "string", "default": "A professional portrait", "description": "Portrait description"},
"face_weight": {"node_id": 4, "type": "float", "default": 0.85, "description": "Face similarity strength (0.85 recommended)"}
"tags": [
"ipadapter",
"face",
"portrait",
"i2i",
"production"
],
"requirements": {
"models": [
"stable-diffusion-xl-base-1.0",
"ip-adapter-face"
],
"custom_nodes": [
"ComfyUI_IPAdapter_plus"
],
"vram_min": "16GB"
},
"use_cases": ["Portrait generation with specific face", "Face swap in different contexts", "Consistent character portraits", "Professional headshots"]
"parameters": {
"face_image": {
"node_id": 2,
"type": "image",
"required": true,
"description": "Reference face image"
},
"prompt": {
"node_id": 5,
"type": "string",
"default": "A professional portrait",
"description": "Portrait description"
},
"face_weight": {
"node_id": 4,
"type": "float",
"default": 0.85,
"description": "Face similarity strength (0.85 recommended)"
}
},
"use_cases": [
"Portrait generation with specific face",
"Face swap in different contexts",
"Consistent character portraits",
"Professional headshots"
]
}
},
"version": 0.4
}
}