{ "last_node_id": 16, "last_link_id": 21, "nodes": [ { "id": 1, "type": "CheckpointLoaderSimple", "pos": [ 50, 100 ], "size": { "0": 350, "1": 100 }, "widgets_values": [ "sd_xl_base_1.0.safetensors" ], "title": "SDXL Base Checkpoint Loader", "flags": {}, "order": 0, "mode": 0, "properties": { "Node name for S&R": "CheckpointLoaderSimple" }, "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 1 ], "slot_index": 0 }, { "name": "CLIP", "type": "CLIP", "links": [ 6, 7 ], "slot_index": 1 }, { "name": "VAE", "type": "VAE", "links": [ 13 ], "slot_index": 2 } ] }, { "id": 2, "type": "LoadImage", "pos": [ 50, 300 ], "size": { "0": 315, "1": 314 }, "widgets_values": [ "face_reference.png", "image" ], "title": "API Face Reference Input", "flags": {}, "order": 1, "mode": 0, "properties": { "Node name for S&R": "LoadImage" }, "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 3 ], "slot_index": 0 }, { "name": "MASK", "type": "MASK", "links": [], "slot_index": 1 } ] }, { "id": 3, "type": "IPAdapterUnifiedLoader", "pos": [ 450, 100 ], "size": { "0": 315, "1": 78 }, "widgets_values": [ "VIT-G (medium strength)" ], "title": "IP-Adapter Face Loader", "flags": {}, "order": 2, "mode": 0, "properties": { "Node name for S&R": "IPAdapterUnifiedLoader" }, "inputs": [ { "name": "model", "type": "MODEL", "link": 1 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 4 ], "slot_index": 0 }, { "name": "IPADAPTER", "type": "IPADAPTER", "links": [ 5 ], "slot_index": 1 } ] }, { "id": 4, "type": "IPAdapterAdvanced", "pos": [ 800, 100 ], "size": { "0": 315, "1": 258 }, "widgets_values": [ 0.85, "ease in-out", "average", 0.0, 1.0, "V only" ], "title": "Apply IP-Adapter Face", "flags": {}, "order": 3, "mode": 0, "properties": { "Node name for S&R": "IPAdapterAdvanced" }, "inputs": [ { "name": "model", "type": "MODEL", "link": 4 }, { "name": "ipadapter", "type": "IPADAPTER", "link": 5 }, { "name": "image", "type": "IMAGE", "link": 3 }, { "name": "clip_vision", "type": 
"CLIP_VISION", "link": 20 } ], "outputs": [ { "name": "MODEL", "type": "MODEL", "links": [ 8 ], "slot_index": 0 } ] }, { "id": 5, "type": "CLIPTextEncode", "pos": [ 450, 400 ], "size": { "0": 400, "1": 200 }, "widgets_values": [ "A professional portrait, studio lighting, detailed face" ], "title": "API Positive Prompt", "flags": {}, "order": 4, "mode": 0, "properties": { "Node name for S&R": "CLIPTextEncode" }, "inputs": [ { "name": "clip", "type": "CLIP", "link": 6 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 9 ], "slot_index": 0 } ] }, { "id": 6, "type": "CLIPTextEncode", "pos": [ 450, 650 ], "size": { "0": 400, "1": 200 }, "widgets_values": [ "blurry, distorted face, low quality" ], "title": "API Negative Prompt", "flags": {}, "order": 5, "mode": 0, "properties": { "Node name for S&R": "CLIPTextEncode" }, "inputs": [ { "name": "clip", "type": "CLIP", "link": 7 } ], "outputs": [ { "name": "CONDITIONING", "type": "CONDITIONING", "links": [ 10 ], "slot_index": 0 } ] }, { "id": 7, "type": "EmptyLatentImage", "pos": [ 800, 450 ], "size": { "0": 315, "1": 106 }, "widgets_values": [ 1024, 1024, 1 ], "title": "API Latent Image Config", "flags": {}, "order": 6, "mode": 0, "properties": { "Node name for S&R": "EmptyLatentImage" }, "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 11 ], "slot_index": 0 } ] }, { "id": 8, "type": "KSampler", "pos": [ 1170, 100 ], "size": { "0": 315, "1": 474 }, "widgets_values": [ 42, "fixed", 30, 6.5, "dpmpp_2m", "karras", 1 ], "title": "Sampler with Face", "flags": {}, "order": 7, "mode": 0, "properties": { "Node name for S&R": "KSampler" }, "inputs": [ { "name": "model", "type": "MODEL", "link": 8 }, { "name": "positive", "type": "CONDITIONING", "link": 9 }, { "name": "negative", "type": "CONDITIONING", "link": 10 }, { "name": "latent_image", "type": "LATENT", "link": 11 } ], "outputs": [ { "name": "LATENT", "type": "LATENT", "links": [ 12 ], "slot_index": 0 } ] }, { "id": 9, "type": 
"VAEDecode", "pos": [ 1540, 100 ], "size": { "0": 210, "1": 46 }, "title": "VAE Decode", "flags": {}, "order": 8, "mode": 0, "properties": { "Node name for S&R": "VAEDecode" }, "inputs": [ { "name": "samples", "type": "LATENT", "link": 12 }, { "name": "vae", "type": "VAE", "link": 13 } ], "outputs": [ { "name": "IMAGE", "type": "IMAGE", "links": [ 14, 15 ], "slot_index": 0 } ] }, { "id": 10, "type": "PreviewImage", "pos": [ 1800, 100 ], "size": { "0": 400, "1": 400 }, "title": "Preview Output", "flags": {}, "order": 9, "mode": 0, "properties": { "Node name for S&R": "PreviewImage" }, "inputs": [ { "name": "images", "type": "IMAGE", "link": 14 } ] }, { "id": 11, "type": "SaveImage", "pos": [ 1800, 550 ], "size": { "0": 400, "1": 100 }, "widgets_values": [ "ipadapter_face_output" ], "title": "API Image Output", "flags": {}, "order": 10, "mode": 0, "properties": { "Node name for S&R": "SaveImage" }, "inputs": [ { "name": "images", "type": "IMAGE", "link": 15 } ] }, { "id": 12, "type": "CLIPVisionLoader", "pos": [ 450, 250 ], "size": { "0": 315, "1": 58 }, "widgets_values": [ "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors" ], "title": "CLIP Vision Loader", "flags": {}, "order": 0, "mode": 0, "properties": { "Node name for S&R": "CLIPVisionLoader" }, "outputs": [ { "name": "CLIP_VISION", "type": "CLIP_VISION", "links": [ 20 ], "slot_index": 0 } ] } ], "links": [ [ 1, 1, 0, 3, 0, "MODEL" ], [ 3, 2, 0, 4, 2, "IMAGE" ], [ 4, 3, 0, 4, 0, "MODEL" ], [ 5, 3, 1, 4, 1, "IPADAPTER" ], [ 6, 1, 1, 5, 0, "CLIP" ], [ 7, 1, 1, 6, 0, "CLIP" ], [ 8, 4, 0, 8, 0, "MODEL" ], [ 9, 5, 0, 8, 1, "CONDITIONING" ], [ 10, 6, 0, 8, 2, "CONDITIONING" ], [ 11, 7, 0, 8, 3, "LATENT" ], [ 12, 8, 0, 9, 0, "LATENT" ], [ 13, 1, 2, 9, 1, "VAE" ], [ 14, 9, 0, 10, 0, "IMAGE" ], [ 15, 9, 0, 11, 0, "IMAGE" ], [ 20, 12, 0, 4, 3, "CLIP_VISION" ] ], "extra": { "workflow_info": { "name": "IP-Adapter Face Portrait Image-to-Image Production", "version": "1.0.0", "description": "Face-focused generation using IP-Adapter Face model. Transfer facial features from reference to generate new portraits or perform face swaps.",
"category": "image-to-image", "tags": [ "ipadapter", "face", "portrait", "i2i", "production" ], "requirements": { "models": [ "stable-diffusion-xl-base-1.0", "ip-adapter-face" ], "custom_nodes": [ "ComfyUI_IPAdapter_plus" ], "vram_min": "16GB" }, "parameters": { "face_image": { "node_id": 2, "type": "image", "required": true, "description": "Reference face image" }, "prompt": { "node_id": 5, "type": "string", "default": "A professional portrait", "description": "Portrait description" }, "face_weight": { "node_id": 4, "type": "float", "default": 0.85, "description": "Face similarity strength (0.85 recommended)" } }, "use_cases": [ "Portrait generation with specific face", "Face swap in different contexts", "Consistent character portraits", "Professional headshots" ] } }, "version": 0.4 }