runpod/comfyui/workflows/text-to-image/flux-dev-t2i-production-v1.json
Commit 30cc2513cb by Sebastian Krüger (2025-11-23): fix: add ConditioningZeroOut for FLUX workflow negative input
FLUX models require negative conditioning even though they don't use it.
Added ConditioningZeroOut node to create empty negative conditioning from
positive output, satisfying KSampler's required negative input.

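For reference, the wiring this commit adds can be checked mechanically against the serialized graph: each entry in the file's `links` array has the form `[link_id, from_node, from_slot, to_node, to_slot, type]`. Below is a minimal verification sketch, not part of the repository; the default path and the `check_negative_wiring` helper are illustrative assumptions. It asserts that the KSampler's `negative` input is driven by a ConditioningZeroOut node which in turn consumes the positive CLIPTextEncodeFlux output.

```python
import json

def check_negative_wiring(path="flux-dev-t2i-production-v1.json"):
    """Verify KSampler's 'negative' input is fed by ConditioningZeroOut (illustrative check)."""
    with open(path) as f:
        wf = json.load(f)

    nodes = {n["id"]: n for n in wf["nodes"]}
    # links entries: [link_id, from_node, from_slot, to_node, to_slot, type]
    links = {lnk[0]: lnk for lnk in wf["links"]}

    sampler = next(n for n in wf["nodes"] if n["type"] == "KSampler")
    neg_input = next(i for i in sampler["inputs"] if i["name"] == "negative")
    neg_link = links[neg_input["link"]]              # e.g. [9, 9, 0, 5, 2, "CONDITIONING"]

    zero_out = nodes[neg_link[1]]
    assert zero_out["type"] == "ConditioningZeroOut"

    # ConditioningZeroOut should itself be fed from the positive text encoder's output.
    src_link = links[zero_out["inputs"][0]["link"]]  # e.g. [8, 3, 0, 9, 0, "CONDITIONING"]
    assert nodes[src_link[1]]["type"] == "CLIPTextEncodeFlux"
    print("negative input is zeroed positive conditioning - wiring OK")

if __name__ == "__main__":
    check_negative_wiring()
```

Because the sampler runs at CFG 1.0, the zeroed conditioning is never actually applied; it only satisfies KSampler's required negative input.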

{
"last_node_id": 9,
"last_link_id": 13,
"nodes": [
{
"id": 1,
"type": "UNETLoader",
"pos": [50, 100],
"size": [350, 100],
"flags": {},
"order": 0,
"mode": 0,
"outputs": [
{
"name": "MODEL",
"type": "MODEL",
"links": [1],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "UNETLoader"
},
"widgets_values": ["flux1-dev.safetensors", "default"],
"title": "FLUX Dev UNET Loader"
},
{
"id": 2,
"type": "DualCLIPLoader",
"pos": [50, 250],
"size": [350, 100],
"flags": {},
"order": 1,
"mode": 0,
"outputs": [
{
"name": "CLIP",
"type": "CLIP",
"links": [2],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "DualCLIPLoader"
},
"widgets_values": ["t5xxl_fp16.safetensors", "clip_l.safetensors", "flux"],
"title": "FLUX CLIP Loader"
},
{
"id": 3,
"type": "CLIPTextEncodeFlux",
"pos": [450, 100],
"size": [400, 250],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 2
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [3],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncodeFlux"
},
"widgets_values": [
"A beautiful mountain landscape at sunset, highly detailed, professional photography, cinematic lighting, 8k ultra HD",
"A beautiful mountain landscape at sunset, highly detailed, professional photography, cinematic lighting, 8k ultra HD",
3.5
],
"title": "API Positive Prompt"
},
{
"id": 9,
"type": "ConditioningZeroOut",
"pos": [450, 400],
"size": [315, 60],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"name": "conditioning",
"type": "CONDITIONING",
"link": 8
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [9],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "ConditioningZeroOut"
},
"title": "Empty Negative Conditioning"
},
{
"id": 4,
"type": "EmptyLatentImage",
"pos": [450, 500],
"size": [315, 106],
"flags": {},
"order": 4,
"mode": 0,
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [4],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "EmptyLatentImage"
},
"widgets_values": [1024, 1024, 1],
"title": "API Latent Image Config"
},
{
"id": 5,
"type": "KSampler",
"pos": [900, 100],
"size": [315, 474],
"flags": {},
"order": 5,
"mode": 0,
"inputs": [
{
"name": "model",
"type": "MODEL",
"link": 1
},
{
"name": "positive",
"type": "CONDITIONING",
"link": 3
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 9
},
{
"name": "latent_image",
"type": "LATENT",
"link": 4
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [5],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [42, "fixed", 20, 1.0, "euler", "normal", 1.0],
"title": "FLUX Dev Sampler (20 steps)"
},
{
"id": 6,
"type": "VAELoader",
"pos": [900, 600],
"size": [315, 60],
"flags": {},
"order": 6,
"mode": 0,
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [6],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAELoader"
},
"widgets_values": ["pixel_space"],
"title": "VAE Loader"
},
{
"id": 7,
"type": "VAEDecode",
"pos": [1270, 100],
"size": [210, 46],
"flags": {},
"order": 7,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 5
},
{
"name": "vae",
"type": "VAE",
"link": 6
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [7],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAEDecode"
},
"title": "VAE Decode"
},
{
"id": 8,
"type": "SaveImage",
"pos": [1530, 100],
"size": [400, 100],
"flags": {},
"order": 8,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 7
}
],
"properties": {
"Node name for S&R": "SaveImage"
},
"widgets_values": ["flux_dev_output"],
"title": "API Image Output"
}
],
"links": [
[1, 1, 0, 5, 0, "MODEL"],
[2, 2, 0, 3, 0, "CLIP"],
[3, 3, 0, 5, 1, "CONDITIONING"],
[4, 4, 0, 5, 3, "LATENT"],
[5, 5, 0, 7, 0, "LATENT"],
[6, 6, 0, 7, 1, "VAE"],
[7, 7, 0, 8, 0, "IMAGE"],
[8, 3, 0, 9, 0, "CONDITIONING"],
[9, 9, 0, 5, 2, "CONDITIONING"]
],
"groups": [],
"config": {},
"extra": {
"workflow_info": {
"name": "FLUX Dev Text-to-Image Production",
"version": "1.2.0",
"description": "High-quality text-to-image generation using FLUX.1-dev (20-50 steps). Optimized for final production outputs with excellent detail and coherence.",
"category": "text-to-image",
"tags": ["flux", "dev", "high-quality", "production", "t2i"],
"requirements": {
"models": ["FLUX.1-dev", "CLIP-L", "T5-XXL FP16"],
"custom_nodes": [],
"vram_min": "20GB",
"vram_recommended": "24GB"
},
"parameters": {
"prompt": {
"node_id": 3,
"widget_index": 0,
"type": "string",
"required": true,
"default": "A beautiful mountain landscape at sunset",
"description": "Text description of desired image"
},
"width": {
"node_id": 4,
"widget_index": 0,
"type": "integer",
"required": false,
"default": 1024,
"min": 512,
"max": 2048,
"description": "Image width in pixels"
},
"height": {
"node_id": 4,
"widget_index": 1,
"type": "integer",
"required": false,
"default": 1024,
"min": 512,
"max": 2048,
"description": "Image height in pixels"
},
"seed": {
"node_id": 5,
"widget_index": 0,
"type": "integer",
"required": false,
"default": 42,
"min": 0,
"max": 4294967295,
"description": "Random seed for reproducibility"
},
"steps": {
"node_id": 5,
"widget_index": 2,
"type": "integer",
"required": false,
"default": 20,
"min": 10,
"max": 50,
"description": "Number of sampling steps (20-50 recommended for FLUX Dev)"
},
"guidance": {
"node_id": 3,
"widget_index": 2,
"type": "float",
"required": false,
"default": 3.5,
"min": 1.0,
"max": 10.0,
"description": "Guidance scale (3.5 recommended for FLUX)"
}
},
"outputs": {
"image": {
"node_id": 8,
"type": "image",
"format": "PNG",
"resolution": "1024x1024 (configurable)"
}
},
"performance": {
"avg_generation_time": "25-35 seconds (20 steps), 60-75 seconds (50 steps)",
"vram_usage": "~20-22GB",
"gpu_utilization": "95-100%"
},
"notes": [
"FLUX uses UNETLoader instead of CheckpointLoader",
"DualCLIPLoader loads both T5-XXL and CLIP-L text encoders",
"CLIPTextEncodeFlux is FLUX-specific text encoder",
"No negative prompt for FLUX - guidance is handled differently",
"CFG scale of 1.0 recommended, guidance in text encoder"
]
}
},
"version": 0.4
}
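
The `extra.workflow_info.parameters` block above maps each tunable value to a `(node_id, widget_index)` pair in the graph's `widgets_values` arrays. The following is a sketch of how such a map could be applied to produce a run-specific copy of the workflow; the file names and the `apply_parameters` helper are hypothetical, not part of this repository.

```python
import json

def apply_parameters(workflow: dict, overrides: dict) -> dict:
    """Apply the embedded workflow_info.parameters map (node_id + widget_index)
    to override values such as prompt, seed, or steps in widgets_values."""
    params = workflow["extra"]["workflow_info"]["parameters"]
    nodes = {n["id"]: n for n in workflow["nodes"]}

    for name, value in overrides.items():
        spec = params[name]                      # e.g. {"node_id": 5, "widget_index": 0, ...}
        node = nodes[spec["node_id"]]
        node["widgets_values"][spec["widget_index"]] = value
    return workflow


if __name__ == "__main__":
    with open("flux-dev-t2i-production-v1.json") as f:
        wf = json.load(f)

    wf = apply_parameters(wf, {
        "prompt": "A foggy pine forest at dawn, volumetric light",
        "seed": 123456,
        "steps": 28,
        "width": 1216,
        "height": 832,
    })

    with open("flux-dev-t2i-run.json", "w") as f:
        json.dump(wf, f, indent=2)
```

Note that the `prompt` entry targets only widget index 0 of node 3, while CLIPTextEncodeFlux carries two text widgets (CLIP-L at index 0, T5-XXL at index 1), so in practice you may want to write the same prompt to both. Also, this file is in ComfyUI's graph (UI) format; if you submit jobs over ComfyUI's HTTP API instead, the usual route is to export the API-format JSON and patch node `inputs` there.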