fix: rebuild FLUX Dev workflow with correct node types

- Replace CheckpointLoaderSimple with UNETLoader
- Replace CLIPTextEncode with DualCLIPLoader + CLIPTextEncodeFlux
- Add VAELoader with pixel_space
- Remove negative prompt (FLUX uses guidance differently)
- Set KSampler CFG to 1.0; guidance (3.5) is applied in the FLUX text encoder
- Add all node connections to the links array

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
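
For context on how a graph like this is typically exercised, the sketch below queues a prompt against a running ComfyUI instance over HTTP. It is a minimal illustration, not part of this commit: the host/port and file name are assumptions, and it presumes the graph has been re-exported via ComfyUI's "Save (API Format)" option, since the /prompt endpoint expects the API-format prompt dictionary rather than the UI graph stored in this file.

```python
import json
import uuid
import urllib.request

COMFY_URL = "http://127.0.0.1:8188"  # assumed local ComfyUI instance

# Assumes the workflow was re-saved in API format; the JSON tracked in this
# repo is the UI graph format (nodes/links) and is not accepted by /prompt.
with open("flux_dev_t2i_api.json", encoding="utf-8") as f:  # hypothetical file name
    prompt = json.load(f)

payload = {
    "prompt": prompt,
    "client_id": str(uuid.uuid4()),
}

req = urllib.request.Request(
    f"{COMFY_URL}/prompt",
    data=json.dumps(payload).encode("utf-8"),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    # On success ComfyUI answers with a JSON body containing a prompt_id.
    print(resp.read().decode("utf-8"))
```
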
commit a2455ae9ee
parent 230175f33d
2025-11-23 10:30:47 +01:00

@@ -1,12 +1,12 @@
{
"last_node_id": 12,
"last_link_id": 15,
"last_node_id": 8,
"last_link_id": 12,
"nodes": [
{
"id": 1,
"type": "CheckpointLoaderSimple",
"type": "UNETLoader",
"pos": [50, 100],
"size": {"0": 350, "1": 100},
"size": [350, 100],
"flags": {},
"order": 0,
"mode": 0,
@@ -16,34 +16,44 @@
"type": "MODEL",
"links": [1],
"slot_index": 0
},
{
"name": "CLIP",
"type": "CLIP",
"links": [2, 3],
"slot_index": 1
},
{
"name": "VAE",
"type": "VAE",
"links": [4],
"slot_index": 2
}
],
"properties": {
"Node name for S&R": "CheckpointLoaderSimple"
"Node name for S&R": "UNETLoader"
},
"widgets_values": ["diffusers/FLUX.1-dev"],
"title": "FLUX Dev Checkpoint Loader"
"widgets_values": ["flux1-dev.safetensors", "default"],
"title": "FLUX Dev UNET Loader"
},
{
"id": 2,
"type": "CLIPTextEncode",
"pos": [450, 100],
"size": {"0": 400, "1": 200},
"type": "DualCLIPLoader",
"pos": [50, 250],
"size": [350, 100],
"flags": {},
"order": 1,
"mode": 0,
"outputs": [
{
"name": "CLIP",
"type": "CLIP",
"links": [2],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "DualCLIPLoader"
},
"widgets_values": ["t5xxl_fp16.safetensors", "clip_l.safetensors", "flux"],
"title": "FLUX CLIP Loader"
},
{
"id": 3,
"type": "CLIPTextEncodeFlux",
"pos": [450, 100],
"size": [400, 250],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"name": "clip",
@@ -55,50 +65,25 @@
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [5],
"links": [3],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
"Node name for S&R": "CLIPTextEncodeFlux"
},
"widgets_values": ["A beautiful mountain landscape at sunset, highly detailed, professional photography, cinematic lighting, 8k ultra HD"],
"widgets_values": [
"A beautiful mountain landscape at sunset, highly detailed, professional photography, cinematic lighting, 8k ultra HD",
"A beautiful mountain landscape at sunset, highly detailed, professional photography, cinematic lighting, 8k ultra HD",
3.5
],
"title": "API Positive Prompt"
},
{
"id": 3,
"type": "CLIPTextEncode",
"pos": [450, 350],
"size": {"0": 400, "1": 200},
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"name": "clip",
"type": "CLIP",
"link": 3
}
],
"outputs": [
{
"name": "CONDITIONING",
"type": "CONDITIONING",
"links": [6],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "CLIPTextEncode"
},
"widgets_values": ["blurry, low quality, distorted, watermark, text, signature, cartoon"],
"title": "API Negative Prompt"
},
{
"id": 4,
"type": "EmptyLatentImage",
"pos": [450, 600],
"size": {"0": 315, "1": 106},
"pos": [450, 400],
"size": [315, 106],
"flags": {},
"order": 3,
"mode": 0,
@@ -106,7 +91,7 @@
{
"name": "LATENT",
"type": "LATENT",
"links": [7],
"links": [4],
"slot_index": 0
}
],
@@ -120,7 +105,7 @@
"id": 5,
"type": "KSampler",
"pos": [900, 100],
"size": {"0": 315, "1": 474},
"size": [315, 474],
"flags": {},
"order": 4,
"mode": 0,
@@ -133,66 +118,80 @@
{
"name": "positive",
"type": "CONDITIONING",
"link": 5
"link": 3
},
{
"name": "negative",
"type": "CONDITIONING",
"link": 6
"link": null
},
{
"name": "latent_image",
"type": "LATENT",
"link": 7
"link": 4
}
],
"outputs": [
{
"name": "LATENT",
"type": "LATENT",
"links": [8],
"links": [5],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "KSampler"
},
"widgets_values": [
42,
"fixed",
20,
3.5,
"euler",
"normal",
1
],
"title": "FLUX Dev Sampler (20-50 steps)"
"widgets_values": [42, "fixed", 20, 1.0, "euler", "normal", 1.0],
"title": "FLUX Dev Sampler (20 steps)"
},
{
"id": 6,
"type": "VAEDecode",
"pos": [1270, 100],
"size": {"0": 210, "1": 46},
"type": "VAELoader",
"pos": [900, 600],
"size": [315, 60],
"flags": {},
"order": 5,
"mode": 0,
"outputs": [
{
"name": "VAE",
"type": "VAE",
"links": [6],
"slot_index": 0
}
],
"properties": {
"Node name for S&R": "VAELoader"
},
"widgets_values": ["pixel_space"],
"title": "VAE Loader"
},
{
"id": 7,
"type": "VAEDecode",
"pos": [1270, 100],
"size": [210, 46],
"flags": {},
"order": 6,
"mode": 0,
"inputs": [
{
"name": "samples",
"type": "LATENT",
"link": 8
"link": 5
},
{
"name": "vae",
"type": "VAE",
"link": 4
"link": 6
}
],
"outputs": [
{
"name": "IMAGE",
"type": "IMAGE",
"links": [9, 10],
"links": [7],
"slot_index": 0
}
],
@@ -201,31 +200,11 @@
},
"title": "VAE Decode"
},
{
"id": 7,
"type": "PreviewImage",
"pos": [1530, 100],
"size": {"0": 400, "1": 400},
"flags": {},
"order": 6,
"mode": 0,
"inputs": [
{
"name": "images",
"type": "IMAGE",
"link": 9
}
],
"properties": {
"Node name for S&R": "PreviewImage"
},
"title": "Preview Output"
},
{
"id": 8,
"type": "SaveImage",
"pos": [1530, 550],
"size": {"0": 400, "1": 100},
"pos": [1530, 100],
"size": [400, 100],
"flags": {},
"order": 7,
"mode": 0,
@@ -233,7 +212,7 @@
{
"name": "images",
"type": "IMAGE",
"link": 10
"link": 7
}
],
"properties": {
@@ -245,49 +224,37 @@
],
"links": [
[1, 1, 0, 5, 0, "MODEL"],
[2, 1, 1, 2, 0, "CLIP"],
[3, 1, 1, 3, 0, "CLIP"],
[4, 1, 2, 6, 1, "VAE"],
[5, 2, 0, 5, 1, "CONDITIONING"],
[6, 3, 0, 5, 2, "CONDITIONING"],
[7, 4, 0, 5, 3, "LATENT"],
[8, 5, 0, 6, 0, "LATENT"],
[9, 6, 0, 7, 0, "IMAGE"],
[10, 6, 0, 8, 0, "IMAGE"]
[2, 2, 0, 3, 0, "CLIP"],
[3, 3, 0, 5, 1, "CONDITIONING"],
[4, 4, 0, 5, 3, "LATENT"],
[5, 5, 0, 7, 0, "LATENT"],
[6, 6, 0, 7, 1, "VAE"],
[7, 7, 0, 8, 0, "IMAGE"]
],
"groups": [],
"config": {},
"extra": {
"workflow_info": {
"name": "FLUX Dev Text-to-Image Production",
"version": "1.0.0",
"author": "RunPod AI Model Orchestrator",
"version": "1.2.0",
"description": "High-quality text-to-image generation using FLUX.1-dev (20-50 steps). Optimized for final production outputs with excellent detail and coherence.",
"category": "text-to-image",
"tags": ["flux", "dev", "high-quality", "production", "t2i"],
"requirements": {
"models": ["FLUX.1-dev"],
"models": ["FLUX.1-dev", "CLIP-L", "T5-XXL FP16"],
"custom_nodes": [],
"vram_min": "20GB",
"vram_recommended": "24GB"
},
"parameters": {
"prompt": {
"node_id": 2,
"node_id": 3,
"widget_index": 0,
"type": "string",
"required": true,
"default": "A beautiful mountain landscape at sunset",
"description": "Text description of desired image"
},
"negative_prompt": {
"node_id": 3,
"widget_index": 0,
"type": "string",
"required": false,
"default": "blurry, low quality",
"description": "Undesired elements to avoid"
},
"width": {
"node_id": 4,
"widget_index": 0,
@@ -328,15 +295,15 @@
"max": 50,
"description": "Number of sampling steps (20-50 recommended for FLUX Dev)"
},
"cfg": {
"node_id": 5,
"widget_index": 3,
"guidance": {
"node_id": 3,
"widget_index": 2,
"type": "float",
"required": false,
"default": 3.5,
"min": 1.0,
"max": 10.0,
"description": "Classifier-free guidance scale (3.5 recommended)"
"description": "Guidance scale (3.5 recommended for FLUX)"
}
},
"outputs": {
@@ -351,7 +318,14 @@
"avg_generation_time": "25-35 seconds (20 steps), 60-75 seconds (50 steps)",
"vram_usage": "~20-22GB",
"gpu_utilization": "95-100%"
}
},
"notes": [
"FLUX uses UNETLoader instead of CheckpointLoader",
"DualCLIPLoader loads both T5-XXL and CLIP-L text encoders",
"CLIPTextEncodeFlux is FLUX-specific text encoder",
"No negative prompt for FLUX - guidance is handled differently",
"CFG scale of 1.0 recommended, guidance in text encoder"
]
}
},
"version": 0.4