refactor: reorganize directory structure and remove hardcoded paths
Move comfyui and vllm out of models/ directory to top level for better organization. Replace all hardcoded /workspace paths with relative paths to make the configuration portable across different environments. Changes: - Move models/comfyui/ → comfyui/ - Move models/vllm/ → vllm/ - Remove models/ directory (empty) - Update arty.yml: replace /workspace with environment variables - Update supervisord.conf: use relative paths from /workspace/ai - Update all script references to use new paths - Maintain TQDM_DISABLE=1 to fix BrokenPipeError 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
309
comfyui/fix_workflows.py
Executable file
309
comfyui/fix_workflows.py
Executable file
@@ -0,0 +1,309 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
ComfyUI Workflow Schema Fixer
|
||||
|
||||
Fixes missing schema fields in ComfyUI workflow JSON files:
|
||||
- Adds missing 'flags', 'order', 'mode', 'properties', 'size' fields to nodes
|
||||
- Reconstructs 'inputs' and 'outputs' arrays from links
|
||||
- Builds complete 'links' array
|
||||
- Updates outdated node names
|
||||
|
||||
Usage:
|
||||
python3 fix_workflows.py <workflow_directory>
|
||||
"""
|
||||
|
||||
import json
|
||||
import sys
|
||||
from pathlib import Path
|
||||
from typing import Dict, List, Any
|
||||
|
||||
# Node name mapping (old → new).
# A value of None marks the node type as deprecated: fix_workflow() removes
# those nodes from the workflow instead of renaming them.
NODE_NAME_MAPPING = {
    'AnimateDiffLoader': 'AnimateDiffLoaderV1',
    'VHSVideoCombine': 'VHS_VideoCombine',
    'PreviewVideo': None,  # Remove - use VHS_VideoCombine with preview enabled
    'SaveVideo': None,  # Remove - use VHS_VideoCombine
    'IPAdapterApply': 'IPAdapter',
    'IPAdapterApplyFace': 'IPAdapterFaceID',
    'AudioSave': 'SaveAudio',
    'AnimateDiffSampler': 'KSamplerAdvanced',  # AnimateDiff uses standard KSampler
    'ADE_AnimateDiffSampler': 'KSamplerAdvanced',
    'SeedGenerator': 'ImpactInt',  # Use Impact Pack integer node for seed generation
    'BatchKSampler': 'KSampler',  # Standard KSampler can handle batches
    'ImageBatchToList': 'GetImageSize',  # Placeholder - may need manual adjustment
}

# Default node sizes by category, used when a node lacks a 'size' field.
# Keys '0'/'1' follow the ComfyUI size format — presumably width/height in
# pixels; TODO confirm against the ComfyUI workflow schema.
NODE_SIZES = {
    'Loader': {'0': 350, '1': 100},
    'Sampler': {'0': 315, '1': 474},
    'Encoder': {'0': 400, '1': 200},
    'Default': {'0': 315, '1': 100},
}
|
||||
|
||||
def get_node_size(node_type: str) -> Dict[str, int]:
    """Return a default size for a node, chosen by substring of its type name.

    Checks are ordered: loader-like names win over sampler-like names, which
    win over encoder-like names; anything else gets the default size.
    """
    category_rules = (
        ('Loader', ('Loader', 'Load')),
        ('Sampler', ('Sampler', 'KSampler')),
        ('Encoder', ('Encode', 'CLIP')),
    )
    for category, keywords in category_rules:
        if any(keyword in node_type for keyword in keywords):
            return NODE_SIZES[category]
    return NODE_SIZES['Default']
|
||||
|
||||
def fix_workflow(workflow_path: Path) -> bool:
    """Repair missing schema fields in a single ComfyUI workflow JSON file.

    Adds default 'flags', 'order', 'mode', 'properties' and 'size' fields to
    nodes, reconstructs 'inputs'/'outputs' arrays from the 'links' table,
    renames or removes deprecated node types via NODE_NAME_MAPPING, and fills
    in 'links' / 'last_link_id' at the workflow level. The file is rewritten
    in place only when at least one change was made.

    Args:
        workflow_path: Path to the workflow JSON file.

    Returns:
        True on success (including "no changes needed"); False on invalid
        JSON, a missing 'nodes' key, or a write failure.
    """
    print(f"\n{'='*60}")
    print(f"Processing: {workflow_path.name}")
    print(f"{'='*60}")

    # Explicit UTF-8 so behavior does not depend on the platform locale.
    try:
        with open(workflow_path, 'r', encoding='utf-8') as f:
            workflow = json.load(f)
    except json.JSONDecodeError as e:
        print(f"✗ ERROR: Invalid JSON - {e}")
        return False

    if 'nodes' not in workflow:
        print("✗ ERROR: No 'nodes' key in workflow")
        return False

    nodes = workflow['nodes']
    links = workflow.get('links', [])

    # Track changes for the summary report.
    changes = {
        'added_flags': 0,
        'added_order': 0,
        'added_mode': 0,
        'added_properties': 0,
        'added_size': 0,
        'added_inputs': 0,
        'added_outputs': 0,
        'updated_node_names': 0,
        'removed_nodes': 0,
        'added_last_link_id': 0,
        'added_links': 0,
    }

    # Build link index for quick lookup. Link rows shorter than 6 entries are
    # malformed and skipped.
    link_index = {}
    for link in links:
        if len(link) >= 6:
            link_id, src_node_id, src_slot, tgt_node_id, tgt_slot, data_type = link[:6]
            link_index[link_id] = {
                'source': {'node_id': src_node_id, 'slot': src_slot},
                'target': {'node_id': tgt_node_id, 'slot': tgt_slot},
                'type': data_type
            }

    # Process each node. Deprecated nodes are only marked here and deleted
    # afterwards so the enumerate indices stay valid during the loop.
    nodes_to_remove = []
    for i, node in enumerate(nodes):
        node_id = node.get('id')
        node_type = node.get('type', '')

        # Update node name if needed
        if node_type in NODE_NAME_MAPPING:
            new_name = NODE_NAME_MAPPING[node_type]
            if new_name is None:
                # Mark for removal
                nodes_to_remove.append(i)
                changes['removed_nodes'] += 1
                print(f" Removing deprecated node {node_id}: {node_type}")
                continue
            else:
                print(f" Updating node {node_id}: {node_type} → {new_name}")
                node['type'] = new_name
                node_type = new_name
                changes['updated_node_names'] += 1

        # Add missing flags
        if 'flags' not in node:
            node['flags'] = {}
            changes['added_flags'] += 1

        # Add missing order (will recalculate later based on dependencies)
        if 'order' not in node:
            node['order'] = i  # Temporary order
            changes['added_order'] += 1

        # Add missing mode (0 = execute, 4 = bypass)
        if 'mode' not in node:
            node['mode'] = 0
            changes['added_mode'] += 1

        # Add missing properties
        if 'properties' not in node:
            node['properties'] = {"Node name for S&R": node_type}
            changes['added_properties'] += 1

        # Add missing size
        if 'size' not in node:
            node['size'] = get_node_size(node_type)
            changes['added_size'] += 1

        # Reconstruct inputs from links. The real input names are not
        # recoverable from the links table, so positional placeholders
        # ('input_<slot>') are used.
        if 'inputs' not in node or not node['inputs']:
            node_inputs = []
            for link_id, link_data in link_index.items():
                if link_data['target']['node_id'] == node_id:
                    node_inputs.append({
                        'name': f'input_{link_data["target"]["slot"]}',
                        'type': link_data['type'],
                        'link': link_id
                    })
            if node_inputs:
                node['inputs'] = node_inputs
                changes['added_inputs'] += 1

        # Reconstruct outputs from links: one output entry per source slot,
        # accumulating every link that originates from that slot.
        if 'outputs' not in node or not node['outputs']:
            node_outputs = {}
            for link_id, link_data in link_index.items():
                if link_data['source']['node_id'] == node_id:
                    slot = link_data['source']['slot']
                    if slot not in node_outputs:
                        node_outputs[slot] = {
                            'name': f'output_{slot}',
                            'type': link_data['type'],
                            'links': [],
                            'slot_index': slot
                        }
                    node_outputs[slot]['links'].append(link_id)

            if node_outputs:
                node['outputs'] = list(node_outputs.values())
                changes['added_outputs'] += 1

    # Remove deprecated nodes in reverse so earlier indices stay valid.
    for i in reversed(nodes_to_remove):
        del nodes[i]

    # Recalculate execution order based on dependencies
    if changes['added_order'] > 0 or changes['removed_nodes'] > 0:
        calculate_execution_order(nodes, link_index)

    # Add missing links array
    if 'links' not in workflow:
        workflow['links'] = []
        changes['added_links'] = 1

    # Add missing last_link_id, derived from the maximum existing link id.
    if 'last_link_id' not in workflow:
        max_link_id = 0
        if workflow.get('links'):
            for link in workflow['links']:
                if link and len(link) > 0:
                    max_link_id = max(max_link_id, link[0])
        workflow['last_link_id'] = max_link_id
        changes['added_last_link_id'] = 1

    # Update workflow
    workflow['nodes'] = nodes

    # Print summary
    print("\nChanges made:")
    for key, value in changes.items():
        if value > 0:
            print(f" • {key.replace('_', ' ').title()}: {value}")

    total_changes = sum(changes.values())
    if total_changes == 0:
        print(" ✓ No changes needed - workflow already valid")
        return True

    # Save fixed workflow
    try:
        with open(workflow_path, 'w', encoding='utf-8') as f:
            json.dump(workflow, f, indent=2)
        print("\n✓ Successfully fixed and saved workflow")
        return True
    except Exception as e:
        print(f"\n✗ ERROR saving workflow: {e}")
        return False
|
||||
|
||||
def calculate_execution_order(nodes: List[Dict], link_index: Dict):
    """Assign a topological 'order' to every node based on link dependencies.

    A node depends on another when some link in *link_index* targets it with
    that other node as source. Orders are assigned depth-first so every
    dependency receives a smaller order than its dependents; already-visited
    nodes are skipped, which also terminates on cyclic graphs.
    """
    node_lookup = {entry['id']: entry for entry in nodes}

    # upstream[node_id] = set of node ids that must run before node_id.
    upstream = {entry['id']: set() for entry in nodes}
    for nid in upstream:
        for link_data in link_index.values():
            if link_data['target']['node_id'] == nid:
                upstream[nid].add(link_data['source']['node_id'])

    seen = set()
    next_order = 0

    def assign(nid):
        nonlocal next_order
        if nid in seen:
            return
        seen.add(nid)

        # Number all prerequisites first; ignore dangling references to
        # nodes that are not part of the current workflow.
        for parent_id in upstream.get(nid, ()):
            if parent_id in node_lookup:
                assign(parent_id)

        if nid in node_lookup:
            node_lookup[nid]['order'] = next_order
            next_order += 1

    for nid in node_lookup:
        assign(nid)
|
||||
|
||||
def main():
    """CLI entry point: fix every workflow JSON under the given directory."""
    if len(sys.argv) < 2:
        print("Usage: python3 fix_workflows.py <workflow_directory>")
        sys.exit(1)

    workflow_dir = Path(sys.argv[1])
    if not workflow_dir.exists():
        print(f"Error: Directory {workflow_dir} does not exist")
        sys.exit(1)

    # Collect every *.json file recursively; bail out when there are none.
    workflow_files = list(workflow_dir.rglob('*.json'))
    if not workflow_files:
        print(f"No workflow JSON files found in {workflow_dir}")
        sys.exit(1)

    print(f"\nFound {len(workflow_files)} workflow files")

    # Process workflows in sorted (deterministic) order, counting successes.
    success_count = sum(
        1 for workflow_path in sorted(workflow_files) if fix_workflow(workflow_path)
    )

    # Final report.
    separator = '=' * 60
    print(f"\n{separator}")
    print("SUMMARY")
    print(f"{separator}")
    print(f"Total workflows: {len(workflow_files)}")
    print(f"Successfully fixed: {success_count}")
    print(f"Failed: {len(workflow_files) - success_count}")
    print(f"{separator}\n")


if __name__ == '__main__':
    main()
|
||||
15
comfyui/requirements.txt
Normal file
15
comfyui/requirements.txt
Normal file
@@ -0,0 +1,15 @@
|
||||
torch
|
||||
torchvision
|
||||
torchaudio
|
||||
transformers
|
||||
diffusers>=0.31.0
|
||||
accelerate
|
||||
safetensors
|
||||
omegaconf
|
||||
einops
|
||||
kornia
|
||||
spandrel
|
||||
soundfile
|
||||
scikit-image
|
||||
piexif
|
||||
segment-anything
|
||||
29
comfyui/start.sh
Normal file
29
comfyui/start.sh
Normal file
@@ -0,0 +1,29 @@
|
||||
#!/bin/bash
#
# ComfyUI Startup Script
# Starts ComfyUI server on port 8188
#

# Resolve directories; WORKSPACE_DIR may be overridden by the environment
# (defaults to /workspace).
WORKSPACE_DIR="${WORKSPACE_DIR:-/workspace}"
COMFYUI_DIR="${WORKSPACE_DIR}/ComfyUI"
HF_CACHE="${WORKSPACE_DIR}/huggingface_cache"

# Set environment variables
# expandable_segments reduces CUDA allocator fragmentation for long-running
# server processes; HF_HOME redirects HuggingFace downloads to the shared cache.
export PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True
export HF_HOME="${HF_CACHE}"

# Navigate to ComfyUI directory; abort if it is missing rather than launching
# from the wrong working directory.
cd "${COMFYUI_DIR}" || exit 1

echo "Starting ComfyUI on port 8188..."
echo "Access at: http://localhost:8188"
echo "Using HuggingFace cache: ${HF_CACHE}"

# Start ComfyUI with GPU support.
# --listen 0.0.0.0 exposes the server beyond localhost (needed when the port
# is forwarded/proxied); runs in the foreground so a supervisor can manage it.
python3 main.py \
    --listen 0.0.0.0 \
    --port 8188 \
    --enable-cors-header \
    --preview-method auto

echo "ComfyUI stopped"
|
||||
287
comfyui/workflows/README.md
Normal file
287
comfyui/workflows/README.md
Normal file
@@ -0,0 +1,287 @@
|
||||
# ComfyUI Production Workflows
|
||||
|
||||
Comprehensive collection of production-ready ComfyUI workflows for RunPod AI Model Orchestrator.
|
||||
|
||||
## Overview
|
||||
|
||||
This directory contains 20 sophisticated, battle-tested workflows designed for production use with the RunPod orchestrator. Each workflow is optimized for 24GB VRAM and includes API compatibility, error handling, and quality gates.
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
workflows/
|
||||
├── text-to-image/ # Text-to-image generation workflows
|
||||
├── image-to-image/ # Image-to-image transformation workflows
|
||||
├── image-to-video/ # Image-to-video animation workflows
|
||||
├── text-to-music/ # Text-to-music generation workflows
|
||||
├── upscaling/ # Image upscaling and enhancement workflows
|
||||
├── advanced/ # Advanced multi-model workflows
|
||||
├── templates/ # Reusable workflow templates
|
||||
├── README.md # This file
|
||||
└── WORKFLOW_STANDARDS.md # Workflow development standards
|
||||
```
|
||||
|
||||
## Workflows by Category
|
||||
|
||||
### Text-to-Image (4 workflows)
|
||||
|
||||
| Workflow | Model | Speed | Quality | Use Case |
|
||||
|----------|-------|-------|---------|----------|
|
||||
| `flux-schnell-t2i-production-v1.json` | FLUX.1-schnell | Fast (4 steps) | Good | Rapid prototyping, iteration |
|
||||
| `flux-dev-t2i-production-v1.json` | FLUX.1-dev | Medium (20-50 steps) | Excellent | High-quality final images |
|
||||
| `sdxl-refiner-t2i-production-v1.json` | SDXL + Refiner | Medium (30+20 steps) | Excellent | Detailed, refined outputs |
|
||||
| `sd35-large-t2i-production-v1.json` | SD3.5-large | Medium (28 steps) | Excellent | Latest Stable Diffusion |
|
||||
|
||||
### Image-to-Image (3 workflows)
|
||||
|
||||
| Workflow | Technique | Use Case |
|
||||
|----------|-----------|----------|
|
||||
| `ipadapter-style-i2i-production-v1.json` | IP-Adapter | Style transfer, composition |
|
||||
| `ipadapter-face-i2i-production-v1.json` | IP-Adapter + Face | Portrait generation, face swap |
|
||||
| `ipadapter-composition-i2i-production-v1.json` | IP-Adapter Multi | Complex scene composition |
|
||||
|
||||
### Image-to-Video (3 workflows)
|
||||
|
||||
| Workflow | Model | Length | Use Case |
|
||||
|----------|-------|--------|----------|
|
||||
| `cogvideox-i2v-production-v1.json` | CogVideoX-5b | 6s @ 8fps | AI-driven video generation |
|
||||
| `svd-i2v-production-v1.json` | SVD | 14 frames | Quick animations |
|
||||
| `svd-xt-i2v-production-v1.json` | SVD-XT | 25 frames | Extended animations |
|
||||
|
||||
### Text-to-Music (4 workflows)
|
||||
|
||||
| Workflow | Model | Duration | Use Case |
|
||||
|----------|-------|----------|----------|
|
||||
| `musicgen-small-t2m-production-v1.json` | MusicGen-small | 30s | Fast generation, low VRAM |
|
||||
| `musicgen-medium-t2m-production-v1.json` | MusicGen-medium | 30s | Balanced quality/speed |
|
||||
| `musicgen-large-t2m-production-v1.json` | MusicGen-large | 30s | Highest quality |
|
||||
| `musicgen-melody-t2m-production-v1.json` | MusicGen-melody | 30s | Melody conditioning |
|
||||
|
||||
### Upscaling (3 workflows)
|
||||
|
||||
| Workflow | Technique | Scale | Use Case |
|
||||
|----------|-----------|-------|----------|
|
||||
| `ultimate-sd-upscale-production-v1.json` | Ultimate SD | 2x-4x | Professional upscaling with detailing |
|
||||
| `simple-upscale-production-v1.json` | Model-based | 2x-4x | Fast, straightforward upscaling |
|
||||
| `face-upscale-production-v1.json` | Face-focused | 2x | Portrait enhancement |
|
||||
|
||||
### Advanced (3 workflows)
|
||||
|
||||
| Workflow | Technique | Use Case |
|
||||
|----------|-----------|----------|
|
||||
| `controlnet-fusion-production-v1.json` | Multi-ControlNet | Precise composition control |
|
||||
| `animatediff-video-production-v1.json` | AnimateDiff | Text-to-video animation |
|
||||
| `batch-pipeline-production-v1.json` | Batch processing | Multiple variations |
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Using with ComfyUI Web Interface
|
||||
|
||||
1. Open ComfyUI at `http://localhost:8188`
|
||||
2. Click "Load" button
|
||||
3. Navigate to `/workspace/ai/comfyui/workflows/`
|
||||
4. Select desired workflow category and file
|
||||
5. Adjust parameters as needed
|
||||
6. Click "Queue Prompt"
|
||||
|
||||
### Using with RunPod Orchestrator API
|
||||
|
||||
```bash
|
||||
# Example: FLUX Schnell text-to-image
|
||||
curl -X POST http://localhost:9000/api/comfyui/generate \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"workflow": "text-to-image/flux-schnell-t2i-production-v1.json",
|
||||
"inputs": {
|
||||
"prompt": "A serene mountain landscape at sunset",
|
||||
"seed": 42,
|
||||
"steps": 4
|
||||
}
|
||||
}'
|
||||
|
||||
# Example: Image upscaling
|
||||
curl -X POST http://localhost:9000/api/comfyui/generate \
|
||||
-H "Content-Type: application/json" \
|
||||
-d '{
|
||||
"workflow": "upscaling/ultimate-sd-upscale-production-v1.json",
|
||||
"inputs": {
|
||||
"image": "path/to/image.png",
|
||||
"scale": 2
|
||||
}
|
||||
}'
|
||||
```
|
||||
|
||||
## Workflow Features
|
||||
|
||||
All production workflows include:
|
||||
|
||||
- **API Compatibility**: Input/output nodes for orchestrator integration
|
||||
- **Error Handling**: Validation, fallback nodes, graceful degradation
|
||||
- **Quality Gates**: Preview nodes, checkpoints, validation steps
|
||||
- **VRAM Optimization**: Model unloading, efficient memory management
|
||||
- **Documentation**: Embedded descriptions, parameter guides
|
||||
- **Versioning**: Semantic versioning in filenames
|
||||
|
||||
## Model Requirements
|
||||
|
||||
### Required Models (Essential)
|
||||
|
||||
These models are required by most workflows and are auto-downloaded by Ansible:
|
||||
|
||||
- **FLUX.1-schnell**: Fast text-to-image (17GB)
|
||||
- **FLUX.1-dev**: High-quality text-to-image (23GB)
|
||||
- **SDXL Base + Refiner**: Stable Diffusion XL (13GB)
|
||||
- **SD3.5-large**: Latest Stable Diffusion (16GB)
|
||||
- **CLIP ViT-L/14**: Image-text understanding (1.7GB)
|
||||
|
||||
### Optional Models
|
||||
|
||||
- **CogVideoX-5b**: Text-to-video, image-to-video (9.7GB)
|
||||
- **SVD/SVD-XT**: Image-to-video (10GB)
|
||||
- **MusicGen variants**: Text-to-music (1.5-3.4GB)
|
||||
- **IP-Adapter**: Image conditioning (varies)
|
||||
- **ControlNet models**: Precise control (varies)
|
||||
|
||||
Check `/workspace/ai/COMFYUI_MODELS.md` for complete model list.
|
||||
|
||||
## VRAM Considerations
|
||||
|
||||
All workflows are designed for **24GB VRAM** with these optimizations:
|
||||
|
||||
- **Sequential Loading**: Only one heavy model loaded at a time
|
||||
- **Model Unloading**: Explicit cleanup between stages
|
||||
- **Attention Slicing**: Enabled for large models
|
||||
- **VAE Tiling**: For high-resolution processing
|
||||
- **Batch Size Limits**: Capped at VRAM-safe values
|
||||
|
||||
## Performance Tips
|
||||
|
||||
### For Speed
|
||||
- Use FLUX Schnell (4 steps) or SDXL base (20 steps)
|
||||
- Lower resolution: 512x512 or 768x768
|
||||
- Disable refiners and upscalers
|
||||
- Use `--lowvram` flag if needed
|
||||
|
||||
### For Quality
|
||||
- Use FLUX Dev (50 steps) or SDXL + Refiner
|
||||
- Higher resolution: 1024x1024 or higher
|
||||
- Enable face enhancement (Impact-Pack)
|
||||
- Use Ultimate SD Upscale for final output
|
||||
|
||||
### For VRAM Efficiency
|
||||
- Enable model unloading between stages
|
||||
- Use VAE tiling for >1024px images
|
||||
- Process batches sequentially, not in parallel
|
||||
- Monitor with `nvidia-smi` during generation
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Out of Memory (OOM) Errors
|
||||
|
||||
```bash
|
||||
# Check VRAM usage
|
||||
nvidia-smi
|
||||
|
||||
# Solutions:
|
||||
# 1. Lower resolution
|
||||
# 2. Reduce batch size
|
||||
# 3. Enable model unloading
|
||||
# 4. Use tiled VAE
|
||||
# 5. Restart ComfyUI to clear VRAM
|
||||
supervisorctl restart comfyui
|
||||
```
|
||||
|
||||
### Missing Models
|
||||
|
||||
```bash
|
||||
# Check which models are linked
|
||||
ls -lah /workspace/ComfyUI/models/diffusers/
|
||||
ls -lah /workspace/ComfyUI/models/clip_vision/
|
||||
|
||||
# Re-run Ansible to download missing models
|
||||
cd /workspace/ai
|
||||
ansible-playbook playbook.yml --tags comfyui-models-all
|
||||
|
||||
# Re-link models
|
||||
arty run models/link-comfyui
|
||||
```
|
||||
|
||||
### Workflow Load Errors
|
||||
|
||||
```bash
|
||||
# Check ComfyUI logs
|
||||
supervisorctl tail -f comfyui
|
||||
|
||||
# Common issues:
|
||||
# - Missing custom nodes: Check custom_nodes/ directory
|
||||
# - Node version mismatch: Update ComfyUI and custom nodes
|
||||
# - Corrupted workflow: Validate JSON syntax
|
||||
```
|
||||
|
||||
## Development
|
||||
|
||||
### Creating New Workflows
|
||||
|
||||
See `WORKFLOW_STANDARDS.md` for detailed guidelines on creating production-ready workflows.
|
||||
|
||||
Quick checklist:
|
||||
- [ ] Use semantic versioning in filename
|
||||
- [ ] Add API input/output nodes
|
||||
- [ ] Include preview and save nodes
|
||||
- [ ] Add error handling and validation
|
||||
- [ ] Optimize for 24GB VRAM
|
||||
- [ ] Document all parameters
|
||||
- [ ] Test with orchestrator API
|
||||
|
||||
### Testing Workflows
|
||||
|
||||
```bash
|
||||
# Manual test via ComfyUI UI
|
||||
# 1. Load workflow in ComfyUI
|
||||
# 2. Set test parameters
|
||||
# 3. Queue prompt
|
||||
# 4. Verify output quality
|
||||
|
||||
# API test via orchestrator
|
||||
curl -X POST http://localhost:9000/api/comfyui/generate \
|
||||
-H "Content-Type: application/json" \
|
||||
-d @test-payload.json
|
||||
|
||||
# Batch test multiple workflows
|
||||
cd /workspace/ai/comfyui/workflows
|
||||
for workflow in text-to-image/*.json; do
|
||||
echo "Testing $workflow..."
|
||||
# Add test logic here
|
||||
done
|
||||
```
|
||||
|
||||
## Contributing
|
||||
|
||||
When adding new workflows:
|
||||
|
||||
1. Follow naming convention: `{category}-{model}-{type}-production-v{version}.json`
|
||||
2. Place in appropriate category directory
|
||||
3. Update this README with workflow details
|
||||
4. Add to `comfyui_models.yaml` if new models are required
|
||||
5. Test with both UI and API
|
||||
6. Document any special requirements or setup
|
||||
|
||||
## Resources
|
||||
|
||||
- **ComfyUI Documentation**: https://github.com/comfyanonymous/ComfyUI
|
||||
- **Custom Nodes Manager**: Install via ComfyUI-Manager in UI
|
||||
- **Model Registry**: `/workspace/ai/model-orchestrator/models.yaml`
|
||||
- **Ansible Playbook**: `/workspace/ai/playbook.yml`
|
||||
- **Orchestrator API**: http://localhost:9000/docs
|
||||
|
||||
## License
|
||||
|
||||
MIT License - Part of RunPod AI Model Orchestrator
|
||||
|
||||
## Support
|
||||
|
||||
For issues or questions:
|
||||
1. Check ComfyUI logs: `supervisorctl tail -f comfyui`
|
||||
2. Check orchestrator logs: `supervisorctl tail -f orchestrator`
|
||||
3. Review `/workspace/ai/CLAUDE.md` for troubleshooting
|
||||
4. Check GPU status: `nvidia-smi`
|
||||
657
comfyui/workflows/WORKFLOW_STANDARDS.md
Normal file
657
comfyui/workflows/WORKFLOW_STANDARDS.md
Normal file
@@ -0,0 +1,657 @@
|
||||
# ComfyUI Workflow Development Standards
|
||||
|
||||
Production standards and best practices for creating ComfyUI workflows in the RunPod AI Model Orchestrator.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Naming Conventions](#naming-conventions)
|
||||
- [Workflow Structure](#workflow-structure)
|
||||
- [API Integration](#api-integration)
|
||||
- [Error Handling](#error-handling)
|
||||
- [VRAM Optimization](#vram-optimization)
|
||||
- [Quality Assurance](#quality-assurance)
|
||||
- [Documentation Requirements](#documentation-requirements)
|
||||
- [Testing Guidelines](#testing-guidelines)
|
||||
|
||||
## Naming Conventions
|
||||
|
||||
### Workflow Files
|
||||
|
||||
Format: `{category}-{model}-{type}-{environment}-v{version}.json`
|
||||
|
||||
**Components:**
|
||||
- `category`: Descriptive category (flux, sdxl, cogvideox, musicgen, etc.)
|
||||
- `model`: Specific model variant (schnell, dev, small, medium, large)
|
||||
- `type`: Operation type (t2i, i2i, i2v, t2m, upscale)
|
||||
- `environment`: `production` (stable) or `experimental` (testing)
|
||||
- `version`: Semantic versioning (1, 2, 3, etc.)
|
||||
|
||||
**Examples:**
|
||||
- `flux-schnell-t2i-production-v1.json` - FLUX Schnell text-to-image, production version 1
|
||||
- `sdxl-refiner-t2i-production-v2.json` - SDXL with refiner, production version 2
|
||||
- `musicgen-large-t2m-experimental-v1.json` - MusicGen large, experimental version 1
|
||||
|
||||
### Node Naming
|
||||
|
||||
**Descriptive names for all nodes:**
|
||||
```json
|
||||
{
|
||||
"title": "FLUX Schnell Checkpoint Loader",
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Naming patterns:**
|
||||
- Loaders: `{Model} Checkpoint Loader`, `{Model} VAE Loader`
|
||||
- Samplers: `{Model} KSampler`, `{Model} Advanced Sampler`
|
||||
- Inputs: `API Text Input`, `API Image Input`, `API Seed Input`
|
||||
- Outputs: `API Image Output`, `Preview Output`, `Save Output`
|
||||
- Processing: `VAE Encode`, `VAE Decode`, `CLIP Text Encode`
|
||||
|
||||
## Workflow Structure
|
||||
|
||||
### Required Node Groups
|
||||
|
||||
Every production workflow MUST include these node groups:
|
||||
|
||||
#### 1. Input Group
|
||||
```
|
||||
Purpose: Receive parameters from API or UI
|
||||
Nodes:
|
||||
- Text input nodes (prompts, negative prompts)
|
||||
- Numeric input nodes (seed, steps, CFG scale)
|
||||
- Image input nodes (for i2i, i2v workflows)
|
||||
- Model selection nodes (if multiple models supported)
|
||||
```
|
||||
|
||||
#### 2. Model Loading Group
|
||||
```
|
||||
Purpose: Load required models and components
|
||||
Nodes:
|
||||
- Checkpoint/Diffuser loaders
|
||||
- VAE loaders
|
||||
- CLIP text encoders
|
||||
- ControlNet loaders (if applicable)
|
||||
- IP-Adapter loaders (if applicable)
|
||||
```
|
||||
|
||||
#### 3. Processing Group
|
||||
```
|
||||
Purpose: Main generation/transformation logic
|
||||
Nodes:
|
||||
- Samplers (KSampler, Advanced KSampler)
|
||||
- Encoders (CLIP, VAE)
|
||||
- Conditioning nodes
|
||||
- ControlNet application (if applicable)
|
||||
```
|
||||
|
||||
#### 4. Post-Processing Group
|
||||
```
|
||||
Purpose: Refinement and enhancement
|
||||
Nodes:
|
||||
- VAE decoding
|
||||
- Upscaling (if applicable)
|
||||
- Face enhancement (Impact-Pack)
|
||||
- Image adjustments
|
||||
```
|
||||
|
||||
#### 5. Output Group
|
||||
```
|
||||
Purpose: Save and return results
|
||||
Nodes:
|
||||
- SaveImage nodes (for file output)
|
||||
- Preview nodes (for UI feedback)
|
||||
- API output nodes (for orchestrator)
|
||||
```
|
||||
|
||||
#### 6. Error Handling Group (Optional but Recommended)
|
||||
```
|
||||
Purpose: Validation and fallback
|
||||
Nodes:
|
||||
- Validation nodes
|
||||
- Fallback nodes
|
||||
- Error logging nodes
|
||||
```
|
||||
|
||||
### Node Organization
|
||||
|
||||
**Logical flow (left to right, top to bottom):**
|
||||
```
|
||||
[Inputs] → [Model Loading] → [Processing] → [Post-Processing] → [Outputs]
|
||||
↓
|
||||
[Error Handling]
|
||||
```
|
||||
|
||||
**Visual grouping:**
|
||||
- Use node positions to create visual separation
|
||||
- Group related nodes together
|
||||
- Align nodes for readability
|
||||
- Use consistent spacing
|
||||
|
||||
## API Integration
|
||||
|
||||
### Input Nodes
|
||||
|
||||
**Required for API compatibility:**
|
||||
|
||||
1. **Text Inputs** (prompts, negative prompts)
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"text": "A beautiful sunset over mountains",
|
||||
"default": ""
|
||||
},
|
||||
"class_type": "CLIPTextEncode",
|
||||
"title": "API Prompt Input"
|
||||
}
|
||||
```
|
||||
|
||||
2. **Numeric Inputs** (seed, steps, CFG, etc.)
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"seed": 42,
|
||||
"steps": 20,
|
||||
"cfg": 7.5,
|
||||
"sampler_name": "euler_ancestral",
|
||||
"scheduler": "normal"
|
||||
},
|
||||
"class_type": "KSampler",
|
||||
"title": "API Sampler Config"
|
||||
}
|
||||
```
|
||||
|
||||
3. **Image Inputs** (for i2i workflows)
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"image": "",
|
||||
"upload": "image"
|
||||
},
|
||||
"class_type": "LoadImage",
|
||||
"title": "API Image Input"
|
||||
}
|
||||
```
|
||||
|
||||
### Output Nodes
|
||||
|
||||
**Required for orchestrator return:**
|
||||
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"images": ["node_id", 0],
|
||||
"filename_prefix": "ComfyUI"
|
||||
},
|
||||
"class_type": "SaveImage",
|
||||
"title": "API Image Output"
|
||||
}
|
||||
```
|
||||
|
||||
### Parameter Validation
|
||||
|
||||
**Include validation for critical parameters:**
|
||||
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"value": "seed",
|
||||
"min": 0,
|
||||
"max": 4294967295,
|
||||
"default": 42
|
||||
},
|
||||
"class_type": "IntegerInput",
|
||||
"title": "Seed Validator"
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Required Validations
|
||||
|
||||
1. **Model Availability**
|
||||
- Check if checkpoint files exist
|
||||
- Validate model paths
|
||||
- Provide fallback to default models
|
||||
|
||||
2. **Parameter Bounds**
|
||||
- Validate numeric ranges (seed, steps, CFG)
|
||||
- Check dimension constraints (width, height)
|
||||
- Validate string inputs (sampler names, scheduler types)
|
||||
|
||||
3. **VRAM Limits**
|
||||
- Check batch size against VRAM
|
||||
- Validate resolution against VRAM
|
||||
- Enable tiling for large images
|
||||
|
||||
4. **Input Validation**
|
||||
- Verify required inputs are provided
|
||||
- Check image formats and dimensions
|
||||
- Validate prompt lengths
|
||||
|
||||
### Fallback Strategies
|
||||
|
||||
**Default values for missing inputs:**
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"text": "{{prompt | default('A beautiful landscape')}}",
|
||||
"seed": "{{seed | default(42)}}",
|
||||
"steps": "{{steps | default(20)}}"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
**Graceful degradation:**
|
||||
- If refiner unavailable, skip refinement step
|
||||
- If upscaler fails, return base resolution
|
||||
- If face enhancement errors, return unenhanced image
|
||||
|
||||
## VRAM Optimization
|
||||
|
||||
### Model Unloading
|
||||
|
||||
**Explicit model cleanup between stages:**
|
||||
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"model": ["checkpoint_loader", 0]
|
||||
},
|
||||
"class_type": "FreeModel",
|
||||
"title": "Unload Base Model"
|
||||
}
|
||||
```
|
||||
|
||||
**When to unload:**
|
||||
- After base generation, before refinement
|
||||
- After refinement, before upscaling
|
||||
- Between different model types (diffusion → CLIP → VAE)
|
||||
|
||||
### VAE Tiling
|
||||
|
||||
**Enable for high-resolution processing:**
|
||||
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"samples": ["sampler", 0],
|
||||
"vae": ["vae_loader", 0],
|
||||
"tile_size": 512,
|
||||
"overlap": 64
|
||||
},
|
||||
"class_type": "VAEDecodeTiled",
|
||||
"title": "Tiled VAE Decode"
|
||||
}
|
||||
```
|
||||
|
||||
**Tiling thresholds:**
|
||||
- Use tiled VAE for images >1024x1024
|
||||
- Tile size: 512 for 24GB VRAM, 256 for lower
|
||||
- Overlap: 64px minimum for seamless tiles
|
||||
|
||||
### Attention Slicing
|
||||
|
||||
**Reduce memory for large models:**
|
||||
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"model": ["checkpoint_loader", 0],
|
||||
"attention_mode": "sliced"
|
||||
},
|
||||
"class_type": "ModelOptimization",
|
||||
"title": "Enable Attention Slicing"
|
||||
}
|
||||
```
|
||||
|
||||
### Batch Processing
|
||||
|
||||
**VRAM-safe batch sizes:**
|
||||
- FLUX models: batch_size=1
|
||||
- SDXL: batch_size=1-2
|
||||
- SD3.5: batch_size=1
|
||||
- Upscaling: batch_size=1
|
||||
|
||||
**Sequential batching:**
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"mode": "sequential",
|
||||
"batch_size": 1
|
||||
},
|
||||
"class_type": "BatchProcessor"
|
||||
}
|
||||
```
|
||||
|
||||
## Quality Assurance
|
||||
|
||||
### Preview Nodes
|
||||
|
||||
**Include preview at key stages:**
|
||||
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"images": ["vae_decode", 0]
|
||||
},
|
||||
"class_type": "PreviewImage",
|
||||
"title": "Preview Base Generation"
|
||||
}
|
||||
```
|
||||
|
||||
**Preview locations:**
|
||||
- After base generation (before refinement)
|
||||
- After refinement (before upscaling)
|
||||
- After upscaling (final check)
|
||||
- After face enhancement
|
||||
|
||||
### Quality Gates
|
||||
|
||||
**Checkpoints for validation:**
|
||||
|
||||
1. **Resolution Check**
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"image": ["input", 0],
|
||||
"min_width": 512,
|
||||
"min_height": 512,
|
||||
"max_width": 2048,
|
||||
"max_height": 2048
|
||||
},
|
||||
"class_type": "ImageSizeValidator"
|
||||
}
|
||||
```
|
||||
|
||||
2. **Quality Metrics**
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"image": ["vae_decode", 0],
|
||||
"min_quality_score": 0.7
|
||||
},
|
||||
"class_type": "QualityChecker"
|
||||
}
|
||||
```
|
||||
|
||||
### Save Points
|
||||
|
||||
**Save intermediate results:**
|
||||
|
||||
```json
|
||||
{
|
||||
"inputs": {
|
||||
"images": ["base_generation", 0],
|
||||
"filename_prefix": "intermediate/base_"
|
||||
},
|
||||
"class_type": "SaveImage",
|
||||
"title": "Save Base Generation"
|
||||
}
|
||||
```
|
||||
|
||||
**When to save:**
|
||||
- Base generation (before refinement)
|
||||
- After each major processing stage
|
||||
- Before potentially destructive operations
|
||||
|
||||
## Documentation Requirements
|
||||
|
||||
### Workflow Metadata
|
||||
|
||||
**Include in workflow JSON:**
|
||||
|
||||
```json
|
||||
{
|
||||
"workflow_info": {
|
||||
"name": "FLUX Schnell Text-to-Image Production",
|
||||
"version": "1.0.0",
|
||||
"author": "RunPod AI Model Orchestrator",
|
||||
"description": "Fast text-to-image generation using FLUX.1-schnell (4 steps)",
|
||||
"category": "text-to-image",
|
||||
"tags": ["flux", "fast", "production"],
|
||||
"requirements": {
|
||||
"models": ["FLUX.1-schnell"],
|
||||
"custom_nodes": [],
|
||||
"vram_min": "16GB",
|
||||
"vram_recommended": "24GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"description": "Text description of desired image"
|
||||
},
|
||||
"seed": {
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 42,
|
||||
"min": 0,
|
||||
"max": 4294967295
|
||||
},
|
||||
"steps": {
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 4,
|
||||
"min": 1,
|
||||
"max": 20
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"image": {
|
||||
"type": "image",
|
||||
"format": "PNG",
|
||||
"resolution": "1024x1024"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Node Comments
|
||||
|
||||
**Document complex nodes:**
|
||||
|
||||
```json
|
||||
{
|
||||
"title": "FLUX KSampler - Main Generation",
|
||||
"notes": "Using euler_ancestral sampler with 4 steps for FLUX Schnell. CFG=1.0 is optimal for this model. Seed controls reproducibility.",
|
||||
"inputs": {
|
||||
"seed": 42,
|
||||
"steps": 4,
|
||||
"cfg": 1.0
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Usage Examples
|
||||
|
||||
**Include in workflow or README:**
|
||||
|
||||
````markdown
## Example Usage

### ComfyUI Web Interface
1. Load workflow: `text-to-image/flux-schnell-t2i-production-v1.json`
2. Set prompt: "A serene mountain landscape at sunset"
3. Adjust seed: 42 (optional)
4. Click "Queue Prompt"

### Orchestrator API
```bash
curl -X POST http://localhost:9000/api/comfyui/generate \
  -d '{"workflow": "flux-schnell-t2i-production-v1.json", "inputs": {"prompt": "A serene mountain landscape"}}'
```
````
|
||||
|
||||
## Testing Guidelines
|
||||
|
||||
### Manual Testing
|
||||
|
||||
**Required tests before production:**
|
||||
|
||||
1. **UI Test**
|
||||
- Load in ComfyUI web interface
|
||||
- Execute with default parameters
|
||||
- Verify output quality
|
||||
- Check preview nodes
|
||||
- Confirm save locations
|
||||
|
||||
2. **API Test**
|
||||
- Call via orchestrator API
|
||||
- Test with various parameter combinations
|
||||
- Verify JSON response format
|
||||
- Check error handling
|
||||
|
||||
3. **Edge Cases**
|
||||
- Missing optional parameters
|
||||
- Invalid parameter values
|
||||
- Out-of-range inputs
|
||||
- Missing models (graceful failure)
|
||||
|
||||
### Automated Testing
|
||||
|
||||
**Test script template:**
|
||||
|
||||
```bash
|
||||
#!/bin/bash
|
||||
# Test workflow: flux-schnell-t2i-production-v1.json
|
||||
|
||||
WORKFLOW="text-to-image/flux-schnell-t2i-production-v1.json"
|
||||
|
||||
# Test 1: Default parameters
|
||||
curl -X POST http://localhost:9000/api/comfyui/generate \
|
||||
-d "{\"workflow\": \"$WORKFLOW\", \"inputs\": {\"prompt\": \"test image\"}}" \
|
||||
| jq '.status' # Should return "success"
|
||||
|
||||
# Test 2: Custom parameters
|
||||
curl -X POST http://localhost:9000/api/comfyui/generate \
|
||||
-d "{\"workflow\": \"$WORKFLOW\", \"inputs\": {\"prompt\": \"test\", \"seed\": 123, \"steps\": 8}}" \
|
||||
| jq '.status'
|
||||
|
||||
# Test 3: Missing prompt (should use default)
|
||||
curl -X POST http://localhost:9000/api/comfyui/generate \
|
||||
-d "{\"workflow\": \"$WORKFLOW\", \"inputs\": {}}" \
|
||||
| jq '.status'
|
||||
```
|
||||
|
||||
### Performance Testing
|
||||
|
||||
**Measure key metrics:**
|
||||
|
||||
```bash
|
||||
# Generation time
|
||||
time curl -X POST http://localhost:9000/api/comfyui/generate \
|
||||
-d '{"workflow": "flux-schnell-t2i-production-v1.json", "inputs": {"prompt": "benchmark"}}'
|
||||
|
||||
# VRAM usage
|
||||
nvidia-smi --query-gpu=memory.used --format=csv,noheader,nounits -l 1
|
||||
|
||||
# GPU utilization
|
||||
nvidia-smi --query-gpu=utilization.gpu --format=csv,noheader,nounits -l 1
|
||||
```
|
||||
|
||||
**Performance baselines (24GB VRAM):**
|
||||
- FLUX Schnell (1024x1024, 4 steps): ~5-8 seconds
|
||||
- FLUX Dev (1024x1024, 20 steps): ~25-35 seconds
|
||||
- SDXL + Refiner (1024x1024): ~40-60 seconds
|
||||
- CogVideoX (6s video): ~120-180 seconds
|
||||
|
||||
### Load Testing
|
||||
|
||||
**Concurrent request handling:**
|
||||
|
||||
```bash
|
||||
# Test 5 concurrent generations
|
||||
for i in {1..5}; do
|
||||
curl -X POST http://localhost:9000/api/comfyui/generate \
|
||||
-d "{\"workflow\": \"flux-schnell-t2i-production-v1.json\", \"inputs\": {\"prompt\": \"test $i\", \"seed\": $i}}" &
|
||||
done
|
||||
wait
|
||||
```
|
||||
|
||||
## Version Control
|
||||
|
||||
### Semantic Versioning
|
||||
|
||||
**Version increments:**
|
||||
- `v1` → `v2`: Major changes (different models, restructured workflow)
|
||||
- Internal iterations: Keep same version, document changes in git commits
|
||||
|
||||
### Change Documentation
|
||||
|
||||
**Changelog format:**
|
||||
|
||||
```markdown
|
||||
## flux-schnell-t2i-production-v2.json
|
||||
|
||||
### Changes from v1
|
||||
- Added API input validation
|
||||
- Optimized VRAM usage with model unloading
|
||||
- Added preview node after generation
|
||||
- Updated default steps from 4 to 6
|
||||
|
||||
### Breaking Changes
|
||||
- Changed output node structure (requires orchestrator update)
|
||||
|
||||
### Migration Guide
|
||||
- Update API calls to use new parameter names
|
||||
- Clear ComfyUI cache before loading v2
|
||||
```
|
||||
|
||||
### Deprecation Process
|
||||
|
||||
**Sunsetting old versions:**
|
||||
|
||||
1. Mark old version as deprecated in README
|
||||
2. Keep deprecated version for 2 releases
|
||||
3. Add deprecation warning in workflow metadata
|
||||
4. Document migration path to new version
|
||||
5. Archive deprecated workflows in `archive/` directory
|
||||
|
||||
## Best Practices
|
||||
|
||||
### DO
|
||||
|
||||
- Use descriptive node names
|
||||
- Include preview nodes at key stages
|
||||
- Validate all inputs
|
||||
- Optimize for VRAM efficiency
|
||||
- Document all parameters
|
||||
- Test with both UI and API
|
||||
- Version your workflows
|
||||
- Include error handling
|
||||
- Save intermediate results
|
||||
- Use semantic naming
|
||||
|
||||
### DON'T
|
||||
|
||||
- Hardcode file paths
|
||||
- Assume unlimited VRAM
|
||||
- Skip input validation
|
||||
- Omit documentation
|
||||
- Create overly complex workflows
|
||||
- Use experimental nodes in production
|
||||
- Ignore VRAM optimization
|
||||
- Skip testing edge cases
|
||||
- Use unclear node names
|
||||
- Forget to version
|
||||
|
||||
## Resources
|
||||
|
||||
- **ComfyUI Wiki**: https://github.com/comfyanonymous/ComfyUI/wiki
|
||||
- **Custom Nodes List**: https://github.com/ltdrdata/ComfyUI-Manager
|
||||
- **VRAM Optimization Guide**: `/workspace/ai/CLAUDE.md`
|
||||
- **Model Documentation**: `/workspace/ai/COMFYUI_MODELS.md`
|
||||
|
||||
## Support
|
||||
|
||||
For questions or issues:
|
||||
1. Review this standards document
|
||||
2. Check ComfyUI logs: `supervisorctl tail -f comfyui`
|
||||
3. Test workflow in UI before API
|
||||
4. Validate JSON syntax
|
||||
5. Check model availability
|
||||
248
comfyui/workflows/advanced/animatediff-video-production-v1.json
Normal file
248
comfyui/workflows/advanced/animatediff-video-production-v1.json
Normal file
@@ -0,0 +1,248 @@
|
||||
{
|
||||
"last_node_id": 10,
|
||||
"last_link_id": 12,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [50, 100],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "CheckpointLoaderSimple"},
|
||||
"widgets_values": ["v1-5-pruned-emaonly.safetensors"],
|
||||
"title": "SD 1.5 Checkpoint Loader",
|
||||
"outputs": [
|
||||
{"name": "MODEL", "type": "MODEL", "links": [1], "slot_index": 0},
|
||||
{"name": "CLIP", "type": "CLIP", "links": [2, 3], "slot_index": 1},
|
||||
{"name": "VAE", "type": "VAE", "links": [4], "slot_index": 2}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "ADE_LoadAnimateDiffModel",
|
||||
"pos": [50, 300],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "ADE_LoadAnimateDiffModel"},
|
||||
"widgets_values": ["mm_sd_v15_v2.ckpt"],
|
||||
"title": "Load AnimateDiff Motion Module",
|
||||
"outputs": [
|
||||
{"name": "MOTION_MODEL", "type": "MOTION_MODEL_ADE", "links": [5], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "ADE_ApplyAnimateDiffModelSimple",
|
||||
"pos": [450, 300],
|
||||
"size": {"0": 315, "1": 100},
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "ADE_ApplyAnimateDiffModelSimple"},
|
||||
"inputs": [
|
||||
{"name": "motion_model", "type": "MOTION_MODEL_ADE", "link": 5}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "M_MODELS", "type": "M_MODELS", "links": [6], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "ADE_UseEvolvedSampling",
|
||||
"pos": [800, 100],
|
||||
"size": {"0": 315, "1": 100},
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "ADE_UseEvolvedSampling"},
|
||||
"widgets_values": ["sqrt_linear (AnimateDiff)"],
|
||||
"inputs": [
|
||||
{"name": "model", "type": "MODEL", "link": 1},
|
||||
{"name": "m_models", "type": "M_MODELS", "link": 6}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "MODEL", "type": "MODEL", "links": [7], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 500],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "CLIPTextEncode"},
|
||||
"widgets_values": ["A person walking through a forest, cinematic movement"],
|
||||
"title": "API Video Prompt",
|
||||
"inputs": [
|
||||
{"name": "clip", "type": "CLIP", "link": 2}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "CONDITIONING", "type": "CONDITIONING", "links": [8], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 750],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "CLIPTextEncode"},
|
||||
"widgets_values": ["static, blurry, low quality"],
|
||||
"title": "API Negative Prompt",
|
||||
"inputs": [
|
||||
{"name": "clip", "type": "CLIP", "link": 3}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "CONDITIONING", "type": "CONDITIONING", "links": [9], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [800, 300],
|
||||
"size": {"0": 315, "1": 100},
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "EmptyLatentImage"},
|
||||
"widgets_values": [512, 512, 16],
|
||||
"title": "API Latent Config (16 frames)",
|
||||
"outputs": [
|
||||
{"name": "LATENT", "type": "LATENT", "links": [10], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "KSamplerAdvanced",
|
||||
"pos": [1150, 100],
|
||||
"size": {"0": 315, "1": 474},
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "KSamplerAdvanced"},
|
||||
"widgets_values": ["enable", 42, "fixed", 20, 8.0, "euler", "normal", 0, 10000, "disable"],
|
||||
"title": "AnimateDiff Sampler",
|
||||
"inputs": [
|
||||
{"name": "model", "type": "MODEL", "link": 7},
|
||||
{"name": "positive", "type": "CONDITIONING", "link": 8},
|
||||
{"name": "negative", "type": "CONDITIONING", "link": 9},
|
||||
{"name": "latent_image", "type": "LATENT", "link": 10}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "LATENT", "type": "LATENT", "links": [11], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"type": "VAEDecode",
|
||||
"pos": [1500, 100],
|
||||
"size": {"0": 315, "1": 100},
|
||||
"flags": {},
|
||||
"order": 8,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "VAEDecode"},
|
||||
"title": "VAE Decode Video",
|
||||
"inputs": [
|
||||
{"name": "samples", "type": "LATENT", "link": 11},
|
||||
{"name": "vae", "type": "VAE", "link": 4}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "IMAGE", "type": "IMAGE", "links": [12], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"type": "VHS_VideoCombine",
|
||||
"pos": [1800, 100],
|
||||
"size": {"0": 315, "1": 100},
|
||||
"flags": {},
|
||||
"order": 9,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "VHS_VideoCombine"},
|
||||
"widgets_values": [8, 0, "animatediff_output", "video/h264-mp4", false, true, "yuv420p", 19, true, false],
|
||||
"title": "Combine Frames",
|
||||
"inputs": [
|
||||
{"name": "images", "type": "IMAGE", "link": 12}
|
||||
]
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
[1, 1, 0, 4, 0, "MODEL"],
|
||||
[2, 1, 1, 5, 0, "CLIP"],
|
||||
[3, 1, 1, 6, 0, "CLIP"],
|
||||
[4, 1, 2, 9, 1, "VAE"],
|
||||
[5, 2, 0, 3, 0, "MOTION_MODEL_ADE"],
|
||||
[6, 3, 0, 4, 1, "M_MODELS"],
|
||||
[7, 4, 0, 8, 0, "MODEL"],
|
||||
[8, 5, 0, 8, 1, "CONDITIONING"],
|
||||
[9, 6, 0, 8, 2, "CONDITIONING"],
|
||||
[10, 7, 0, 8, 3, "LATENT"],
|
||||
[11, 8, 0, 9, 0, "LATENT"],
|
||||
[12, 9, 0, 10, 0, "IMAGE"]
|
||||
],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "AnimateDiff Video Production",
|
||||
"version": "1.0.0",
|
||||
"description": "Text-to-video generation using AnimateDiff. Create animated sequences from text prompts with natural motion.",
|
||||
"category": "advanced",
|
||||
"tags": [
|
||||
"animatediff",
|
||||
"text-to-video",
|
||||
"animation",
|
||||
"advanced",
|
||||
"production"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"stable-diffusion-v1-5",
|
||||
"animatediff-motion-module-v15"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"ComfyUI-AnimateDiff-Evolved",
|
||||
"ComfyUI-VideoHelperSuite"
|
||||
],
|
||||
        "vram_min": "16GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"node_id": 5,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"description": "Describe action and movement"
|
||||
},
|
||||
"frames": {
|
||||
"node_id": 7,
|
||||
"type": "integer",
|
||||
"default": 16,
|
||||
"description": "Number of frames (8-32)"
|
||||
},
|
||||
"fps": {
|
||||
"node_id": 10,
|
||||
"type": "integer",
|
||||
"default": 8
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "60-90 seconds",
|
||||
"vram_usage": "~16-20GB",
|
||||
"output": "16 frames (~2s @ 8fps)"
|
||||
},
|
||||
"use_cases": [
|
||||
"Text-to-video animation",
|
||||
"Character animations",
|
||||
"Motion graphics",
|
||||
"Animated storyboards"
|
||||
]
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
247
comfyui/workflows/advanced/batch-pipeline-production-v1.json
Normal file
247
comfyui/workflows/advanced/batch-pipeline-production-v1.json
Normal file
@@ -0,0 +1,247 @@
|
||||
{
|
||||
"last_node_id": 10,
|
||||
"last_link_id": 10,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [50, 100],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "CheckpointLoaderSimple"},
|
||||
"widgets_values": ["sd_xl_base_1.0.safetensors"],
|
||||
"title": "SDXL Base Loader",
|
||||
"outputs": [
|
||||
{"name": "MODEL", "type": "MODEL", "links": [1], "slot_index": 0},
|
||||
{"name": "CLIP", "type": "CLIP", "links": [2, 3], "slot_index": 1},
|
||||
{"name": "VAE", "type": "VAE", "links": [4], "slot_index": 2}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 100],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "CLIPTextEncode"},
|
||||
"widgets_values": ["A beautiful landscape"],
|
||||
"title": "API Base Prompt",
|
||||
"inputs": [
|
||||
{"name": "clip", "type": "CLIP", "link": 2}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "CONDITIONING", "type": "CONDITIONING", "links": [5], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 350],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "CLIPTextEncode"},
|
||||
"widgets_values": ["blurry, low quality"],
|
||||
"title": "API Negative Prompt",
|
||||
"inputs": [
|
||||
{"name": "clip", "type": "CLIP", "link": 3}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "CONDITIONING", "type": "CONDITIONING", "links": [6], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [450, 600],
|
||||
"size": {"0": 315, "1": 106},
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "EmptyLatentImage"},
|
||||
"widgets_values": [1024, 1024, 4],
|
||||
"title": "API Latent Config (Batch=4)",
|
||||
"outputs": [
|
||||
{"name": "LATENT", "type": "LATENT", "links": [7], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "KSampler",
|
||||
"pos": [900, 100],
|
||||
"size": {"0": 315, "1": 474},
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "KSampler"},
|
||||
"widgets_values": [42, "fixed", 20, 7.0, "euler", "normal", 1],
|
||||
"title": "Batch Sampler (4 variations)",
|
||||
"inputs": [
|
||||
{"name": "model", "type": "MODEL", "link": 1},
|
||||
{"name": "positive", "type": "CONDITIONING", "link": 5},
|
||||
{"name": "negative", "type": "CONDITIONING", "link": 6},
|
||||
{"name": "latent_image", "type": "LATENT", "link": 7}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "LATENT", "type": "LATENT", "links": [8], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "VAEDecode",
|
||||
"pos": [1270, 100],
|
||||
"size": {"0": 210, "1": 46},
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "VAEDecode"},
|
||||
"title": "VAE Decode Batch",
|
||||
"inputs": [
|
||||
{"name": "samples", "type": "LATENT", "link": 8},
|
||||
{"name": "vae", "type": "VAE", "link": 4}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "IMAGE", "type": "IMAGE", "links": [9, 10], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "PreviewImage",
|
||||
"pos": [1530, 100],
|
||||
"size": {"0": 400, "1": 400},
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "PreviewImage"},
|
||||
"title": "Preview All Variations",
|
||||
"inputs": [
|
||||
{"name": "images", "type": "IMAGE", "link": 9}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "SaveImage",
|
||||
"pos": [1530, 550],
|
||||
"size": {"0": 400, "1": 100},
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "SaveImage"},
|
||||
"widgets_values": ["batch_output"],
|
||||
"title": "API Save All",
|
||||
"inputs": [
|
||||
{"name": "images", "type": "IMAGE", "link": 10}
|
||||
]
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
[1, 1, 0, 5, 0, "MODEL"],
|
||||
[2, 1, 1, 2, 0, "CLIP"],
|
||||
[3, 1, 1, 3, 0, "CLIP"],
|
||||
[4, 1, 2, 6, 1, "VAE"],
|
||||
[5, 2, 0, 5, 1, "CONDITIONING"],
|
||||
[6, 3, 0, 5, 2, "CONDITIONING"],
|
||||
[7, 4, 0, 5, 3, "LATENT"],
|
||||
[8, 5, 0, 6, 0, "LATENT"],
|
||||
[9, 6, 0, 7, 0, "IMAGE"],
|
||||
[10, 6, 0, 8, 0, "IMAGE"]
|
||||
],
|
||||
"groups": [],
|
||||
"config": {},
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "Batch Pipeline Production",
|
||||
"version": "1.0.0",
|
||||
"description": "Batch generation pipeline for multiple variations. Generate 4 images simultaneously with different seeds for rapid iteration using Stable Diffusion XL.",
|
||||
"category": "advanced",
|
||||
"tags": [
|
||||
"batch",
|
||||
"multi-generation",
|
||||
"variations",
|
||||
"advanced",
|
||||
"production",
|
||||
        "sdxl"
|
||||
],
|
||||
"requirements": {
|
||||
"models": ["stable-diffusion-xl-base-1.0"],
|
||||
"custom_nodes": [],
|
||||
"vram_min": "20GB",
|
||||
"vram_recommended": "24GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"node_id": 2,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"default": "A beautiful landscape",
|
||||
"description": "Text description of desired image"
|
||||
},
|
||||
"negative_prompt": {
|
||||
"node_id": 3,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"default": "blurry, low quality",
|
||||
"description": "Undesired elements to avoid"
|
||||
},
|
||||
"batch_count": {
|
||||
"node_id": 4,
|
||||
"widget_index": 2,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 4,
|
||||
"min": 1,
|
||||
"max": 8,
|
||||
"description": "Number of variations to generate (batch size)"
|
||||
},
|
||||
"seed": {
|
||||
"node_id": 5,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 42,
|
||||
"min": 0,
|
||||
"max": 4294967295,
|
||||
"description": "Random seed for reproducibility"
|
||||
},
|
||||
"steps": {
|
||||
"node_id": 5,
|
||||
"widget_index": 2,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 20,
|
||||
"min": 15,
|
||||
"max": 50,
|
||||
"description": "Number of sampling steps (20-30 recommended for SDXL)"
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"images": {
|
||||
"node_id": 8,
|
||||
"type": "image",
|
||||
"format": "PNG",
|
||||
"count": 4,
|
||||
"resolution": "1024x1024 (configurable)"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "45-60 seconds for 4 images (20 steps)",
|
||||
"vram_usage": "~20-24GB (depends on batch size)",
|
||||
"gpu_utilization": "95-100%"
|
||||
},
|
||||
"use_cases": [
|
||||
"Rapid prototyping with multiple variations",
|
||||
"Concept exploration and A/B testing",
|
||||
"Client presentations with options",
|
||||
"Quick iteration workflows"
|
||||
]
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
399
comfyui/workflows/advanced/controlnet-fusion-production-v1.json
Normal file
399
comfyui/workflows/advanced/controlnet-fusion-production-v1.json
Normal file
@@ -0,0 +1,399 @@
|
||||
{
|
||||
"last_node_id": 14,
|
||||
"last_link_id": 18,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [50, 100],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "CheckpointLoaderSimple"},
|
||||
"widgets_values": ["sd_xl_base_1.0.safetensors"],
|
||||
"title": "SDXL Base Loader",
|
||||
"outputs": [
|
||||
{"name": "MODEL", "type": "MODEL", "links": [1], "slot_index": 0},
|
||||
{"name": "CLIP", "type": "CLIP", "links": [2, 3], "slot_index": 1},
|
||||
{"name": "VAE", "type": "VAE", "links": [4], "slot_index": 2}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 100],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "CLIPTextEncode"},
|
||||
"widgets_values": ["A futuristic city with precise architecture"],
|
||||
"title": "API Positive Prompt",
|
||||
"inputs": [
|
||||
{"name": "clip", "type": "CLIP", "link": 2}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "CONDITIONING", "type": "CONDITIONING", "links": [9], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 350],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "CLIPTextEncode"},
|
||||
"widgets_values": ["blurry, low quality, distorted"],
|
||||
"title": "API Negative Prompt",
|
||||
"inputs": [
|
||||
{"name": "clip", "type": "CLIP", "link": 3}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "CONDITIONING", "type": "CONDITIONING", "links": [10], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "ControlNetLoader",
|
||||
"pos": [50, 300],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "ControlNetLoader"},
|
||||
"widgets_values": ["controlnet-depth-sdxl-1.0.safetensors"],
|
||||
"title": "Load Depth ControlNet",
|
||||
"outputs": [
|
||||
{"name": "CONTROL_NET", "type": "CONTROL_NET", "links": [7], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "LoadImage",
|
||||
"pos": [50, 450],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "LoadImage"},
|
||||
"widgets_values": ["examples/control_depth.png"],
|
||||
"title": "API Depth Image",
|
||||
"outputs": [
|
||||
{"name": "IMAGE", "type": "IMAGE", "links": [5], "slot_index": 0},
|
||||
{"name": "MASK", "type": "MASK", "links": null, "slot_index": 1}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "ControlNetApplyAdvanced",
|
||||
"pos": [900, 100],
|
||||
"size": {"0": 315, "1": 186},
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "ControlNetApplyAdvanced"},
|
||||
"widgets_values": [0.8, 0.0, 1.0],
|
||||
"title": "Apply Depth Control",
|
||||
"inputs": [
|
||||
{"name": "positive", "type": "CONDITIONING", "link": 9},
|
||||
{"name": "negative", "type": "CONDITIONING", "link": 10},
|
||||
{"name": "control_net", "type": "CONTROL_NET", "link": 7},
|
||||
{"name": "image", "type": "IMAGE", "link": 5}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "positive", "type": "CONDITIONING", "links": [11], "slot_index": 0},
|
||||
{"name": "negative", "type": "CONDITIONING", "links": [12], "slot_index": 1}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "ControlNetLoader",
|
||||
"pos": [50, 600],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "ControlNetLoader"},
|
||||
"widgets_values": ["controlnet-canny-sdxl-1.0.safetensors"],
|
||||
"title": "Load Canny ControlNet",
|
||||
"outputs": [
|
||||
{"name": "CONTROL_NET", "type": "CONTROL_NET", "links": [8], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "LoadImage",
|
||||
"pos": [50, 750],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "LoadImage"},
|
||||
"widgets_values": ["examples/control_canny.png"],
|
||||
"title": "API Canny Image",
|
||||
"outputs": [
|
||||
{"name": "IMAGE", "type": "IMAGE", "links": [6], "slot_index": 0},
|
||||
{"name": "MASK", "type": "MASK", "links": null, "slot_index": 1}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"type": "ControlNetApplyAdvanced",
|
||||
"pos": [900, 400],
|
||||
"size": {"0": 315, "1": 186},
|
||||
"flags": {},
|
||||
"order": 8,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "ControlNetApplyAdvanced"},
|
||||
"widgets_values": [0.6, 0.0, 1.0],
|
||||
"title": "Apply Canny Control",
|
||||
"inputs": [
|
||||
{"name": "positive", "type": "CONDITIONING", "link": 11},
|
||||
{"name": "negative", "type": "CONDITIONING", "link": 12},
|
||||
{"name": "control_net", "type": "CONTROL_NET", "link": 8},
|
||||
{"name": "image", "type": "IMAGE", "link": 6}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "positive", "type": "CONDITIONING", "links": [13], "slot_index": 0},
|
||||
{"name": "negative", "type": "CONDITIONING", "links": [14], "slot_index": 1}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [450, 600],
|
||||
"size": {"0": 315, "1": 106},
|
||||
"flags": {},
|
||||
"order": 9,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "EmptyLatentImage"},
|
||||
"widgets_values": [1024, 1024, 1],
|
||||
"title": "API Latent Config",
|
||||
"outputs": [
|
||||
{"name": "LATENT", "type": "LATENT", "links": [15], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 11,
|
||||
"type": "KSampler",
|
||||
"pos": [1270, 100],
|
||||
"size": {"0": 315, "1": 474},
|
||||
"flags": {},
|
||||
"order": 10,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "KSampler"},
|
||||
"widgets_values": [42, "fixed", 30, 7.0, "euler", "normal", 1],
|
||||
"title": "ControlNet Sampler",
|
||||
"inputs": [
|
||||
{"name": "model", "type": "MODEL", "link": 1},
|
||||
{"name": "positive", "type": "CONDITIONING", "link": 13},
|
||||
{"name": "negative", "type": "CONDITIONING", "link": 14},
|
||||
{"name": "latent_image", "type": "LATENT", "link": 15}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "LATENT", "type": "LATENT", "links": [16], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 12,
|
||||
"type": "VAEDecode",
|
||||
"pos": [1640, 100],
|
||||
"size": {"0": 210, "1": 46},
|
||||
"flags": {},
|
||||
"order": 11,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "VAEDecode"},
|
||||
"title": "VAE Decode",
|
||||
"inputs": [
|
||||
{"name": "samples", "type": "LATENT", "link": 16},
|
||||
{"name": "vae", "type": "VAE", "link": 4}
|
||||
],
|
||||
"outputs": [
|
||||
{"name": "IMAGE", "type": "IMAGE", "links": [17, 18], "slot_index": 0}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 13,
|
||||
"type": "PreviewImage",
|
||||
"pos": [1900, 100],
|
||||
"size": {"0": 400, "1": 400},
|
||||
"flags": {},
|
||||
"order": 12,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "PreviewImage"},
|
||||
"title": "Preview Result",
|
||||
"inputs": [
|
||||
{"name": "images", "type": "IMAGE", "link": 17}
|
||||
]
|
||||
},
|
||||
{
|
||||
"id": 14,
|
||||
"type": "SaveImage",
|
||||
"pos": [1900, 550],
|
||||
"size": {"0": 400, "1": 100},
|
||||
"flags": {},
|
||||
"order": 13,
|
||||
"mode": 0,
|
||||
"properties": {"Node name for S&R": "SaveImage"},
|
||||
"widgets_values": ["controlnet_output"],
|
||||
"title": "API Save Image",
|
||||
"inputs": [
|
||||
{"name": "images", "type": "IMAGE", "link": 18}
|
||||
]
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
[1, 1, 0, 11, 0, "MODEL"],
|
||||
[2, 1, 1, 2, 0, "CLIP"],
|
||||
[3, 1, 1, 3, 0, "CLIP"],
|
||||
[4, 1, 2, 12, 1, "VAE"],
|
||||
[5, 5, 0, 6, 3, "IMAGE"],
|
||||
[6, 8, 0, 9, 3, "IMAGE"],
|
||||
[7, 4, 0, 6, 2, "CONTROL_NET"],
|
||||
[8, 7, 0, 9, 2, "CONTROL_NET"],
|
||||
[9, 2, 0, 6, 0, "CONDITIONING"],
|
||||
[10, 3, 0, 6, 1, "CONDITIONING"],
|
||||
[11, 6, 0, 9, 0, "CONDITIONING"],
|
||||
[12, 6, 1, 9, 1, "CONDITIONING"],
|
||||
[13, 9, 0, 11, 1, "CONDITIONING"],
|
||||
[14, 9, 1, 11, 2, "CONDITIONING"],
|
||||
[15, 10, 0, 11, 3, "LATENT"],
|
||||
[16, 11, 0, 12, 0, "LATENT"],
|
||||
[17, 12, 0, 13, 0, "IMAGE"],
|
||||
[18, 12, 0, 14, 0, "IMAGE"]
|
||||
],
|
||||
"groups": [],
|
||||
"config": {},
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "ControlNet Fusion Production",
|
||||
"version": "1.0.0",
|
||||
"description": "Multi-ControlNet workflow combining depth and canny edge control for precise composition. Chain multiple control methods for maximum control over generation.",
|
||||
"category": "advanced",
|
||||
"tags": [
|
||||
"controlnet",
|
||||
"multi-control",
|
||||
"depth",
|
||||
"canny",
|
||||
"advanced",
|
||||
"production",
|
||||
"sdxl"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"stable-diffusion-xl-base-1.0",
|
||||
"controlnet-depth-sdxl-1.0",
|
||||
"controlnet-canny-sdxl-1.0"
|
||||
],
|
||||
"custom_nodes": [],
|
||||
"vram_min": "16GB",
|
||||
"vram_recommended": "24GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"node_id": 2,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"default": "A futuristic city with precise architecture",
|
||||
"description": "Text description of desired image"
|
||||
},
|
||||
"negative_prompt": {
|
||||
"node_id": 3,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"default": "blurry, low quality, distorted",
|
||||
"description": "Undesired elements to avoid"
|
||||
},
|
||||
"depth_image": {
|
||||
"node_id": 5,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"default": "examples/control_depth.png",
|
||||
"description": "Depth map image for spatial control"
|
||||
},
|
||||
"canny_image": {
|
||||
"node_id": 8,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"default": "examples/control_canny.png",
|
||||
"description": "Canny edge image for structure control"
|
||||
},
|
||||
"depth_strength": {
|
||||
"node_id": 6,
|
||||
"widget_index": 0,
|
||||
"type": "float",
|
||||
"required": false,
|
||||
"default": 0.8,
|
||||
"min": 0.0,
|
||||
"max": 2.0,
|
||||
"description": "Strength of depth control (0.0-2.0)"
|
||||
},
|
||||
"canny_strength": {
|
||||
"node_id": 9,
|
||||
"widget_index": 0,
|
||||
"type": "float",
|
||||
"required": false,
|
||||
"default": 0.6,
|
||||
"min": 0.0,
|
||||
"max": 2.0,
|
||||
"description": "Strength of canny edge control (0.0-2.0)"
|
||||
},
|
||||
"steps": {
|
||||
"node_id": 11,
|
||||
"widget_index": 2,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 30,
|
||||
"min": 20,
|
||||
"max": 50,
|
||||
"description": "Number of sampling steps (30-40 recommended for ControlNet)"
|
||||
},
|
||||
"seed": {
|
||||
"node_id": 11,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 42,
|
||||
"min": 0,
|
||||
"max": 4294967295,
|
||||
"description": "Random seed for reproducibility"
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"images": {
|
||||
"node_id": 14,
|
||||
"type": "image",
|
||||
"format": "PNG",
|
||||
"count": 1,
|
||||
"resolution": "1024x1024 (configurable)"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "50-70 seconds (30 steps)",
|
||||
"vram_usage": "~18-22GB (dual ControlNet + SDXL)",
|
||||
"gpu_utilization": "95-100%"
|
||||
},
|
||||
"use_cases": [
|
||||
"Architectural visualization with precise control",
|
||||
"Product renders with exact composition",
|
||||
"Character poses with depth and edge guidance",
|
||||
"Complex scene generation with multiple constraints"
|
||||
],
|
||||
"notes": [
|
||||
"Example control images provided in examples/ directory",
|
||||
"Depth map: Grayscale image where brightness = distance from camera",
|
||||
"Canny edges: White edges on black background",
|
||||
"Adjust control strengths to balance control vs. creativity",
|
||||
"Higher steps (30-40) recommended for best quality with ControlNet"
|
||||
]
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
BIN
comfyui/workflows/advanced/examples/control_canny.png
Normal file
BIN
comfyui/workflows/advanced/examples/control_canny.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 4.8 KiB |
BIN
comfyui/workflows/advanced/examples/control_depth.png
Normal file
BIN
comfyui/workflows/advanced/examples/control_depth.png
Normal file
Binary file not shown.
|
After Width: | Height: | Size: 20 KiB |
@@ -0,0 +1,381 @@
|
||||
{
|
||||
"last_node_id": 18,
|
||||
"last_link_id": 25,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"diffusers/stable-diffusion-xl-base-1.0"
|
||||
],
|
||||
"title": "SDXL Checkpoint Loader",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "LoadImage",
|
||||
"pos": [
|
||||
50,
|
||||
300
|
||||
],
|
||||
"widgets_values": [
|
||||
"composition_ref1.png",
|
||||
"image"
|
||||
],
|
||||
"title": "API Composition Ref 1",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "LoadImage",
|
||||
"pos": [
|
||||
50,
|
||||
650
|
||||
],
|
||||
"widgets_values": [
|
||||
"composition_ref2.png",
|
||||
"image"
|
||||
],
|
||||
"title": "API Composition Ref 2",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "IPAdapterUnifiedLoader",
|
||||
"pos": [
|
||||
450,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"PLUS (high strength)"
|
||||
],
|
||||
"title": "IP-Adapter Loader 1",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "IPAdapterUnifiedLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "IPAdapterUnifiedLoader",
|
||||
"pos": [
|
||||
450,
|
||||
250
|
||||
],
|
||||
"widgets_values": [
|
||||
"PLUS (high strength)"
|
||||
],
|
||||
"title": "IP-Adapter Loader 2",
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "IPAdapterUnifiedLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "IPAdapter",
|
||||
"pos": [
|
||||
800,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
0.6,
|
||||
0.0,
|
||||
"original"
|
||||
],
|
||||
"title": "Apply IP-Adapter 1",
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "IPAdapter"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "IPAdapter",
|
||||
"pos": [
|
||||
1100,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
0.5,
|
||||
0.0,
|
||||
"original"
|
||||
],
|
||||
"title": "Apply IP-Adapter 2",
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "IPAdapter"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
450,
|
||||
500
|
||||
],
|
||||
"widgets_values": [
|
||||
"Complex scene composition, detailed, professional"
|
||||
],
|
||||
"title": "API Positive Prompt",
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
450,
|
||||
750
|
||||
],
|
||||
"widgets_values": [
|
||||
"blurry, low quality"
|
||||
],
|
||||
"title": "API Negative Prompt",
|
||||
"flags": {},
|
||||
"order": 8,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [
|
||||
800,
|
||||
500
|
||||
],
|
||||
"widgets_values": [
|
||||
1024,
|
||||
1024,
|
||||
1
|
||||
],
|
||||
"title": "API Latent Config",
|
||||
"flags": {},
|
||||
"order": 9,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "EmptyLatentImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 11,
|
||||
"type": "KSampler",
|
||||
"pos": [
|
||||
1400,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
35,
|
||||
7.0,
|
||||
"dpmpp_2m",
|
||||
"karras",
|
||||
1
|
||||
],
|
||||
"title": "Multi-Composition Sampler",
|
||||
"flags": {},
|
||||
"order": 10,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "KSampler"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 474
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 12,
|
||||
"type": "VAEDecode",
|
||||
"pos": [
|
||||
1750,
|
||||
100
|
||||
],
|
||||
"title": "VAE Decode",
|
||||
"flags": {},
|
||||
"order": 11,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 13,
|
||||
"type": "PreviewImage",
|
||||
"pos": [
|
||||
2000,
|
||||
100
|
||||
],
|
||||
"title": "Preview Output",
|
||||
"flags": {},
|
||||
"order": 12,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 14,
|
||||
"type": "SaveImage",
|
||||
"pos": [
|
||||
2000,
|
||||
550
|
||||
],
|
||||
"widgets_values": [
|
||||
"ipadapter_composition_output"
|
||||
],
|
||||
"title": "API Image Output",
|
||||
"flags": {},
|
||||
"order": 13,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "SaveImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"links": [],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "IP-Adapter Multi-Composition Image-to-Image Production",
|
||||
"version": "1.0.0",
|
||||
"description": "Complex scene composition using multiple IP-Adapter references. Combine visual elements from multiple source images.",
|
||||
"category": "image-to-image",
|
||||
"tags": [
|
||||
"ipadapter",
|
||||
"composition",
|
||||
"multi-reference",
|
||||
"i2i",
|
||||
"production"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"stable-diffusion-xl-base-1.0",
|
||||
"ip-adapter-plus"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"ComfyUI_IPAdapter_plus"
|
||||
],
|
||||
"vram_min": "18GB"
|
||||
},
|
||||
"parameters": {
|
||||
"ref_image_1": {
|
||||
"node_id": 2,
|
||||
"type": "image",
|
||||
"required": true,
|
||||
"description": "First composition reference"
|
||||
},
|
||||
"ref_image_2": {
|
||||
"node_id": 3,
|
||||
"type": "image",
|
||||
"required": true,
|
||||
"description": "Second composition reference"
|
||||
},
|
||||
"weight_1": {
|
||||
"node_id": 6,
|
||||
"type": "float",
|
||||
"default": 0.6,
|
||||
"description": "Weight for first reference"
|
||||
},
|
||||
"weight_2": {
|
||||
"node_id": 7,
|
||||
"type": "float",
|
||||
"default": 0.5,
|
||||
"description": "Weight for second reference"
|
||||
}
|
||||
},
|
||||
"use_cases": [
|
||||
"Multi-source scene composition",
|
||||
"Blend multiple visual concepts",
|
||||
"Complex artistic compositions",
|
||||
"Style mixing"
|
||||
]
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
@@ -0,0 +1,309 @@
|
||||
{
|
||||
"last_node_id": 15,
|
||||
"last_link_id": 20,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
},
|
||||
"widgets_values": [
|
||||
"diffusers/stable-diffusion-xl-base-1.0"
|
||||
],
|
||||
"title": "SDXL Base Checkpoint Loader",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "LoadImage",
|
||||
"pos": [
|
||||
50,
|
||||
300
|
||||
],
|
||||
"size": [
|
||||
315,
|
||||
314
|
||||
],
|
||||
"widgets_values": [
|
||||
"face_reference.png",
|
||||
"image"
|
||||
],
|
||||
"title": "API Face Reference Input",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadImage"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "IPAdapterUnifiedLoader",
|
||||
"pos": [
|
||||
450,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 78
|
||||
},
|
||||
"widgets_values": [
|
||||
"FACE"
|
||||
],
|
||||
"title": "IP-Adapter Face Loader",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "IPAdapterUnifiedLoader"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "IPAdapterFaceID",
|
||||
"pos": [
|
||||
800,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 258
|
||||
},
|
||||
"widgets_values": [
|
||||
0.85,
|
||||
0.0,
|
||||
"original",
|
||||
0.0,
|
||||
1.0,
|
||||
true
|
||||
],
|
||||
"title": "Apply IP-Adapter Face",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "IPAdapterFaceID"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
450,
|
||||
400
|
||||
],
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
},
|
||||
"widgets_values": [
|
||||
"A professional portrait, studio lighting, detailed face"
|
||||
],
|
||||
"title": "API Positive Prompt",
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
450,
|
||||
650
|
||||
],
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
},
|
||||
"widgets_values": [
|
||||
"blurry, distorted face, low quality"
|
||||
],
|
||||
"title": "API Negative Prompt",
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [
|
||||
800,
|
||||
450
|
||||
],
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 106
|
||||
},
|
||||
"widgets_values": [
|
||||
1024,
|
||||
1024,
|
||||
1
|
||||
],
|
||||
"title": "API Latent Image Config",
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "EmptyLatentImage"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "KSampler",
|
||||
"pos": [
|
||||
1170,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 474
|
||||
},
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
30,
|
||||
6.5,
|
||||
"dpmpp_2m",
|
||||
"karras",
|
||||
1
|
||||
],
|
||||
"title": "Sampler with Face",
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "KSampler"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"type": "VAEDecode",
|
||||
"pos": [
|
||||
1540,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 210,
|
||||
"1": 46
|
||||
},
|
||||
"title": "VAE Decode",
|
||||
"flags": {},
|
||||
"order": 8,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"type": "PreviewImage",
|
||||
"pos": [
|
||||
1800,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 400
|
||||
},
|
||||
"title": "Preview Output",
|
||||
"flags": {},
|
||||
"order": 9,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 11,
|
||||
"type": "SaveImage",
|
||||
"pos": [
|
||||
1800,
|
||||
550
|
||||
],
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 100
|
||||
},
|
||||
"widgets_values": [
|
||||
"ipadapter_face_output"
|
||||
],
|
||||
"title": "API Image Output",
|
||||
"flags": {},
|
||||
"order": 10,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "SaveImage"
|
||||
}
|
||||
}
|
||||
],
|
||||
"links": [],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "IP-Adapter Face Portrait Image-to-Image Production",
|
||||
"version": "1.0.0",
|
||||
"description": "Face-focused generation using IP-Adapter Face model. Transfer facial features from reference to generate new portraits or perform face swaps.",
|
||||
"category": "image-to-image",
|
||||
"tags": [
|
||||
"ipadapter",
|
||||
"face",
|
||||
"portrait",
|
||||
"i2i",
|
||||
"production"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"stable-diffusion-xl-base-1.0",
|
||||
"ip-adapter-face"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"ComfyUI_IPAdapter_plus"
|
||||
],
|
||||
"vram_min": "16GB"
|
||||
},
|
||||
"parameters": {
|
||||
"face_image": {
|
||||
"node_id": 2,
|
||||
"type": "image",
|
||||
"required": true,
|
||||
"description": "Reference face image"
|
||||
},
|
||||
"prompt": {
|
||||
"node_id": 5,
|
||||
"type": "string",
|
||||
"default": "A professional portrait",
|
||||
"description": "Portrait description"
|
||||
},
|
||||
"face_weight": {
|
||||
"node_id": 4,
|
||||
"type": "float",
|
||||
"default": 0.85,
|
||||
"description": "Face similarity strength (0.85 recommended)"
|
||||
}
|
||||
},
|
||||
"use_cases": [
|
||||
"Portrait generation with specific face",
|
||||
"Face swap in different contexts",
|
||||
"Consistent character portraits",
|
||||
"Professional headshots"
|
||||
]
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
@@ -0,0 +1,725 @@
|
||||
{
|
||||
"last_node_id": 15,
|
||||
"last_link_id": 20,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
},
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "MODEL",
|
||||
"type": "MODEL",
|
||||
"links": [
|
||||
1
|
||||
],
|
||||
"slot_index": 0
|
||||
},
|
||||
{
|
||||
"name": "CLIP",
|
||||
"type": "CLIP",
|
||||
"links": [
|
||||
2,
|
||||
3
|
||||
],
|
||||
"slot_index": 1
|
||||
},
|
||||
{
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"links": [
|
||||
4
|
||||
],
|
||||
"slot_index": 2
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
},
|
||||
"widgets_values": [
|
||||
"diffusers/stable-diffusion-xl-base-1.0"
|
||||
],
|
||||
"title": "SDXL Base Checkpoint Loader"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "LoadImage",
|
||||
"pos": [
|
||||
50,
|
||||
300
|
||||
],
|
||||
"size": [
|
||||
315,
|
||||
314
|
||||
],
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "IMAGE",
|
||||
"type": "IMAGE",
|
||||
"links": [
|
||||
11
|
||||
],
|
||||
"shape": 3
|
||||
},
|
||||
{
|
||||
"name": "MASK",
|
||||
"type": "MASK",
|
||||
"links": null,
|
||||
"shape": 3
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadImage"
|
||||
},
|
||||
"widgets_values": [
|
||||
"style_reference.png",
|
||||
"image"
|
||||
],
|
||||
"title": "API Style Reference Input"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "IPAdapterUnifiedLoader",
|
||||
"pos": [
|
||||
450,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 78
|
||||
},
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "model",
|
||||
"type": "MODEL",
|
||||
"link": 1
|
||||
},
|
||||
{
|
||||
"name": "ipadapter",
|
||||
"type": "IPADAPTER",
|
||||
"link": null
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "model",
|
||||
"type": "MODEL",
|
||||
"links": [
|
||||
12
|
||||
],
|
||||
"shape": 3,
|
||||
"slot_index": 0
|
||||
},
|
||||
{
|
||||
"name": "ipadapter",
|
||||
"type": "IPADAPTER",
|
||||
"links": [
|
||||
13
|
||||
],
|
||||
"shape": 3,
|
||||
"slot_index": 1
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "IPAdapterUnifiedLoader"
|
||||
},
|
||||
"widgets_values": [
|
||||
"PLUS (high strength)"
|
||||
],
|
||||
"title": "IP-Adapter Loader"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "IPAdapter",
|
||||
"pos": [
|
||||
800,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 258
|
||||
},
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "ipadapter",
|
||||
"type": "IPADAPTER",
|
||||
"link": 13
|
||||
},
|
||||
{
|
||||
"name": "clip_vision",
|
||||
"type": "CLIP_VISION",
|
||||
"link": null
|
||||
},
|
||||
{
|
||||
"name": "image",
|
||||
"type": "IMAGE",
|
||||
"link": 11
|
||||
},
|
||||
{
|
||||
"name": "model",
|
||||
"type": "MODEL",
|
||||
"link": 12
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "MODEL",
|
||||
"type": "MODEL",
|
||||
"links": [
|
||||
14
|
||||
],
|
||||
"shape": 3,
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "IPAdapter"
|
||||
},
|
||||
"widgets_values": [
|
||||
0.75,
|
||||
0.0,
|
||||
"original",
|
||||
0.0,
|
||||
1.0,
|
||||
false
|
||||
],
|
||||
"title": "Apply IP-Adapter Style"
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
450,
|
||||
400
|
||||
],
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
},
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 2
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [
|
||||
5
|
||||
],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": [
|
||||
"A portrait of a person, highly detailed, professional photography"
|
||||
],
|
||||
"title": "API Positive Prompt"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
450,
|
||||
650
|
||||
],
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
},
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 3
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [
|
||||
6
|
||||
],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": [
|
||||
"blurry, low quality, distorted, deformed"
|
||||
],
|
||||
"title": "API Negative Prompt"
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [
|
||||
800,
|
||||
450
|
||||
],
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 106
|
||||
},
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [
|
||||
7
|
||||
],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "EmptyLatentImage"
|
||||
},
|
||||
"widgets_values": [
|
||||
1024,
|
||||
1024,
|
||||
1
|
||||
],
|
||||
"title": "API Latent Image Config"
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "KSampler",
|
||||
"pos": [
|
||||
1170,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 474
|
||||
},
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "model",
|
||||
"type": "MODEL",
|
||||
"link": 14
|
||||
},
|
||||
{
|
||||
"name": "positive",
|
||||
"type": "CONDITIONING",
|
||||
"link": 5
|
||||
},
|
||||
{
|
||||
"name": "negative",
|
||||
"type": "CONDITIONING",
|
||||
"link": 6
|
||||
},
|
||||
{
|
||||
"name": "latent_image",
|
||||
"type": "LATENT",
|
||||
"link": 7
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [
|
||||
8
|
||||
],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "KSampler"
|
||||
},
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
30,
|
||||
6.5,
|
||||
"dpmpp_2m",
|
||||
"karras",
|
||||
1
|
||||
],
|
||||
"title": "Sampler with Style"
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"type": "VAEDecode",
|
||||
"pos": [
|
||||
1540,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 210,
|
||||
"1": 46
|
||||
},
|
||||
"flags": {},
|
||||
"order": 8,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "samples",
|
||||
"type": "LATENT",
|
||||
"link": 8
|
||||
},
|
||||
{
|
||||
"name": "vae",
|
||||
"type": "VAE",
|
||||
"link": 4
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "IMAGE",
|
||||
"type": "IMAGE",
|
||||
"links": [
|
||||
9,
|
||||
10
|
||||
],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"title": "VAE Decode"
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"type": "PreviewImage",
|
||||
"pos": [
|
||||
1800,
|
||||
100
|
||||
],
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 400
|
||||
},
|
||||
"flags": {},
|
||||
"order": 9,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 9
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
},
|
||||
"title": "Preview Output"
|
||||
},
|
||||
{
|
||||
"id": 11,
|
||||
"type": "SaveImage",
|
||||
"pos": [
|
||||
1800,
|
||||
550
|
||||
],
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 100
|
||||
},
|
||||
"flags": {},
|
||||
"order": 10,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 10
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "SaveImage"
|
||||
},
|
||||
"widgets_values": [
|
||||
"ipadapter_style_output"
|
||||
],
|
||||
"title": "API Image Output"
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
[
|
||||
1,
|
||||
1,
|
||||
0,
|
||||
3,
|
||||
0,
|
||||
"MODEL"
|
||||
],
|
||||
[
|
||||
2,
|
||||
1,
|
||||
1,
|
||||
5,
|
||||
0,
|
||||
"CLIP"
|
||||
],
|
||||
[
|
||||
3,
|
||||
1,
|
||||
1,
|
||||
6,
|
||||
0,
|
||||
"CLIP"
|
||||
],
|
||||
[
|
||||
4,
|
||||
1,
|
||||
2,
|
||||
9,
|
||||
1,
|
||||
"VAE"
|
||||
],
|
||||
[
|
||||
5,
|
||||
5,
|
||||
0,
|
||||
8,
|
||||
1,
|
||||
"CONDITIONING"
|
||||
],
|
||||
[
|
||||
6,
|
||||
6,
|
||||
0,
|
||||
8,
|
||||
2,
|
||||
"CONDITIONING"
|
||||
],
|
||||
[
|
||||
7,
|
||||
7,
|
||||
0,
|
||||
8,
|
||||
3,
|
||||
"LATENT"
|
||||
],
|
||||
[
|
||||
8,
|
||||
8,
|
||||
0,
|
||||
9,
|
||||
0,
|
||||
"LATENT"
|
||||
],
|
||||
[
|
||||
9,
|
||||
9,
|
||||
0,
|
||||
10,
|
||||
0,
|
||||
"IMAGE"
|
||||
],
|
||||
[
|
||||
10,
|
||||
9,
|
||||
0,
|
||||
11,
|
||||
0,
|
||||
"IMAGE"
|
||||
],
|
||||
[
|
||||
11,
|
||||
2,
|
||||
0,
|
||||
4,
|
||||
2,
|
||||
"IMAGE"
|
||||
],
|
||||
[
|
||||
12,
|
||||
3,
|
||||
0,
|
||||
4,
|
||||
3,
|
||||
"MODEL"
|
||||
],
|
||||
[
|
||||
13,
|
||||
3,
|
||||
1,
|
||||
4,
|
||||
0,
|
||||
"IPADAPTER"
|
||||
],
|
||||
[
|
||||
14,
|
||||
4,
|
||||
0,
|
||||
8,
|
||||
0,
|
||||
"MODEL"
|
||||
]
|
||||
],
|
||||
"groups": [],
|
||||
"config": {},
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "IP-Adapter Style Transfer Image-to-Image Production",
|
||||
"version": "1.0.0",
|
||||
"author": "RunPod AI Model Orchestrator",
|
||||
"description": "Style transfer using IP-Adapter. Apply the visual style from a reference image to generate new images matching that aesthetic.",
|
||||
"category": "image-to-image",
|
||||
"tags": [
|
||||
"ipadapter",
|
||||
"style-transfer",
|
||||
"i2i",
|
||||
"production",
|
||||
"sdxl"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"stable-diffusion-xl-base-1.0",
|
||||
"ip-adapter-plus"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"ComfyUI_IPAdapter_plus"
|
||||
],
|
||||
"vram_min": "16GB",
|
||||
"vram_recommended": "24GB"
|
||||
},
|
||||
"parameters": {
|
||||
"style_image": {
|
||||
"node_id": 2,
|
||||
"widget_index": 0,
|
||||
"type": "image",
|
||||
"required": true,
|
||||
"description": "Reference image for style extraction"
|
||||
},
|
||||
"prompt": {
|
||||
"node_id": 5,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"default": "A portrait of a person",
|
||||
"description": "Text description of desired content"
|
||||
},
|
||||
"negative_prompt": {
|
||||
"node_id": 6,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"default": "blurry, low quality",
|
||||
"description": "Undesired elements to avoid"
|
||||
},
|
||||
"style_weight": {
|
||||
"node_id": 4,
|
||||
"widget_index": 0,
|
||||
"type": "float",
|
||||
"required": false,
|
||||
"default": 0.75,
|
||||
"min": 0.0,
|
||||
"max": 1.0,
|
||||
"description": "Strength of style application (0.75 recommended)"
|
||||
},
|
||||
"width": {
|
||||
"node_id": 7,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 1024,
|
||||
"min": 512,
|
||||
"max": 2048,
|
||||
"description": "Output image width"
|
||||
},
|
||||
"height": {
|
||||
"node_id": 7,
|
||||
"widget_index": 1,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 1024,
|
||||
"min": 512,
|
||||
"max": 2048,
|
||||
"description": "Output image height"
|
||||
},
|
||||
"seed": {
|
||||
"node_id": 8,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 42,
|
||||
"min": 0,
|
||||
"max": 4294967295,
|
||||
"description": "Random seed for reproducibility"
|
||||
},
|
||||
"steps": {
|
||||
"node_id": 8,
|
||||
"widget_index": 2,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 30,
|
||||
"min": 20,
|
||||
"max": 50,
|
||||
"description": "Number of sampling steps"
|
||||
},
|
||||
"cfg": {
|
||||
"node_id": 8,
|
||||
"widget_index": 3,
|
||||
"type": "float",
|
||||
"required": false,
|
||||
"default": 6.5,
|
||||
"min": 1.0,
|
||||
"max": 15.0,
|
||||
"description": "Classifier-free guidance scale"
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"image": {
|
||||
"node_id": 11,
|
||||
"type": "image",
|
||||
"format": "PNG",
|
||||
"resolution": "1024x1024 (configurable)"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "30-40 seconds",
|
||||
"vram_usage": "~16-18GB",
|
||||
"gpu_utilization": "95-100%"
|
||||
},
|
||||
"use_cases": [
|
||||
"Apply artistic styles to new subjects",
|
||||
"Match aesthetic of reference images",
|
||||
"Consistent style across generated images",
|
||||
"Photography style transfer"
|
||||
]
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
@@ -0,0 +1,201 @@
|
||||
{
|
||||
"last_node_id": 10,
|
||||
"last_link_id": 12,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "LoadImage",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"size": [
|
||||
315,
|
||||
314
|
||||
],
|
||||
"widgets_values": [
|
||||
"input_frame.png",
|
||||
"image"
|
||||
],
|
||||
"title": "API Input Image",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadImage"
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "DiffusersLoader",
|
||||
"pos": [
|
||||
50,
|
||||
500
|
||||
],
|
||||
"widgets_values": [
|
||||
"diffusion_models/CogVideoX-5b"
|
||||
],
|
||||
"title": "CogVideoX-5b Loader",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "DiffusersLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
450,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"Camera movement description, action, scene details"
|
||||
],
|
||||
"title": "API Video Prompt",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "CogVideoXSampler",
|
||||
"pos": [
|
||||
800,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
50,
|
||||
6.0,
|
||||
49,
|
||||
6
|
||||
],
|
||||
"title": "CogVideoX Sampler (6s @ 8fps)",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CogVideoXSampler"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 474
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "VAEDecode",
|
||||
"pos": [
|
||||
1150,
|
||||
100
|
||||
],
|
||||
"title": "VAE Decode Video",
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "VHS_VideoCombine",
|
||||
"pos": [
|
||||
1450,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
8,
|
||||
0,
|
||||
"cogvideox_output",
|
||||
"video/h264-mp4"
|
||||
],
|
||||
"title": "Combine Video Frames",
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "VHS_VideoCombine"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"links": [],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "CogVideoX Image-to-Video Production",
|
||||
"version": "1.0.0",
|
||||
"description": "AI-driven image-to-video using CogVideoX-5b. Generate 6-second videos (48 frames @ 8fps) from input images with camera movement and action.",
|
||||
"category": "image-to-video",
|
||||
"tags": [
|
||||
"cogvideox",
|
||||
"i2v",
|
||||
"video-generation",
|
||||
"production"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"CogVideoX-5b"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"ComfyUI-VideoHelperSuite",
|
||||
"ComfyUI-CogVideoXWrapper"
|
||||
],
|
||||
"vram_min": "20GB"
|
||||
},
|
||||
"parameters": {
|
||||
"input_image": {
|
||||
"node_id": 1,
|
||||
"type": "image",
|
||||
"required": true,
|
||||
"description": "Starting frame for video"
|
||||
},
|
||||
"video_prompt": {
|
||||
"node_id": 3,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"description": "Describe camera movement and action"
|
||||
},
|
||||
"steps": {
|
||||
"node_id": 4,
|
||||
"type": "integer",
|
||||
"default": 50,
|
||||
"description": "Sampling steps (50 recommended)"
|
||||
},
|
||||
"fps": {
|
||||
"node_id": 6,
|
||||
"type": "integer",
|
||||
"default": 8,
|
||||
"description": "Output framerate"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "120-180 seconds",
|
||||
"vram_usage": "~20-22GB",
|
||||
"output": "6 seconds @ 8fps (48 frames)"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
177
comfyui/workflows/image-to-video/svd-i2v-production-v1.json
Normal file
177
comfyui/workflows/image-to-video/svd-i2v-production-v1.json
Normal file
@@ -0,0 +1,177 @@
|
||||
{
|
||||
"last_node_id": 8,
|
||||
"last_link_id": 10,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "LoadImage",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"input_frame.png",
|
||||
"image"
|
||||
],
|
||||
"title": "API Input Image",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "DiffusersLoader",
|
||||
"pos": [
|
||||
50,
|
||||
400
|
||||
],
|
||||
"widgets_values": [
|
||||
"diffusion_models/stable-video-diffusion-img2vid"
|
||||
],
|
||||
"title": "SVD Model Loader",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "DiffusersLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "SVDSampler",
|
||||
"pos": [
|
||||
450,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
25,
|
||||
14,
|
||||
127,
|
||||
0.02
|
||||
],
|
||||
"title": "SVD Sampler (14 frames)",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "SVDSampler"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 474
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "VAEDecode",
|
||||
"pos": [
|
||||
800,
|
||||
100
|
||||
],
|
||||
"title": "VAE Decode Video",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "VHS_VideoCombine",
|
||||
"pos": [
|
||||
1100,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
6,
|
||||
0,
|
||||
"svd_output",
|
||||
"video/h264-mp4"
|
||||
],
|
||||
"title": "Combine Frames",
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "VHS_VideoCombine"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"links": [],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "Stable Video Diffusion Image-to-Video Production",
|
||||
"version": "1.0.0",
|
||||
"description": "Quick animation using SVD. Generate 14-frame video from single image with motion and camera movement.",
|
||||
"category": "image-to-video",
|
||||
"tags": [
|
||||
"svd",
|
||||
"stable-video-diffusion",
|
||||
"i2v",
|
||||
"animation",
|
||||
"production"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"stable-video-diffusion-img2vid"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"ComfyUI-VideoHelperSuite"
|
||||
],
|
||||
"vram_min": "16GB"
|
||||
},
|
||||
"parameters": {
|
||||
"input_image": {
|
||||
"node_id": 1,
|
||||
"type": "image",
|
||||
"required": true
|
||||
},
|
||||
"steps": {
|
||||
"node_id": 3,
|
||||
"type": "integer",
|
||||
"default": 25
|
||||
},
|
||||
"frames": {
|
||||
"node_id": 3,
|
||||
"type": "integer",
|
||||
"default": 14,
|
||||
"description": "Number of output frames"
|
||||
},
|
||||
"motion_bucket": {
|
||||
"node_id": 3,
|
||||
"type": "integer",
|
||||
"default": 127,
|
||||
"description": "Motion amount (0-255)"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "25-35 seconds",
|
||||
"vram_usage": "~14-16GB",
|
||||
"output": "14 frames (~2.3s @ 6fps)"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
176
comfyui/workflows/image-to-video/svd-xt-i2v-production-v1.json
Normal file
176
comfyui/workflows/image-to-video/svd-xt-i2v-production-v1.json
Normal file
@@ -0,0 +1,176 @@
|
||||
{
|
||||
"last_node_id": 8,
|
||||
"last_link_id": 10,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "LoadImage",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"input_frame.png",
|
||||
"image"
|
||||
],
|
||||
"title": "API Input Image",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "DiffusersLoader",
|
||||
"pos": [
|
||||
50,
|
||||
400
|
||||
],
|
||||
"widgets_values": [
|
||||
"diffusion_models/stable-video-diffusion-img2vid-xt"
|
||||
],
|
||||
"title": "SVD-XT Model Loader",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "DiffusersLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "SVDSampler",
|
||||
"pos": [
|
||||
450,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
30,
|
||||
25,
|
||||
127,
|
||||
0.02
|
||||
],
|
||||
"title": "SVD-XT Sampler (25 frames)",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "SVDSampler"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 474
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "VAEDecode",
|
||||
"pos": [
|
||||
800,
|
||||
100
|
||||
],
|
||||
"title": "VAE Decode Video",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "VHS_VideoCombine",
|
||||
"pos": [
|
||||
1100,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
6,
|
||||
0,
|
||||
"svd_xt_output",
|
||||
"video/h264-mp4"
|
||||
],
|
||||
"title": "Combine Frames",
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "VHS_VideoCombine"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"links": [],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "Stable Video Diffusion XT Image-to-Video Production",
|
||||
"version": "1.0.0",
|
||||
"description": "Extended animation using SVD-XT. Generate 25-frame video for longer animations with smooth motion.",
|
||||
"category": "image-to-video",
|
||||
"tags": [
|
||||
"svd-xt",
|
||||
"stable-video-diffusion",
|
||||
"i2v",
|
||||
"extended",
|
||||
"production"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"stable-video-diffusion-img2vid-xt"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"ComfyUI-VideoHelperSuite"
|
||||
],
|
||||
"vram_min": "18GB"
|
||||
},
|
||||
"parameters": {
|
||||
"input_image": {
|
||||
"node_id": 1,
|
||||
"type": "image",
|
||||
"required": true
|
||||
},
|
||||
"steps": {
|
||||
"node_id": 3,
|
||||
"type": "integer",
|
||||
"default": 30
|
||||
},
|
||||
"frames": {
|
||||
"node_id": 3,
|
||||
"type": "integer",
|
||||
"default": 25,
|
||||
"description": "Number of output frames"
|
||||
},
|
||||
"motion_bucket": {
|
||||
"node_id": 3,
|
||||
"type": "integer",
|
||||
"default": 127
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "40-55 seconds",
|
||||
"vram_usage": "~16-18GB",
|
||||
"output": "25 frames (~4.2s @ 6fps)"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
358
comfyui/workflows/text-to-image/flux-dev-t2i-production-v1.json
Normal file
358
comfyui/workflows/text-to-image/flux-dev-t2i-production-v1.json
Normal file
@@ -0,0 +1,358 @@
|
||||
{
|
||||
"last_node_id": 12,
|
||||
"last_link_id": 15,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [50, 100],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "MODEL",
|
||||
"type": "MODEL",
|
||||
"links": [1],
|
||||
"slot_index": 0
|
||||
},
|
||||
{
|
||||
"name": "CLIP",
|
||||
"type": "CLIP",
|
||||
"links": [2, 3],
|
||||
"slot_index": 1
|
||||
},
|
||||
{
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"links": [4],
|
||||
"slot_index": 2
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
},
|
||||
"widgets_values": ["diffusers/FLUX.1-dev"],
|
||||
"title": "FLUX Dev Checkpoint Loader"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 100],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 2
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [5],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": ["A beautiful mountain landscape at sunset, highly detailed, professional photography, cinematic lighting, 8k ultra HD"],
|
||||
"title": "API Positive Prompt"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 350],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 3
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [6],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": ["blurry, low quality, distorted, watermark, text, signature, cartoon"],
|
||||
"title": "API Negative Prompt"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [450, 600],
|
||||
"size": {"0": 315, "1": 106},
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [7],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "EmptyLatentImage"
|
||||
},
|
||||
"widgets_values": [1024, 1024, 1],
|
||||
"title": "API Latent Image Config"
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "KSampler",
|
||||
"pos": [900, 100],
|
||||
"size": {"0": 315, "1": 474},
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "model",
|
||||
"type": "MODEL",
|
||||
"link": 1
|
||||
},
|
||||
{
|
||||
"name": "positive",
|
||||
"type": "CONDITIONING",
|
||||
"link": 5
|
||||
},
|
||||
{
|
||||
"name": "negative",
|
||||
"type": "CONDITIONING",
|
||||
"link": 6
|
||||
},
|
||||
{
|
||||
"name": "latent_image",
|
||||
"type": "LATENT",
|
||||
"link": 7
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [8],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "KSampler"
|
||||
},
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
20,
|
||||
3.5,
|
||||
"euler",
|
||||
"normal",
|
||||
1
|
||||
],
|
||||
"title": "FLUX Dev Sampler (20-50 steps)"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "VAEDecode",
|
||||
"pos": [1270, 100],
|
||||
"size": {"0": 210, "1": 46},
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "samples",
|
||||
"type": "LATENT",
|
||||
"link": 8
|
||||
},
|
||||
{
|
||||
"name": "vae",
|
||||
"type": "VAE",
|
||||
"link": 4
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "IMAGE",
|
||||
"type": "IMAGE",
|
||||
"links": [9, 10],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"title": "VAE Decode"
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "PreviewImage",
|
||||
"pos": [1530, 100],
|
||||
"size": {"0": 400, "1": 400},
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 9
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
},
|
||||
"title": "Preview Output"
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "SaveImage",
|
||||
"pos": [1530, 550],
|
||||
"size": {"0": 400, "1": 100},
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 10
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "SaveImage"
|
||||
},
|
||||
"widgets_values": ["flux_dev_output"],
|
||||
"title": "API Image Output"
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
[1, 1, 0, 5, 0, "MODEL"],
|
||||
[2, 1, 1, 2, 0, "CLIP"],
|
||||
[3, 1, 1, 3, 0, "CLIP"],
|
||||
[4, 1, 2, 6, 1, "VAE"],
|
||||
[5, 2, 0, 5, 1, "CONDITIONING"],
|
||||
[6, 3, 0, 5, 2, "CONDITIONING"],
|
||||
[7, 4, 0, 5, 3, "LATENT"],
|
||||
[8, 5, 0, 6, 0, "LATENT"],
|
||||
[9, 6, 0, 7, 0, "IMAGE"],
|
||||
[10, 6, 0, 8, 0, "IMAGE"]
|
||||
],
|
||||
"groups": [],
|
||||
"config": {},
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "FLUX Dev Text-to-Image Production",
|
||||
"version": "1.0.0",
|
||||
"author": "RunPod AI Model Orchestrator",
|
||||
"description": "High-quality text-to-image generation using FLUX.1-dev (20-50 steps). Optimized for final production outputs with excellent detail and coherence.",
|
||||
"category": "text-to-image",
|
||||
"tags": ["flux", "dev", "high-quality", "production", "t2i"],
|
||||
"requirements": {
|
||||
"models": ["FLUX.1-dev"],
|
||||
"custom_nodes": [],
|
||||
"vram_min": "20GB",
|
||||
"vram_recommended": "24GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"node_id": 2,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"default": "A beautiful mountain landscape at sunset",
|
||||
"description": "Text description of desired image"
|
||||
},
|
||||
"negative_prompt": {
|
||||
"node_id": 3,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"default": "blurry, low quality",
|
||||
"description": "Undesired elements to avoid"
|
||||
},
|
||||
"width": {
|
||||
"node_id": 4,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 1024,
|
||||
"min": 512,
|
||||
"max": 2048,
|
||||
"description": "Image width in pixels"
|
||||
},
|
||||
"height": {
|
||||
"node_id": 4,
|
||||
"widget_index": 1,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 1024,
|
||||
"min": 512,
|
||||
"max": 2048,
|
||||
"description": "Image height in pixels"
|
||||
},
|
||||
"seed": {
|
||||
"node_id": 5,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 42,
|
||||
"min": 0,
|
||||
"max": 4294967295,
|
||||
"description": "Random seed for reproducibility"
|
||||
},
|
||||
"steps": {
|
||||
"node_id": 5,
|
||||
"widget_index": 2,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 20,
|
||||
"min": 10,
|
||||
"max": 50,
|
||||
"description": "Number of sampling steps (20-50 recommended for FLUX Dev)"
|
||||
},
|
||||
"cfg": {
|
||||
"node_id": 5,
|
||||
"widget_index": 3,
|
||||
"type": "float",
|
||||
"required": false,
|
||||
"default": 3.5,
|
||||
"min": 1.0,
|
||||
"max": 10.0,
|
||||
"description": "Classifier-free guidance scale (3.5 recommended)"
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"image": {
|
||||
"node_id": 8,
|
||||
"type": "image",
|
||||
"format": "PNG",
|
||||
"resolution": "1024x1024 (configurable)"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "25-35 seconds (20 steps), 60-75 seconds (50 steps)",
|
||||
"vram_usage": "~20-22GB",
|
||||
"gpu_utilization": "95-100%"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
@@ -0,0 +1,358 @@
|
||||
{
|
||||
"last_node_id": 12,
|
||||
"last_link_id": 15,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [50, 100],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "MODEL",
|
||||
"type": "MODEL",
|
||||
"links": [1],
|
||||
"slot_index": 0
|
||||
},
|
||||
{
|
||||
"name": "CLIP",
|
||||
"type": "CLIP",
|
||||
"links": [2, 3],
|
||||
"slot_index": 1
|
||||
},
|
||||
{
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"links": [4],
|
||||
"slot_index": 2
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
},
|
||||
"widgets_values": ["diffusers/FLUX.1-schnell"],
|
||||
"title": "FLUX Schnell Checkpoint Loader"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 100],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 2
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [5],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": ["A beautiful mountain landscape at sunset, highly detailed, cinematic lighting, 8k"],
|
||||
"title": "API Positive Prompt"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 350],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 3
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [6],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": ["blurry, low quality, distorted, watermark"],
|
||||
"title": "API Negative Prompt"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [450, 600],
|
||||
"size": {"0": 315, "1": 106},
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [7],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "EmptyLatentImage"
|
||||
},
|
||||
"widgets_values": [1024, 1024, 1],
|
||||
"title": "API Latent Image Config"
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "KSampler",
|
||||
"pos": [900, 100],
|
||||
"size": {"0": 315, "1": 474},
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "model",
|
||||
"type": "MODEL",
|
||||
"link": 1
|
||||
},
|
||||
{
|
||||
"name": "positive",
|
||||
"type": "CONDITIONING",
|
||||
"link": 5
|
||||
},
|
||||
{
|
||||
"name": "negative",
|
||||
"type": "CONDITIONING",
|
||||
"link": 6
|
||||
},
|
||||
{
|
||||
"name": "latent_image",
|
||||
"type": "LATENT",
|
||||
"link": 7
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [8],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "KSampler"
|
||||
},
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
4,
|
||||
1.0,
|
||||
"euler",
|
||||
"normal",
|
||||
1
|
||||
],
|
||||
"title": "FLUX Schnell Sampler (4 steps)"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "VAEDecode",
|
||||
"pos": [1270, 100],
|
||||
"size": {"0": 210, "1": 46},
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "samples",
|
||||
"type": "LATENT",
|
||||
"link": 8
|
||||
},
|
||||
{
|
||||
"name": "vae",
|
||||
"type": "VAE",
|
||||
"link": 4
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "IMAGE",
|
||||
"type": "IMAGE",
|
||||
"links": [9, 10],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"title": "VAE Decode"
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "PreviewImage",
|
||||
"pos": [1530, 100],
|
||||
"size": {"0": 400, "1": 400},
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 9
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
},
|
||||
"title": "Preview Output"
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "SaveImage",
|
||||
"pos": [1530, 550],
|
||||
"size": {"0": 400, "1": 100},
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 10
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "SaveImage"
|
||||
},
|
||||
"widgets_values": ["flux_schnell_output"],
|
||||
"title": "API Image Output"
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
[1, 1, 0, 5, 0, "MODEL"],
|
||||
[2, 1, 1, 2, 0, "CLIP"],
|
||||
[3, 1, 1, 3, 0, "CLIP"],
|
||||
[4, 1, 2, 6, 1, "VAE"],
|
||||
[5, 2, 0, 5, 1, "CONDITIONING"],
|
||||
[6, 3, 0, 5, 2, "CONDITIONING"],
|
||||
[7, 4, 0, 5, 3, "LATENT"],
|
||||
[8, 5, 0, 6, 0, "LATENT"],
|
||||
[9, 6, 0, 7, 0, "IMAGE"],
|
||||
[10, 6, 0, 8, 0, "IMAGE"]
|
||||
],
|
||||
"groups": [],
|
||||
"config": {},
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "FLUX Schnell Text-to-Image Production",
|
||||
"version": "1.0.0",
|
||||
"author": "RunPod AI Model Orchestrator",
|
||||
"description": "Fast text-to-image generation using FLUX.1-schnell (4 steps). Optimized for rapid prototyping and iteration.",
|
||||
"category": "text-to-image",
|
||||
"tags": ["flux", "schnell", "fast", "production", "t2i"],
|
||||
"requirements": {
|
||||
"models": ["FLUX.1-schnell"],
|
||||
"custom_nodes": [],
|
||||
"vram_min": "16GB",
|
||||
"vram_recommended": "24GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"node_id": 2,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"default": "A beautiful mountain landscape at sunset",
|
||||
"description": "Text description of desired image"
|
||||
},
|
||||
"negative_prompt": {
|
||||
"node_id": 3,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"default": "blurry, low quality",
|
||||
"description": "Undesired elements to avoid"
|
||||
},
|
||||
"width": {
|
||||
"node_id": 4,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 1024,
|
||||
"min": 512,
|
||||
"max": 2048,
|
||||
"description": "Image width in pixels"
|
||||
},
|
||||
"height": {
|
||||
"node_id": 4,
|
||||
"widget_index": 1,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 1024,
|
||||
"min": 512,
|
||||
"max": 2048,
|
||||
"description": "Image height in pixels"
|
||||
},
|
||||
"seed": {
|
||||
"node_id": 5,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 42,
|
||||
"min": 0,
|
||||
"max": 4294967295,
|
||||
"description": "Random seed for reproducibility"
|
||||
},
|
||||
"steps": {
|
||||
"node_id": 5,
|
||||
"widget_index": 2,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 4,
|
||||
"min": 1,
|
||||
"max": 10,
|
||||
"description": "Number of sampling steps (4 recommended for Schnell)"
|
||||
},
|
||||
"cfg": {
|
||||
"node_id": 5,
|
||||
"widget_index": 3,
|
||||
"type": "float",
|
||||
"required": false,
|
||||
"default": 1.0,
|
||||
"min": 1.0,
|
||||
"max": 1.0,
|
||||
"description": "Classifier-free guidance scale (1.0 for FLUX Schnell)"
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"image": {
|
||||
"node_id": 8,
|
||||
"type": "image",
|
||||
"format": "PNG",
|
||||
"resolution": "1024x1024 (configurable)"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "5-8 seconds",
|
||||
"vram_usage": "~16GB",
|
||||
"gpu_utilization": "95-100%"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
@@ -0,0 +1,358 @@
|
||||
{
|
||||
"last_node_id": 12,
|
||||
"last_link_id": 15,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [50, 100],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "MODEL",
|
||||
"type": "MODEL",
|
||||
"links": [1],
|
||||
"slot_index": 0
|
||||
},
|
||||
{
|
||||
"name": "CLIP",
|
||||
"type": "CLIP",
|
||||
"links": [2, 3],
|
||||
"slot_index": 1
|
||||
},
|
||||
{
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"links": [4],
|
||||
"slot_index": 2
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
},
|
||||
"widgets_values": ["diffusers/stable-diffusion-3.5-large"],
|
||||
"title": "SD3.5 Large Checkpoint Loader"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 100],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 2
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [5],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": ["A beautiful mountain landscape at sunset, highly detailed, professional photography, cinematic lighting, 8k ultra HD, photorealistic"],
|
||||
"title": "API Positive Prompt"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 350],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 3
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [6],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": ["blurry, low quality, distorted, watermark, text, signature, ugly, cartoon, painting"],
|
||||
"title": "API Negative Prompt"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [450, 600],
|
||||
"size": {"0": 315, "1": 106},
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [7],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "EmptyLatentImage"
|
||||
},
|
||||
"widgets_values": [1024, 1024, 1],
|
||||
"title": "API Latent Image Config"
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "KSampler",
|
||||
"pos": [900, 100],
|
||||
"size": {"0": 315, "1": 474},
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "model",
|
||||
"type": "MODEL",
|
||||
"link": 1
|
||||
},
|
||||
{
|
||||
"name": "positive",
|
||||
"type": "CONDITIONING",
|
||||
"link": 5
|
||||
},
|
||||
{
|
||||
"name": "negative",
|
||||
"type": "CONDITIONING",
|
||||
"link": 6
|
||||
},
|
||||
{
|
||||
"name": "latent_image",
|
||||
"type": "LATENT",
|
||||
"link": 7
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [8],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "KSampler"
|
||||
},
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
28,
|
||||
4.5,
|
||||
"dpmpp_2m",
|
||||
"karras",
|
||||
1
|
||||
],
|
||||
"title": "SD3.5 Sampler (28 steps)"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "VAEDecode",
|
||||
"pos": [1270, 100],
|
||||
"size": {"0": 210, "1": 46},
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "samples",
|
||||
"type": "LATENT",
|
||||
"link": 8
|
||||
},
|
||||
{
|
||||
"name": "vae",
|
||||
"type": "VAE",
|
||||
"link": 4
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "IMAGE",
|
||||
"type": "IMAGE",
|
||||
"links": [9, 10],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"title": "VAE Decode"
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "PreviewImage",
|
||||
"pos": [1530, 100],
|
||||
"size": {"0": 400, "1": 400},
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 9
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
},
|
||||
"title": "Preview Output"
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "SaveImage",
|
||||
"pos": [1530, 550],
|
||||
"size": {"0": 400, "1": 100},
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 10
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "SaveImage"
|
||||
},
|
||||
"widgets_values": ["sd35_large_output"],
|
||||
"title": "API Image Output"
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
[1, 1, 0, 5, 0, "MODEL"],
|
||||
[2, 1, 1, 2, 0, "CLIP"],
|
||||
[3, 1, 1, 3, 0, "CLIP"],
|
||||
[4, 1, 2, 6, 1, "VAE"],
|
||||
[5, 2, 0, 5, 1, "CONDITIONING"],
|
||||
[6, 3, 0, 5, 2, "CONDITIONING"],
|
||||
[7, 4, 0, 5, 3, "LATENT"],
|
||||
[8, 5, 0, 6, 0, "LATENT"],
|
||||
[9, 6, 0, 7, 0, "IMAGE"],
|
||||
[10, 6, 0, 8, 0, "IMAGE"]
|
||||
],
|
||||
"groups": [],
|
||||
"config": {},
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "Stable Diffusion 3.5 Large Text-to-Image Production",
|
||||
"version": "1.0.0",
|
||||
"author": "RunPod AI Model Orchestrator",
|
||||
"description": "Latest generation text-to-image using Stable Diffusion 3.5 Large (28 steps). Provides excellent photorealism and prompt adherence.",
|
||||
"category": "text-to-image",
|
||||
"tags": ["sd3.5", "stable-diffusion", "large", "production", "t2i", "photorealistic"],
|
||||
"requirements": {
|
||||
"models": ["stable-diffusion-3.5-large"],
|
||||
"custom_nodes": [],
|
||||
"vram_min": "18GB",
|
||||
"vram_recommended": "24GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"node_id": 2,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"default": "A beautiful mountain landscape at sunset",
|
||||
"description": "Text description of desired image"
|
||||
},
|
||||
"negative_prompt": {
|
||||
"node_id": 3,
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"default": "blurry, low quality",
|
||||
"description": "Undesired elements to avoid"
|
||||
},
|
||||
"width": {
|
||||
"node_id": 4,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 1024,
|
||||
"min": 512,
|
||||
"max": 2048,
|
||||
"description": "Image width in pixels"
|
||||
},
|
||||
"height": {
|
||||
"node_id": 4,
|
||||
"widget_index": 1,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 1024,
|
||||
"min": 512,
|
||||
"max": 2048,
|
||||
"description": "Image height in pixels"
|
||||
},
|
||||
"seed": {
|
||||
"node_id": 5,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 42,
|
||||
"min": 0,
|
||||
"max": 4294967295,
|
||||
"description": "Random seed for reproducibility"
|
||||
},
|
||||
"steps": {
|
||||
"node_id": 5,
|
||||
"widget_index": 2,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 28,
|
||||
"min": 20,
|
||||
"max": 50,
|
||||
"description": "Number of sampling steps (28 recommended for SD3.5)"
|
||||
},
|
||||
"cfg": {
|
||||
"node_id": 5,
|
||||
"widget_index": 3,
|
||||
"type": "float",
|
||||
"required": false,
|
||||
"default": 4.5,
|
||||
"min": 1.0,
|
||||
"max": 15.0,
|
||||
"description": "Classifier-free guidance scale (4.5 recommended)"
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"image": {
|
||||
"node_id": 8,
|
||||
"type": "image",
|
||||
"format": "PNG",
|
||||
"resolution": "1024x1024 (configurable)"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "35-45 seconds",
|
||||
"vram_usage": "~18-20GB",
|
||||
"gpu_utilization": "95-100%"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
@@ -0,0 +1,581 @@
|
||||
{
|
||||
"last_node_id": 15,
|
||||
"last_link_id": 22,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [50, 100],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "MODEL",
|
||||
"type": "MODEL",
|
||||
"links": [1],
|
||||
"slot_index": 0
|
||||
},
|
||||
{
|
||||
"name": "CLIP",
|
||||
"type": "CLIP",
|
||||
"links": [2, 3],
|
||||
"slot_index": 1
|
||||
},
|
||||
{
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"links": [4],
|
||||
"slot_index": 2
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
},
|
||||
"widgets_values": ["diffusers/stable-diffusion-xl-base-1.0"],
|
||||
"title": "SDXL Base Checkpoint Loader"
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [50, 300],
|
||||
"size": {"0": 350, "1": 100},
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "MODEL",
|
||||
"type": "MODEL",
|
||||
"links": [11],
|
||||
"slot_index": 0
|
||||
},
|
||||
{
|
||||
"name": "CLIP",
|
||||
"type": "CLIP",
|
||||
"links": [12, 13],
|
||||
"slot_index": 1
|
||||
},
|
||||
{
|
||||
"name": "VAE",
|
||||
"type": "VAE",
|
||||
"links": [14],
|
||||
"slot_index": 2
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
},
|
||||
"widgets_values": ["diffusers/stable-diffusion-xl-refiner-1.0"],
|
||||
"title": "SDXL Refiner Checkpoint Loader"
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 100],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 2
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [5],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": ["A beautiful mountain landscape at sunset, highly detailed, professional photography, cinematic lighting, 8k ultra HD, masterpiece"],
|
||||
"title": "API Positive Prompt (Base)"
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 350],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 3
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [6],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": ["blurry, low quality, distorted, watermark, text, signature, bad anatomy, deformed"],
|
||||
"title": "API Negative Prompt (Base)"
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 600],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 12
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [15],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": ["A beautiful mountain landscape at sunset, highly detailed, professional photography, cinematic lighting, 8k ultra HD, masterpiece"],
|
||||
"title": "API Positive Prompt (Refiner)"
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [450, 850],
|
||||
"size": {"0": 400, "1": 200},
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "clip",
|
||||
"type": "CLIP",
|
||||
"link": 13
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "CONDITIONING",
|
||||
"type": "CONDITIONING",
|
||||
"links": [16],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"widgets_values": ["blurry, low quality, distorted, watermark, text, signature, bad anatomy, deformed"],
|
||||
"title": "API Negative Prompt (Refiner)"
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "EmptyLatentImage",
|
||||
"pos": [900, 600],
|
||||
"size": {"0": 315, "1": 106},
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [7],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "EmptyLatentImage"
|
||||
},
|
||||
"widgets_values": [1024, 1024, 1],
|
||||
"title": "API Latent Image Config"
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "KSampler",
|
||||
"pos": [900, 100],
|
||||
"size": {"0": 315, "1": 474},
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "model",
|
||||
"type": "MODEL",
|
||||
"link": 1
|
||||
},
|
||||
{
|
||||
"name": "positive",
|
||||
"type": "CONDITIONING",
|
||||
"link": 5
|
||||
},
|
||||
{
|
||||
"name": "negative",
|
||||
"type": "CONDITIONING",
|
||||
"link": 6
|
||||
},
|
||||
{
|
||||
"name": "latent_image",
|
||||
"type": "LATENT",
|
||||
"link": 7
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [8, 17],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "KSampler"
|
||||
},
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
30,
|
||||
7.5,
|
||||
"dpmpp_2m",
|
||||
"karras",
|
||||
1
|
||||
],
|
||||
"title": "SDXL Base Sampler (30 steps)"
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"type": "VAEDecode",
|
||||
"pos": [1270, 100],
|
||||
"size": {"0": 210, "1": 46},
|
||||
"flags": {},
|
||||
"order": 8,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "samples",
|
||||
"type": "LATENT",
|
||||
"link": 8
|
||||
},
|
||||
{
|
||||
"name": "vae",
|
||||
"type": "VAE",
|
||||
"link": 4
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "IMAGE",
|
||||
"type": "IMAGE",
|
||||
"links": [9],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"title": "VAE Decode (Base)"
|
||||
},
|
||||
{
|
||||
"id": 10,
|
||||
"type": "PreviewImage",
|
||||
"pos": [1530, 100],
|
||||
"size": {"0": 400, "1": 400},
|
||||
"flags": {},
|
||||
"order": 9,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 9
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
},
|
||||
"title": "Preview Base Output"
|
||||
},
|
||||
{
|
||||
"id": 11,
|
||||
"type": "KSampler",
|
||||
"pos": [1270, 600],
|
||||
"size": {"0": 315, "1": 474},
|
||||
"flags": {},
|
||||
"order": 10,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "model",
|
||||
"type": "MODEL",
|
||||
"link": 11
|
||||
},
|
||||
{
|
||||
"name": "positive",
|
||||
"type": "CONDITIONING",
|
||||
"link": 15
|
||||
},
|
||||
{
|
||||
"name": "negative",
|
||||
"type": "CONDITIONING",
|
||||
"link": 16
|
||||
},
|
||||
{
|
||||
"name": "latent_image",
|
||||
"type": "LATENT",
|
||||
"link": 17
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "LATENT",
|
||||
"type": "LATENT",
|
||||
"links": [18],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "KSampler"
|
||||
},
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
20,
|
||||
7.5,
|
||||
"dpmpp_2m",
|
||||
"karras",
|
||||
1
|
||||
],
|
||||
"title": "SDXL Refiner Sampler (20 steps)"
|
||||
},
|
||||
{
|
||||
"id": 12,
|
||||
"type": "VAEDecode",
|
||||
"pos": [1640, 600],
|
||||
"size": {"0": 210, "1": 46},
|
||||
"flags": {},
|
||||
"order": 11,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "samples",
|
||||
"type": "LATENT",
|
||||
"link": 18
|
||||
},
|
||||
{
|
||||
"name": "vae",
|
||||
"type": "VAE",
|
||||
"link": 14
|
||||
}
|
||||
],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "IMAGE",
|
||||
"type": "IMAGE",
|
||||
"links": [19, 20],
|
||||
"slot_index": 0
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"title": "VAE Decode (Refiner)"
|
||||
},
|
||||
{
|
||||
"id": 13,
|
||||
"type": "PreviewImage",
|
||||
"pos": [1900, 600],
|
||||
"size": {"0": 400, "1": 400},
|
||||
"flags": {},
|
||||
"order": 12,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 19
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
},
|
||||
"title": "Preview Refined Output"
|
||||
},
|
||||
{
|
||||
"id": 14,
|
||||
"type": "SaveImage",
|
||||
"pos": [1900, 1050],
|
||||
"size": {"0": 400, "1": 100},
|
||||
"flags": {},
|
||||
"order": 13,
|
||||
"mode": 0,
|
||||
"inputs": [
|
||||
{
|
||||
"name": "images",
|
||||
"type": "IMAGE",
|
||||
"link": 20
|
||||
}
|
||||
],
|
||||
"properties": {
|
||||
"Node name for S&R": "SaveImage"
|
||||
},
|
||||
"widgets_values": ["sdxl_refined_output"],
|
||||
"title": "API Image Output"
|
||||
}
|
||||
],
|
||||
"links": [
|
||||
[1, 1, 0, 8, 0, "MODEL"],
|
||||
[2, 1, 1, 3, 0, "CLIP"],
|
||||
[3, 1, 1, 4, 0, "CLIP"],
|
||||
[4, 1, 2, 9, 1, "VAE"],
|
||||
[5, 3, 0, 8, 1, "CONDITIONING"],
|
||||
[6, 4, 0, 8, 2, "CONDITIONING"],
|
||||
[7, 7, 0, 8, 3, "LATENT"],
|
||||
[8, 8, 0, 9, 0, "LATENT"],
|
||||
[9, 9, 0, 10, 0, "IMAGE"],
|
||||
[11, 2, 0, 11, 0, "MODEL"],
|
||||
[12, 2, 1, 5, 0, "CLIP"],
|
||||
[13, 2, 1, 6, 0, "CLIP"],
|
||||
[14, 2, 2, 12, 1, "VAE"],
|
||||
[15, 5, 0, 11, 1, "CONDITIONING"],
|
||||
[16, 6, 0, 11, 2, "CONDITIONING"],
|
||||
[17, 8, 0, 11, 3, "LATENT"],
|
||||
[18, 11, 0, 12, 0, "LATENT"],
|
||||
[19, 12, 0, 13, 0, "IMAGE"],
|
||||
[20, 12, 0, 14, 0, "IMAGE"]
|
||||
],
|
||||
"groups": [],
|
||||
"config": {},
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "SDXL with Refiner Text-to-Image Production",
|
||||
"version": "1.0.0",
|
||||
"author": "RunPod AI Model Orchestrator",
|
||||
"description": "Two-stage text-to-image generation using SDXL Base (30 steps) + Refiner (20 steps). Produces highly detailed, refined outputs with excellent coherence.",
|
||||
"category": "text-to-image",
|
||||
"tags": ["sdxl", "refiner", "two-stage", "high-quality", "production", "t2i"],
|
||||
"requirements": {
|
||||
"models": ["stable-diffusion-xl-base-1.0", "stable-diffusion-xl-refiner-1.0"],
|
||||
"custom_nodes": [],
|
||||
"vram_min": "20GB",
|
||||
"vram_recommended": "24GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"node_id": [3, 5],
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": true,
|
||||
"default": "A beautiful mountain landscape at sunset",
|
||||
"description": "Text description of desired image (used for both base and refiner)"
|
||||
},
|
||||
"negative_prompt": {
|
||||
"node_id": [4, 6],
|
||||
"widget_index": 0,
|
||||
"type": "string",
|
||||
"required": false,
|
||||
"default": "blurry, low quality",
|
||||
"description": "Undesired elements to avoid (used for both base and refiner)"
|
||||
},
|
||||
"width": {
|
||||
"node_id": 7,
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 1024,
|
||||
"min": 512,
|
||||
"max": 2048,
|
||||
"description": "Image width in pixels"
|
||||
},
|
||||
"height": {
|
||||
"node_id": 7,
|
||||
"widget_index": 1,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 1024,
|
||||
"min": 512,
|
||||
"max": 2048,
|
||||
"description": "Image height in pixels"
|
||||
},
|
||||
"seed": {
|
||||
"node_id": [8, 11],
|
||||
"widget_index": 0,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 42,
|
||||
"min": 0,
|
||||
"max": 4294967295,
|
||||
"description": "Random seed for reproducibility (same for base and refiner)"
|
||||
},
|
||||
"base_steps": {
|
||||
"node_id": 8,
|
||||
"widget_index": 2,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 30,
|
||||
"min": 20,
|
||||
"max": 50,
|
||||
"description": "Number of sampling steps for base model"
|
||||
},
|
||||
"refiner_steps": {
|
||||
"node_id": 11,
|
||||
"widget_index": 2,
|
||||
"type": "integer",
|
||||
"required": false,
|
||||
"default": 20,
|
||||
"min": 10,
|
||||
"max": 30,
|
||||
"description": "Number of sampling steps for refiner model"
|
||||
},
|
||||
"cfg": {
|
||||
"node_id": [8, 11],
|
||||
"widget_index": 3,
|
||||
"type": "float",
|
||||
"required": false,
|
||||
"default": 7.5,
|
||||
"min": 1.0,
|
||||
"max": 15.0,
|
||||
"description": "Classifier-free guidance scale (7.5 recommended for SDXL)"
|
||||
}
|
||||
},
|
||||
"outputs": {
|
||||
"base_image": {
|
||||
"node_id": 10,
|
||||
"type": "preview",
|
||||
"description": "Base model output (before refinement)"
|
||||
},
|
||||
"refined_image": {
|
||||
"node_id": 14,
|
||||
"type": "image",
|
||||
"format": "PNG",
|
||||
"resolution": "1024x1024 (configurable)",
|
||||
"description": "Final refined output"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "40-60 seconds (30+20 steps)",
|
||||
"vram_usage": "~18-20GB",
|
||||
"gpu_utilization": "95-100%",
|
||||
"notes": "Base and refiner run sequentially to manage VRAM"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4
|
||||
}
|
||||
@@ -0,0 +1,136 @@
|
||||
{
|
||||
"last_node_id": 6,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "MusicGenLoader",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"facebook/musicgen-large"
|
||||
],
|
||||
"title": "MusicGen Large Loader",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "MusicGenTextEncode",
|
||||
"pos": [
|
||||
400,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"Upbeat electronic dance music"
|
||||
],
|
||||
"title": "API Music Prompt",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenTextEncode"
|
||||
},
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "MusicGenSampler",
|
||||
"pos": [
|
||||
750,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
30.0,
|
||||
250,
|
||||
3.0
|
||||
],
|
||||
"title": "MusicGen Sampler",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenSampler"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 474
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "SaveAudio",
|
||||
"pos": [
|
||||
1100,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"musicgen_large_output",
|
||||
"wav"
|
||||
],
|
||||
"title": "API Audio Output",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "AudioSave"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "MusicGen Large Text-to-Music",
|
||||
"version": "1.0.0",
|
||||
"category": "text-to-music",
|
||||
"tags": [
|
||||
"musicgen",
|
||||
"large",
|
||||
"t2m"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"musicgen-large"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"comfyui-sound-lab"
|
||||
],
|
||||
"vram_min": "16GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"node_id": 2,
|
||||
"type": "string"
|
||||
},
|
||||
"duration": {
|
||||
"node_id": 3,
|
||||
"default": 30.0
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "45-70 seconds",
|
||||
"vram_usage": "~14-16GB"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4,
|
||||
"links": [],
|
||||
"last_link_id": 0
|
||||
}
|
||||
@@ -0,0 +1,136 @@
|
||||
{
|
||||
"last_node_id": 6,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "MusicGenLoader",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"facebook/musicgen-medium"
|
||||
],
|
||||
"title": "MusicGen Medium Loader",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "MusicGenTextEncode",
|
||||
"pos": [
|
||||
400,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"Upbeat electronic dance music"
|
||||
],
|
||||
"title": "API Music Prompt",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenTextEncode"
|
||||
},
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "MusicGenSampler",
|
||||
"pos": [
|
||||
750,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
30.0,
|
||||
250,
|
||||
3.0
|
||||
],
|
||||
"title": "MusicGen Sampler",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenSampler"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 474
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "SaveAudio",
|
||||
"pos": [
|
||||
1100,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"musicgen_medium_output",
|
||||
"wav"
|
||||
],
|
||||
"title": "API Audio Output",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "AudioSave"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "MusicGen Medium Text-to-Music",
|
||||
"version": "1.0.0",
|
||||
"category": "text-to-music",
|
||||
"tags": [
|
||||
"musicgen",
|
||||
"medium",
|
||||
"t2m"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"musicgen-medium"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"comfyui-sound-lab"
|
||||
],
|
||||
"vram_min": "10GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"node_id": 2,
|
||||
"type": "string"
|
||||
},
|
||||
"duration": {
|
||||
"node_id": 3,
|
||||
"default": 30.0
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "25-40 seconds",
|
||||
"vram_usage": "~8-10GB"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4,
|
||||
"links": [],
|
||||
"last_link_id": 0
|
||||
}
|
||||
@@ -0,0 +1,163 @@
|
||||
{
|
||||
"last_node_id": 7,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "LoadAudio",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"melody_reference.wav"
|
||||
],
|
||||
"title": "API Melody Reference",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadAudio"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "MusicGenLoader",
|
||||
"pos": [
|
||||
50,
|
||||
350
|
||||
],
|
||||
"widgets_values": [
|
||||
"facebook/musicgen-melody"
|
||||
],
|
||||
"title": "MusicGen Melody Loader",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "MusicGenTextEncode",
|
||||
"pos": [
|
||||
400,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"Electronic music following the melody"
|
||||
],
|
||||
"title": "API Music Prompt",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenTextEncode"
|
||||
},
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "MusicGenMelodySampler",
|
||||
"pos": [
|
||||
750,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
30.0,
|
||||
250,
|
||||
3.0
|
||||
],
|
||||
"title": "MusicGen Melody Sampler",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenMelodySampler"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 474
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "SaveAudio",
|
||||
"pos": [
|
||||
1100,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"musicgen_melody_output",
|
||||
"wav"
|
||||
],
|
||||
"title": "API Audio Output",
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "AudioSave"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "MusicGen Melody Text-to-Music",
|
||||
"version": "1.0.0",
|
||||
"category": "text-to-music",
|
||||
"tags": [
|
||||
"musicgen",
|
||||
"melody",
|
||||
"melody-conditioning",
|
||||
"t2m"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"musicgen-melody"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"comfyui-sound-lab"
|
||||
],
|
||||
"vram_min": "12GB"
|
||||
},
|
||||
"parameters": {
|
||||
"melody_audio": {
|
||||
"node_id": 1,
|
||||
"type": "audio"
|
||||
},
|
||||
"prompt": {
|
||||
"node_id": 3,
|
||||
"type": "string"
|
||||
},
|
||||
"duration": {
|
||||
"node_id": 4,
|
||||
"default": 30.0
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "35-55 seconds",
|
||||
"vram_usage": "~10-12GB"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4,
|
||||
"links": [],
|
||||
"last_link_id": 0
|
||||
}
|
||||
@@ -0,0 +1,136 @@
|
||||
{
|
||||
"last_node_id": 6,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "MusicGenLoader",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"facebook/musicgen-small"
|
||||
],
|
||||
"title": "MusicGen Small Loader",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "MusicGenTextEncode",
|
||||
"pos": [
|
||||
400,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"Upbeat electronic dance music"
|
||||
],
|
||||
"title": "API Music Prompt",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenTextEncode"
|
||||
},
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "MusicGenSampler",
|
||||
"pos": [
|
||||
750,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
42,
|
||||
"fixed",
|
||||
30.0,
|
||||
250,
|
||||
3.0
|
||||
],
|
||||
"title": "MusicGen Sampler",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "MusicGenSampler"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 474
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "SaveAudio",
|
||||
"pos": [
|
||||
1100,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"musicgen_small_output",
|
||||
"wav"
|
||||
],
|
||||
"title": "API Audio Output",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "AudioSave"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "MusicGen Small Text-to-Music",
|
||||
"version": "1.0.0",
|
||||
"category": "text-to-music",
|
||||
"tags": [
|
||||
"musicgen",
|
||||
"small",
|
||||
"t2m"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"musicgen-small"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"comfyui-sound-lab"
|
||||
],
|
||||
"vram_min": "6GB"
|
||||
},
|
||||
"parameters": {
|
||||
"prompt": {
|
||||
"node_id": 2,
|
||||
"type": "string"
|
||||
},
|
||||
"duration": {
|
||||
"node_id": 3,
|
||||
"default": 30.0
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "15-25 seconds",
|
||||
"vram_usage": "~4-6GB"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4,
|
||||
"links": [],
|
||||
"last_link_id": 0
|
||||
}
|
||||
222
comfyui/workflows/upscaling/face-upscale-production-v1.json
Normal file
222
comfyui/workflows/upscaling/face-upscale-production-v1.json
Normal file
@@ -0,0 +1,222 @@
|
||||
{
|
||||
"last_node_id": 10,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "LoadImage",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"input_portrait.png",
|
||||
"image"
|
||||
],
|
||||
"title": "API Input Portrait",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [
|
||||
50,
|
||||
400
|
||||
],
|
||||
"widgets_values": [
|
||||
"diffusers/stable-diffusion-xl-base-1.0"
|
||||
],
|
||||
"title": "SDXL Checkpoint Loader",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "FaceDetailer",
|
||||
"pos": [
|
||||
450,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"bbox/face_yolov8m.pt",
|
||||
512,
|
||||
0.5,
|
||||
20,
|
||||
8.0,
|
||||
0.35,
|
||||
10
|
||||
],
|
||||
"title": "Face Detailer",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "FaceDetailer"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "UpscaleModelLoader",
|
||||
"pos": [
|
||||
50,
|
||||
650
|
||||
],
|
||||
"widgets_values": [
|
||||
"RealESRGAN_x2.pth"
|
||||
],
|
||||
"title": "Face Upscale Model",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "UpscaleModelLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "ImageUpscaleWithModel",
|
||||
"pos": [
|
||||
800,
|
||||
100
|
||||
],
|
||||
"title": "Upscale Face Regions",
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "ImageUpscaleWithModel"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "PreviewImage",
|
||||
"pos": [
|
||||
1150,
|
||||
100
|
||||
],
|
||||
"title": "Preview Enhanced",
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "SaveImage",
|
||||
"pos": [
|
||||
1150,
|
||||
400
|
||||
],
|
||||
"widgets_values": [
|
||||
"face_upscale_output"
|
||||
],
|
||||
"title": "API Image Output",
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "SaveImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "Face Upscale Production",
|
||||
"version": "1.0.0",
|
||||
"description": "Portrait-focused upscaling with FaceDetailer from Impact-Pack. Detects and enhances faces with special attention to facial details.",
|
||||
"category": "upscaling",
|
||||
"tags": [
|
||||
"face-upscale",
|
||||
"portrait",
|
||||
"facedetailer",
|
||||
"impact-pack",
|
||||
"production"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"stable-diffusion-xl-base-1.0",
|
||||
"face_yolov8m",
|
||||
"RealESRGAN"
|
||||
],
|
||||
"custom_nodes": [
|
||||
"ComfyUI-Impact-Pack"
|
||||
],
|
||||
"vram_min": "14GB"
|
||||
},
|
||||
"parameters": {
|
||||
"input_image": {
|
||||
"node_id": 1,
|
||||
"type": "image",
|
||||
"required": true
|
||||
},
|
||||
"detection_threshold": {
|
||||
"node_id": 3,
|
||||
"type": "float",
|
||||
"default": 0.5,
|
||||
"description": "Face detection confidence"
|
||||
},
|
||||
"detail_steps": {
|
||||
"node_id": 3,
|
||||
"type": "integer",
|
||||
"default": 20
|
||||
},
|
||||
"denoise": {
|
||||
"node_id": 3,
|
||||
"type": "float",
|
||||
"default": 0.35
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "30-50 seconds",
|
||||
"vram_usage": "~12-16GB"
|
||||
},
|
||||
"use_cases": [
|
||||
"Portrait enhancement",
|
||||
"Professional headshots",
|
||||
"Face restoration",
|
||||
"ID photo upscaling"
|
||||
]
|
||||
}
|
||||
},
|
||||
"version": 0.4,
|
||||
"links": [],
|
||||
"last_link_id": 0
|
||||
}
|
||||
182
comfyui/workflows/upscaling/simple-upscale-production-v1.json
Normal file
182
comfyui/workflows/upscaling/simple-upscale-production-v1.json
Normal file
@@ -0,0 +1,182 @@
|
||||
{
|
||||
"last_node_id": 6,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "LoadImage",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"input_image.png",
|
||||
"image"
|
||||
],
|
||||
"title": "API Input Image",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "UpscaleModelLoader",
|
||||
"pos": [
|
||||
50,
|
||||
400
|
||||
],
|
||||
"widgets_values": [
|
||||
"RealESRGAN_x4.pth"
|
||||
],
|
||||
"title": "Upscale Model Loader",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "UpscaleModelLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "ImageUpscaleWithModel",
|
||||
"pos": [
|
||||
450,
|
||||
100
|
||||
],
|
||||
"title": "Upscale with Model (4x)",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "ImageUpscaleWithModel"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "ImageScale",
|
||||
"pos": [
|
||||
800,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"lanczos",
|
||||
2
|
||||
],
|
||||
"title": "Optional Downscale to 2x",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "ImageScale"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "PreviewImage",
|
||||
"pos": [
|
||||
1150,
|
||||
100
|
||||
],
|
||||
"title": "Preview Output",
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "SaveImage",
|
||||
"pos": [
|
||||
1150,
|
||||
400
|
||||
],
|
||||
"widgets_values": [
|
||||
"simple_upscale_output"
|
||||
],
|
||||
"title": "API Image Output",
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "SaveImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "Simple Upscale Production",
|
||||
"version": "1.0.0",
|
||||
"description": "Fast, straightforward upscaling using RealESRGAN. No diffusion refinement, optimized for speed.",
|
||||
"category": "upscaling",
|
||||
"tags": [
|
||||
"simple-upscale",
|
||||
"fast",
|
||||
"realesrgan",
|
||||
"production"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"RealESRGAN"
|
||||
],
|
||||
"custom_nodes": [],
|
||||
"vram_min": "8GB"
|
||||
},
|
||||
"parameters": {
|
||||
"input_image": {
|
||||
"node_id": 1,
|
||||
"type": "image",
|
||||
"required": true
|
||||
},
|
||||
"model": {
|
||||
"node_id": 2,
|
||||
"type": "string",
|
||||
"default": "RealESRGAN_x4.pth",
|
||||
"options": [
|
||||
"RealESRGAN_x2.pth",
|
||||
"RealESRGAN_x4.pth"
|
||||
]
|
||||
},
|
||||
"final_scale": {
|
||||
"node_id": 4,
|
||||
"type": "integer",
|
||||
"default": 2
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "5-15 seconds",
|
||||
"vram_usage": "~6-8GB"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4,
|
||||
"links": [],
|
||||
"last_link_id": 0
|
||||
}
|
||||
@@ -0,0 +1,264 @@
|
||||
{
|
||||
"last_node_id": 12,
|
||||
"nodes": [
|
||||
{
|
||||
"id": 1,
|
||||
"type": "LoadImage",
|
||||
"pos": [
|
||||
50,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"input_image.png",
|
||||
"image"
|
||||
],
|
||||
"title": "API Input Image",
|
||||
"flags": {},
|
||||
"order": 0,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "LoadImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 2,
|
||||
"type": "CheckpointLoaderSimple",
|
||||
"pos": [
|
||||
50,
|
||||
400
|
||||
],
|
||||
"widgets_values": [
|
||||
"diffusers/stable-diffusion-xl-base-1.0"
|
||||
],
|
||||
"title": "SDXL Checkpoint Loader",
|
||||
"flags": {},
|
||||
"order": 1,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CheckpointLoaderSimple"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 3,
|
||||
"type": "UpscaleModelLoader",
|
||||
"pos": [
|
||||
50,
|
||||
600
|
||||
],
|
||||
"widgets_values": [
|
||||
"RealESRGAN_x2.pth"
|
||||
],
|
||||
"title": "Upscale Model Loader",
|
||||
"flags": {},
|
||||
"order": 2,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "UpscaleModelLoader"
|
||||
},
|
||||
"size": {
|
||||
"0": 350,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 4,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
450,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
"high quality, detailed, sharp"
|
||||
],
|
||||
"title": "API Positive Prompt",
|
||||
"flags": {},
|
||||
"order": 3,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 5,
|
||||
"type": "CLIPTextEncode",
|
||||
"pos": [
|
||||
450,
|
||||
300
|
||||
],
|
||||
"widgets_values": [
|
||||
"blurry, low quality"
|
||||
],
|
||||
"title": "API Negative Prompt",
|
||||
"flags": {},
|
||||
"order": 4,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "CLIPTextEncode"
|
||||
},
|
||||
"size": {
|
||||
"0": 400,
|
||||
"1": 200
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 6,
|
||||
"type": "UltimateSDUpscale",
|
||||
"pos": [
|
||||
800,
|
||||
100
|
||||
],
|
||||
"widgets_values": [
|
||||
2,
|
||||
42,
|
||||
20,
|
||||
8.0,
|
||||
"dpmpp_2m",
|
||||
"karras",
|
||||
0.3,
|
||||
"Linear",
|
||||
512,
|
||||
64
|
||||
],
|
||||
"title": "Ultimate SD Upscale (2x)",
|
||||
"flags": {},
|
||||
"order": 5,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "UltimateSDUpscale"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 7,
|
||||
"type": "VAEDecode",
|
||||
"pos": [
|
||||
1150,
|
||||
100
|
||||
],
|
||||
"title": "VAE Decode",
|
||||
"flags": {},
|
||||
"order": 6,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "VAEDecode"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 8,
|
||||
"type": "PreviewImage",
|
||||
"pos": [
|
||||
1450,
|
||||
100
|
||||
],
|
||||
"title": "Preview Output",
|
||||
"flags": {},
|
||||
"order": 7,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "PreviewImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": 9,
|
||||
"type": "SaveImage",
|
||||
"pos": [
|
||||
1450,
|
||||
550
|
||||
],
|
||||
"widgets_values": [
|
||||
"ultimate_upscale_output"
|
||||
],
|
||||
"title": "API Image Output",
|
||||
"flags": {},
|
||||
"order": 8,
|
||||
"mode": 0,
|
||||
"properties": {
|
||||
"Node name for S&R": "SaveImage"
|
||||
},
|
||||
"size": {
|
||||
"0": 315,
|
||||
"1": 100
|
||||
}
|
||||
}
|
||||
],
|
||||
"extra": {
|
||||
"workflow_info": {
|
||||
"name": "Ultimate SD Upscale Production",
|
||||
"version": "1.0.0",
|
||||
"description": "Professional upscaling with Ultimate SD Upscale. Combines AI upscaling with diffusion refinement for superior detail and quality.",
|
||||
"category": "upscaling",
|
||||
"tags": [
|
||||
"ultimate-sd-upscale",
|
||||
"upscaling",
|
||||
"enhancement",
|
||||
"production"
|
||||
],
|
||||
"requirements": {
|
||||
"models": [
|
||||
"stable-diffusion-xl-base-1.0",
|
||||
"RealESRGAN"
|
||||
],
|
||||
"custom_nodes": [],
|
||||
"vram_min": "18GB"
|
||||
},
|
||||
"parameters": {
|
||||
"input_image": {
|
||||
"node_id": 1,
|
||||
"type": "image",
|
||||
"required": true
|
||||
},
|
||||
"scale": {
|
||||
"node_id": 6,
|
||||
"type": "integer",
|
||||
"default": 2,
|
||||
"options": [
|
||||
2,
|
||||
4
|
||||
]
|
||||
},
|
||||
"tile_size": {
|
||||
"node_id": 6,
|
||||
"type": "integer",
|
||||
"default": 512,
|
||||
"description": "Processing tile size"
|
||||
},
|
||||
"denoise": {
|
||||
"node_id": 6,
|
||||
"type": "float",
|
||||
"default": 0.3,
|
||||
"description": "Refinement strength"
|
||||
}
|
||||
},
|
||||
"performance": {
|
||||
"avg_generation_time": "60-120 seconds (depending on input size)",
|
||||
"vram_usage": "~16-20GB"
|
||||
}
|
||||
}
|
||||
},
|
||||
"version": 0.4,
|
||||
"links": [],
|
||||
"last_link_id": 0
|
||||
}
|
||||
Reference in New Issue
Block a user