feat: add explicit file mappings to comfyui_models.yaml

- Added 'files' array to all models specifying source and destination filenames
- Image models (FLUX, SDXL, SD 3.5): Use original checkpoint filenames
- Video models (CogVideoX, SVD): Use descriptive filenames
- Audio models (MusicGen): Prefix with model name for clarity
- Support models (CLIP, IP-Adapter, AnimateDiff): Keep original names
- Allows precise control over the linked filenames used by ComfyUI

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
commit 0709dec1d4
parent 7ebda1ae44
Date:   2025-11-22 17:20:18 +01:00

comfyui_models.yaml

@@ -36,6 +36,9 @@ model_categories:
 format: fp16
 vram_gb: 23
 notes: Industry-leading image generation quality
+files:
+  - source: "flux1-schnell.safetensors"
+    dest: "flux1-schnell.safetensors"
 - repo_id: black-forest-labs/FLUX.1-dev
 description: FLUX.1 Dev - Balanced quality/speed
@@ -46,6 +49,9 @@ model_categories:
 format: fp16
 vram_gb: 23
 notes: Development version with enhanced features
+files:
+  - source: "flux1-dev.safetensors"
+    dest: "flux1-dev.safetensors"
 - repo_id: stabilityai/stable-diffusion-xl-base-1.0
 description: SDXL Base 1.0 - Industry standard
@@ -56,6 +62,9 @@ model_categories:
 format: fp16
 vram_gb: 12
 notes: Most widely used Stable Diffusion model
+files:
+  - source: "sd_xl_base_1.0.safetensors"
+    dest: "sd_xl_base_1.0.safetensors"
 - repo_id: stabilityai/stable-diffusion-xl-refiner-1.0
 description: SDXL Refiner 1.0 - Enhances base output
@@ -66,6 +75,9 @@ model_categories:
 format: fp16
 vram_gb: 12
 notes: Use after SDXL base for improved details
+files:
+  - source: "sd_xl_refiner_1.0.safetensors"
+    dest: "sd_xl_refiner_1.0.safetensors"
 - repo_id: stabilityai/stable-diffusion-3.5-large
 description: SD 3.5 Large - Latest Stability AI
@@ -76,6 +88,9 @@ model_categories:
 format: fp16
 vram_gb: 20
 notes: Newest generation Stable Diffusion
+files:
+  - source: "sd3.5_large.safetensors"
+    dest: "sd3.5_large.safetensors"
 # ==========================================================================
 # VIDEO GENERATION MODELS
@@ -92,6 +107,9 @@ model_categories:
 frames: 49
 resolution: 720p
 notes: State-of-the-art text-to-video generation
+files:
+  - source: "transformer/diffusion_pytorch_model.safetensors"
+    dest: "cogvideox-5b-transformer.safetensors"
 - repo_id: stabilityai/stable-video-diffusion-img2vid
 description: SVD - 14 frame image-to-video
@@ -104,6 +122,9 @@ model_categories:
 frames: 14
 resolution: 576x1024
 notes: Convert images to short video clips
+files:
+  - source: "svd.safetensors"
+    dest: "svd.safetensors"
 - repo_id: stabilityai/stable-video-diffusion-img2vid-xt
 description: SVD-XT - 25 frame image-to-video
@@ -116,6 +137,9 @@ model_categories:
 frames: 25
 resolution: 576x1024
 notes: Extended frame count version
+files:
+  - source: "svd_xt.safetensors"
+    dest: "svd_xt.safetensors"
 # ==========================================================================
 # AUDIO GENERATION MODELS
@@ -131,6 +155,9 @@ model_categories:
 vram_gb: 4
 duration_seconds: 30
 notes: Fastest music generation, lower quality
+files:
+  - source: "pytorch_model.bin"
+    dest: "musicgen-small-pytorch_model.bin"
 - repo_id: facebook/musicgen-medium
 description: MusicGen Medium - Balanced quality
@@ -142,6 +169,9 @@ model_categories:
 vram_gb: 8
 duration_seconds: 30
 notes: Best balance of speed and quality
+files:
+  - source: "pytorch_model.bin"
+    dest: "musicgen-medium-pytorch_model.bin"
 - repo_id: facebook/musicgen-large
 description: MusicGen Large - Highest quality
@@ -153,6 +183,9 @@ model_categories:
 vram_gb: 16
 duration_seconds: 30
 notes: Best quality, slower generation
+files:
+  - source: "pytorch_model.bin"
+    dest: "musicgen-large-pytorch_model.bin"
 # ==========================================================================
 # SUPPORT MODELS (CLIP, IP-Adapter, etc.)
@@ -167,6 +200,9 @@ model_categories:
 format: fp32
 vram_gb: 2
 notes: Text-image understanding model
+files:
+  - source: "pytorch_model.bin"
+    dest: "clip-vit-large-patch14.bin"
 - repo_id: laion/CLIP-ViT-bigG-14-laion2B-39B-b160k
 description: CLIP G - For SDXL IP-Adapter
@@ -177,6 +213,9 @@ model_categories:
 format: fp32
 vram_gb: 4
 notes: Larger CLIP model for SDXL
+files:
+  - source: "open_clip_pytorch_model.bin"
+    dest: "clip-vit-bigg-14.bin"
 - repo_id: google/siglip-so400m-patch14-384
 description: SigLIP - For FLUX models
@@ -187,6 +226,9 @@ model_categories:
 format: fp32
 vram_gb: 2
 notes: Advanced image-text alignment
+files:
+  - source: "model.safetensors"
+    dest: "siglip-so400m-patch14-384.safetensors"
 # ==========================================================================
 # ANIMATEDIFF MODELS
@@ -202,6 +244,9 @@ model_categories:
 format: safetensors
 vram_gb: 4
 notes: Motion modules for AnimateDiff text-to-video
+files:
+  - source: "mm_sd_v15_v2.ckpt"
+    dest: "mm_sd_v15_v2.ckpt"
 # ==========================================================================
 # IP-ADAPTER MODELS
@@ -216,6 +261,9 @@ model_categories:
 format: safetensors
 vram_gb: 4
 notes: Image prompt adapters for style transfer
+files:
+  - source: "sdxl_models/ip-adapter_sdxl.safetensors"
+    dest: "ip-adapter_sdxl.safetensors"
 # ============================================================================
 # STORAGE & VRAM SUMMARIES