From b5e1f0ce2aca3c61018117a8ac94cde2ceb65434 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Sun, 23 Nov 2025 01:13:06 +0100
Subject: [PATCH] fix: use safetensors extension for CLIP Vision models
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

CLIPVisionLoader validates against a list of .safetensors files only.
Changed CLIP Vision model extensions from .bin back to .safetensors in
both the model configuration and workflow files.

Note: The actual files are PyTorch .bin format, but ComfyUI's
CLIPVisionLoader expects .safetensors naming convention. The symlinks
will point to the .bin files but use .safetensors extensions.

Changes:
- comfyui_models.yaml: Changed dest extensions to .safetensors
- Workflows: Changed clip_name to .safetensors extension

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude
---
 .../ipadapter-composition-i2i-production-v1.json          | 2 +-
 .../image-to-image/ipadapter-face-i2i-production-v1.json  | 2 +-
 comfyui_models.yaml                                       | 4 ++--
 3 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/comfyui/workflows/image-to-image/ipadapter-composition-i2i-production-v1.json b/comfyui/workflows/image-to-image/ipadapter-composition-i2i-production-v1.json
index de1e724..4c89ef1 100644
--- a/comfyui/workflows/image-to-image/ipadapter-composition-i2i-production-v1.json
+++ b/comfyui/workflows/image-to-image/ipadapter-composition-i2i-production-v1.json
@@ -621,7 +621,7 @@
         "1": 58
       },
       "widgets_values": [
-        "CLIP-ViT-bigG-14-laion2B-39B-b160k.bin"
+        "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"
       ],
       "title": "CLIP Vision Loader",
       "flags": {},
diff --git a/comfyui/workflows/image-to-image/ipadapter-face-i2i-production-v1.json b/comfyui/workflows/image-to-image/ipadapter-face-i2i-production-v1.json
index 8f0ea94..909ac6e 100644
--- a/comfyui/workflows/image-to-image/ipadapter-face-i2i-production-v1.json
+++ b/comfyui/workflows/image-to-image/ipadapter-face-i2i-production-v1.json
@@ -477,7 +477,7 @@
         "1": 58
       },
       "widgets_values": [
-        "CLIP-ViT-bigG-14-laion2B-39B-b160k.bin"
+        "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"
       ],
       "title": "CLIP Vision Loader",
       "flags": {},
diff --git a/comfyui_models.yaml b/comfyui_models.yaml
index c4d2f9c..6d1a1da 100644
--- a/comfyui_models.yaml
+++ b/comfyui_models.yaml
@@ -219,7 +219,7 @@ model_categories:
       notes: Text-image understanding model
       files:
         - source: "pytorch_model.bin"
-          dest: "CLIP-ViT-H-14-laion2B-s32B-b79K.bin"
+          dest: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
 
   - repo_id: laion/CLIP-ViT-bigG-14-laion2B-39B-b160k
     description: CLIP G - For SDXL IP-Adapter
@@ -232,7 +232,7 @@ model_categories:
       notes: Larger CLIP model for SDXL
       files:
         - source: "open_clip_pytorch_model.bin"
-          dest: "CLIP-ViT-bigG-14-laion2B-39B-b160k.bin"
+          dest: "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"
 
   - repo_id: google/siglip-so400m-patch14-384
     description: SigLIP - For FLUX models