From f05e3940aa3050d22f76b9695e81cc494d3c6640 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Sun, 23 Nov 2025 01:00:56 +0100
Subject: [PATCH] fix: correct CLIP Vision model file extensions to .bin
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Changed the CLIP Vision model destination filenames from .safetensors to
.bin to match the actual PyTorch model format. The files are ZIP/pickle
archives with 'PK' magic bytes, not safetensors files, which caused
"header too large" deserialization errors in ComfyUI's
IPAdapterUnifiedLoader.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude
---
 comfyui_models.yaml | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/comfyui_models.yaml b/comfyui_models.yaml
index 6d1a1da..c4d2f9c 100644
--- a/comfyui_models.yaml
+++ b/comfyui_models.yaml
@@ -219,7 +219,7 @@ model_categories:
       notes: Text-image understanding model
       files:
         - source: "pytorch_model.bin"
-          dest: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
+          dest: "CLIP-ViT-H-14-laion2B-s32B-b79K.bin"
 
     - repo_id: laion/CLIP-ViT-bigG-14-laion2B-39B-b160k
       description: CLIP G - For SDXL IP-Adapter
@@ -232,7 +232,7 @@
       notes: Larger CLIP model for SDXL
       files:
         - source: "open_clip_pytorch_model.bin"
-          dest: "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"
+          dest: "CLIP-ViT-bigG-14-laion2B-39B-b160k.bin"
 
     - repo_id: google/siglip-so400m-patch14-384
       description: SigLIP - For FLUX models
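
Note (not part of the patch): the format mismatch described in the commit
message can be checked by inspecting the first bytes of the downloaded
file. PyTorch .bin checkpoints saved with torch.save are ZIP archives and
start with the 'PK' magic bytes, while safetensors files start with an
8-byte little-endian header length followed by a JSON header. The Python
sketch below is a minimal illustration of that check; the file path in the
usage line is a placeholder, not a path defined by this repository.

    import struct

    def detect_checkpoint_format(path: str) -> str:
        """Best-effort detection of a model checkpoint's container format."""
        with open(path, "rb") as f:
            data = f.read(16)
        # PyTorch pickle checkpoints (torch.save) are ZIP archives: "PK" magic bytes.
        if data[:2] == b"PK":
            return "pytorch_zip"
        # safetensors: first 8 bytes are the little-endian length of a JSON
        # header that immediately follows and starts with '{'.
        if len(data) >= 9:
            (header_len,) = struct.unpack("<Q", data[:8])
            if header_len > 0 and data[8:9] == b"{":
                return "safetensors"
        return "unknown"

    if __name__ == "__main__":
        # Placeholder path -- point this at the downloaded CLIP Vision weights.
        print(detect_checkpoint_format(
            "models/clip_vision/CLIP-ViT-H-14-laion2B-s32B-b79K.bin"))

A file that reports "pytorch_zip" will fail safetensors deserialization
with a "header too large" error, which is why the destination extension is
changed to .bin here rather than renaming the file to .safetensors.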