From 1419efac2e2c43d06a580f1ded83c0c6097ef60c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Sebastian=20Kr=C3=BCger?=
Date: Sun, 23 Nov 2025 01:16:02 +0100
Subject: [PATCH] fix: use h94/IP-Adapter repo for CLIP Vision safetensors
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Changed the CLIP Vision model source from the openai/laion repos to
h94/IP-Adapter, which provides the models in proper safetensors format
that CLIPVisionLoader can load directly.

Model sources:
- CLIP-ViT-H (SD 1.5): models/image_encoder/model.safetensors
- CLIP-ViT-bigG (SDXL): sdxl_models/image_encoder/model.safetensors

This fixes the "header too large" deserialization error caused by
trying to load PyTorch .bin files as safetensors.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude
---
 comfyui_models.yaml | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/comfyui_models.yaml b/comfyui_models.yaml
index 6d1a1da..0431c68 100644
--- a/comfyui_models.yaml
+++ b/comfyui_models.yaml
@@ -208,7 +208,7 @@ model_categories:
   # SUPPORT MODELS (CLIP, IP-Adapter, etc.)
   # ==========================================================================
   support_models:
-    - repo_id: openai/clip-vit-large-patch14
+    - repo_id: h94/IP-Adapter
       description: CLIP H - For SD 1.5 IP-Adapter
       size_gb: 2
       essential: true
@@ -216,12 +216,12 @@ model_categories:
       type: clip_vision
       format: fp32
       vram_gb: 2
-      notes: Text-image understanding model
+      notes: Text-image understanding model from IP-Adapter repo
       files:
-        - source: "pytorch_model.bin"
+        - source: "models/image_encoder/model.safetensors"
           dest: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"

-    - repo_id: laion/CLIP-ViT-bigG-14-laion2B-39B-b160k
+    - repo_id: h94/IP-Adapter
       description: CLIP G - For SDXL IP-Adapter
       size_gb: 7
       essential: true
@@ -229,9 +229,9 @@ model_categories:
       type: clip_vision
       format: fp32
       vram_gb: 4
-      notes: Larger CLIP model for SDXL
+      notes: Larger CLIP model for SDXL from IP-Adapter repo
       files:
-        - source: "open_clip_pytorch_model.bin"
+        - source: "sdxl_models/image_encoder/model.safetensors"
           dest: "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"

     - repo_id: google/siglip-so400m-patch14-384
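
The new sources can be sanity-checked before ComfyUI ever loads them. Below is a minimal Python sketch (not part of the patch; it assumes the huggingface_hub and safetensors packages are installed locally) that downloads the SD 1.5 encoder listed in the updated comfyui_models.yaml and validates its safetensors header, the same check that blows up with "header too large" when a pickled .bin file is treated as safetensors.

# Minimal verification sketch: confirm the file really is safetensors before
# CLIPVisionLoader touches it. The package names (huggingface_hub,
# safetensors) are assumptions about the local environment, not part of
# the patch.
import json
import struct

from huggingface_hub import hf_hub_download
from safetensors import safe_open

# Same repo_id/filename as the updated comfyui_models.yaml entry for SD 1.5.
path = hf_hub_download(
    repo_id="h94/IP-Adapter",
    filename="models/image_encoder/model.safetensors",
)

# A safetensors file begins with an 8-byte little-endian header length and a
# JSON header; a pickled .bin file yields a nonsense length here, which is
# what surfaces as the "header too large" deserialization error.
with open(path, "rb") as f:
    header_len = struct.unpack("<Q", f.read(8))[0]
    header = json.loads(f.read(header_len))
print(f"header entries: {len(header)}")

# Equivalent check via the safetensors API itself.
with safe_open(path, framework="pt") as f:
    print(f"tensors: {len(list(f.keys()))}")

The SDXL entry can be checked the same way by swapping the filename for sdxl_models/image_encoder/model.safetensors.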