diff --git a/comfyui/workflows/image-to-image/ipadapter-composition-i2i-production-v1.json b/comfyui/workflows/image-to-image/ipadapter-composition-i2i-production-v1.json
index de1e724..4c89ef1 100644
--- a/comfyui/workflows/image-to-image/ipadapter-composition-i2i-production-v1.json
+++ b/comfyui/workflows/image-to-image/ipadapter-composition-i2i-production-v1.json
@@ -621,7 +621,7 @@
         "1": 58
       },
       "widgets_values": [
-        "CLIP-ViT-bigG-14-laion2B-39B-b160k.bin"
+        "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"
       ],
       "title": "CLIP Vision Loader",
       "flags": {},
diff --git a/comfyui/workflows/image-to-image/ipadapter-face-i2i-production-v1.json b/comfyui/workflows/image-to-image/ipadapter-face-i2i-production-v1.json
index 8f0ea94..909ac6e 100644
--- a/comfyui/workflows/image-to-image/ipadapter-face-i2i-production-v1.json
+++ b/comfyui/workflows/image-to-image/ipadapter-face-i2i-production-v1.json
@@ -477,7 +477,7 @@
         "1": 58
       },
       "widgets_values": [
-        "CLIP-ViT-bigG-14-laion2B-39B-b160k.bin"
+        "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"
       ],
       "title": "CLIP Vision Loader",
       "flags": {},
diff --git a/comfyui_models.yaml b/comfyui_models.yaml
index c4d2f9c..6d1a1da 100644
--- a/comfyui_models.yaml
+++ b/comfyui_models.yaml
@@ -219,7 +219,7 @@ model_categories:
       notes: Text-image understanding model
       files:
-        - source: "pytorch_model.bin"
-          dest: "CLIP-ViT-H-14-laion2B-s32B-b79K.bin"
+        - source: "open_clip_pytorch_model.safetensors"
+          dest: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
 
     - repo_id: laion/CLIP-ViT-bigG-14-laion2B-39B-b160k
       description: CLIP G - For SDXL IP-Adapter
@@ -232,7 +232,7 @@ model_categories:
       notes: Larger CLIP model for SDXL
      files:
-        - source: "open_clip_pytorch_model.bin"
-          dest: "CLIP-ViT-bigG-14-laion2B-39B-b160k.bin"
+        - source: "open_clip_pytorch_model.safetensors"
+          dest: "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"
 
     - repo_id: google/siglip-so400m-patch14-384
       description: SigLIP - For FLUX models