diff --git a/comfyui_models.yaml b/comfyui_models.yaml
index 6d1a1da..0431c68 100644
--- a/comfyui_models.yaml
+++ b/comfyui_models.yaml
@@ -208,7 +208,7 @@ model_categories:
   # SUPPORT MODELS (CLIP, IP-Adapter, etc.)
   # ==========================================================================
   support_models:
-    - repo_id: openai/clip-vit-large-patch14
+    - repo_id: h94/IP-Adapter
       description: CLIP H - For SD 1.5 IP-Adapter
       size_gb: 2
       essential: true
@@ -216,12 +216,12 @@ model_categories:
       type: clip_vision
       format: fp32
       vram_gb: 2
-      notes: Text-image understanding model
+      notes: Text-image understanding model from IP-Adapter repo
       files:
-        - source: "pytorch_model.bin"
+        - source: "models/image_encoder/model.safetensors"
           dest: "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
 
-    - repo_id: laion/CLIP-ViT-bigG-14-laion2B-39B-b160k
+    - repo_id: h94/IP-Adapter
       description: CLIP G - For SDXL IP-Adapter
       size_gb: 7
       essential: true
@@ -229,9 +229,9 @@ model_categories:
       type: clip_vision
       format: fp32
       vram_gb: 4
-      notes: Larger CLIP model for SDXL
+      notes: Larger CLIP model for SDXL from IP-Adapter repo
       files:
-        - source: "open_clip_pytorch_model.bin"
+        - source: "sdxl_models/image_encoder/model.safetensors"
           dest: "CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors"
 
     - repo_id: google/siglip-so400m-patch14-384
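
For context, each `files` entry pairs a path inside the Hugging Face repo (`source`) with the filename expected on disk (`dest`). A minimal sketch of how the updated SD 1.5 entry could be resolved with `huggingface_hub` follows; the `fetch_clip_vision` helper and the `models/clip_vision` target directory are illustrative assumptions, not code from this repo.

```python
# Hypothetical sketch of resolving one support_models entry from comfyui_models.yaml.
# Assumes the manifest is consumed via huggingface_hub; names here are illustrative.
import shutil
from pathlib import Path

from huggingface_hub import hf_hub_download


def fetch_clip_vision(models_dir: str = "models/clip_vision") -> Path:
    """Download the ViT-H image encoder shipped inside h94/IP-Adapter
    and store it under the destination filename from the manifest."""
    src = hf_hub_download(
        repo_id="h94/IP-Adapter",                            # new repo_id from this diff
        filename="models/image_encoder/model.safetensors",   # new `source` path
    )
    dest = Path(models_dir) / "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
    dest.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(src, dest)                                   # save under the `dest` name
    return dest
```

The SDXL entry would resolve the same way, with `sdxl_models/image_encoder/model.safetensors` copied to `CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors`.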