All checks were successful
Build and Push RunPod Docker Image / build-and-push (push) Successful in 15s
- Add DiffRhythm to arty.yml references and setup/comfyui-nodes
- Install espeak-ng system dependency for phoneme processing
- Add 7 DiffRhythm models to models_huggingface.yaml with file mappings:
  * ASLP-lab/DiffRhythm-1_2 (95s generation)
  * ASLP-lab/DiffRhythm-full (4m45s generation)
  * ASLP-lab/DiffRhythm-base
  * ASLP-lab/DiffRhythm-vae
  * OpenMuQ/MuQ-MuLan-large
  * OpenMuQ/MuQ-large-msd-iter
  * FacebookAI/xlm-roberta-base
- Create 4 comprehensive workflows:
  * diffrhythm-simple-t2m-v1.json (basic 95s text-to-music)
  * diffrhythm-full-length-t2m-v1.json (4m45s full-length)
  * diffrhythm-reference-based-v1.json (style transfer with reference audio)
  * diffrhythm-random-generation-v1.json (no-prompt random generation)
- Update storage requirements: 90GB essential, 149GB total

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
124 lines · 3.8 KiB · JSON
{
  "last_node_id": 3,
  "last_link_id": 2,
  "nodes": [
    {
      "id": 1,
      "type": "DiffRhythmRandomGeneration",
      "pos": [100, 100],
      "size": [400, 250],
      "flags": {},
      "order": 0,
      "mode": 0,
      "outputs": [
        {
          "name": "AUDIO",
          "type": "AUDIO",
          "links": [1, 2]
        }
      ],
      "properties": {},
      "widgets_values": [
        95.0,
        -1,
        "cfm_model_v1_2",
        "auto"
      ],
      "title": "DiffRhythm Random Generation (No Prompt)"
    },
    {
      "id": 2,
      "type": "PreviewAudio",
      "pos": [600, 100],
      "size": [300, 100],
      "flags": {},
      "order": 1,
      "mode": 0,
      "inputs": [
        {
          "name": "audio",
          "type": "AUDIO",
          "link": 1
        }
      ],
      "properties": {},
      "title": "Preview Audio"
    },
    {
      "id": 3,
      "type": "SaveAudio",
      "pos": [600, 250],
      "size": [300, 100],
      "flags": {},
      "order": 2,
      "mode": 0,
      "inputs": [
        {
          "name": "audio",
          "type": "AUDIO",
          "link": 2
        }
      ],
      "properties": {},
      "widgets_values": [
        "diffrhythm_random_output"
      ],
      "title": "Save Audio"
    }
  ],
  "links": [
    [1, 1, 0, 2, 0, "AUDIO"],
    [2, 1, 0, 3, 0, "AUDIO"]
  ],
  "groups": [],
  "config": {},
  "extra": {
    "workflow_info": {
      "name": "DiffRhythm Random Generation v1",
      "description": "Random music generation without any prompt or guidance - pure AI creativity",
      "version": "1.0.0",
      "author": "valknar@pivoine.art",
      "category": "text-to-music",
      "tags": ["diffrhythm", "music-generation", "random", "no-prompt", "discovery"],
      "requirements": {
        "custom_nodes": ["ComfyUI_DiffRhythm"],
        "models": ["ASLP-lab/DiffRhythm-1_2", "ASLP-lab/DiffRhythm-vae", "OpenMuQ/MuQ-MuLan-large", "OpenMuQ/MuQ-large-msd-iter", "FacebookAI/xlm-roberta-base"],
        "vram_min": "12GB",
        "vram_recommended": "16GB",
        "system_deps": ["espeak-ng"]
      },
      "usage": {
        "duration": "Fixed at 95 seconds for DiffRhythm 1.2 model",
        "seed": "-1 (random seed each generation) or specific number for reproducibility",
        "model": "cfm_model_v1_2 (DiffRhythm 1.2)",
        "device": "auto (automatic GPU selection)",
        "note": "NO prompt, NO guidance, NO reference audio - pure random generation"
      },
      "use_cases": [
        "Discovery: Explore what the model can create without constraints",
        "Inspiration: Generate unexpected musical ideas and styles",
        "Testing: Quick way to verify model is working correctly",
        "Ambient music: Create random background music for various uses",
        "Sample generation: Generate large batches of diverse music samples"
      ],
      "workflow_tips": [
        "Run multiple times to discover different musical styles",
        "Use seed=-1 for completely random output each time",
        "Use fixed seed to reproduce interesting random results",
        "Batch process: Run 10-20 times to find interesting compositions",
        "Save any interesting results with their seed numbers"
      ],
      "notes": [
        "This workflow demonstrates DiffRhythm's ability to generate music without any input",
        "All DiffRhythm parameters are optional - this is the ultimate proof",
        "Results can range from ambient to energetic, classical to electronic",
        "Each generation is unique (with seed=-1)",
        "Generation time: ~30-60 seconds on RTX 4090",
        "Perfect for discovering unexpected musical combinations",
        "PLACEHOLDER: Actual node names and parameters need to be updated after ComfyUI_DiffRhythm installation"
      ]
    }
  },
  "version": 0.4
}