Files
runpod/comfyui/workflows/text-to-music/diffrhythm-reference-based-v1.json
Sebastian Krüger d74a7cb7cb
All checks were successful
Build and Push RunPod Docker Image / build-and-push (push) Successful in 14s
fix: replace custom Pivoine node with direct DiffRhythm patch
- Remove custom PivoineDiffRhythmRun wrapper node
- Add git patch file for ComfyUI_DiffRhythm __init__.py
- Patch adds LlamaConfig fix at import time
- Add arty script 'fix/diffrhythm-patch' to apply patch
- Revert all workflows to use original DiffRhythmRun
- Remove startup_patch.py and revert start.sh

This approach is cleaner and more maintainable than wrapping the node.
The patch directly fixes the tensor dimension mismatch (32 vs 64) in
DiffRhythm's rotary position embeddings by ensuring num_attention_heads
and num_key_value_heads are properly set based on hidden_size.

References:
- https://github.com/billwuhao/ComfyUI_DiffRhythm/issues/44
- https://github.com/billwuhao/ComfyUI_DiffRhythm/issues/48
2025-11-24 19:27:18 +01:00

165 lines
4.8 KiB
JSON

{
"last_node_id": 4,
"last_link_id": 3,
"nodes": [
{
"id": 1,
"type": "LoadAudio",
"pos": [100, 100],
"size": [300, 100],
"flags": {},
"order": 0,
"mode": 0,
"outputs": [
{
"name": "AUDIO",
"type": "AUDIO",
"links": [1]
}
],
"properties": {
"Node name for S&R": "LoadAudio"
},
"widgets_values": [
"reference_audio.wav"
],
"title": "Load Reference Audio"
},
{
"id": 2,
"type": "DiffRhythmRun",
"pos": [500, 100],
"size": [400, 450],
"flags": {},
"order": 1,
"mode": 0,
"inputs": [
{
"name": "style_audio_or_edit_song",
"type": "AUDIO",
"link": 1
}
],
"outputs": [
{
"name": "AUDIO",
"type": "AUDIO",
"links": [2, 3]
}
],
"properties": {
"Node name for S&R": "DiffRhythmRun"
},
"widgets_values": [
"cfm_model_v1_2.pt",
"Energetic rock music with driving guitar riffs and powerful drums",
true,
"euler",
30,
5,
"speed",
456,
"randomize",
false,
"[-1, 20], [60, -1]"
],
"title": "DiffRhythm Reference-Based Generation"
},
{
"id": 3,
"type": "PreviewAudio",
"pos": [1000, 100],
"size": [300, 100],
"flags": {},
"order": 2,
"mode": 0,
"inputs": [
{
"name": "audio",
"type": "AUDIO",
"link": 2
}
],
"properties": {
"Node name for S&R": "PreviewAudio"
},
"title": "Preview Generated Audio"
},
{
"id": 4,
"type": "SaveAudio",
"pos": [1000, 250],
"size": [300, 100],
"flags": {},
"order": 3,
"mode": 0,
"inputs": [
{
"name": "audio",
"type": "AUDIO",
"link": 3
}
],
"properties": {
"Node name for S&R": "SaveAudio"
},
"widgets_values": [
"diffrhythm_reference_output"
],
"title": "Save Audio"
}
],
"links": [
[1, 1, 0, 2, 0, "AUDIO"],
[2, 2, 0, 3, 0, "AUDIO"],
[3, 2, 0, 4, 0, "AUDIO"]
],
"groups": [],
"config": {},
"extra": {
"workflow_info": {
"name": "DiffRhythm Reference-Based Generation v1",
"description": "Generate new music based on a reference audio file while following text prompt guidance",
"version": "1.0.0",
"author": "valknar@pivoine.art",
"category": "text-to-music",
"tags": ["diffrhythm", "music-generation", "reference-based", "style-transfer"],
"requirements": {
"custom_nodes": ["ComfyUI_DiffRhythm"],
"models": ["ASLP-lab/DiffRhythm-1_2", "ASLP-lab/DiffRhythm-vae", "OpenMuQ/MuQ-MuLan-large", "OpenMuQ/MuQ-large-msd-iter", "FacebookAI/xlm-roberta-base"],
"vram_min": "14GB",
"vram_recommended": "18GB",
"system_deps": ["espeak-ng"]
},
"usage": {
"reference_audio": "Path to reference audio file (WAV, MP3, or other supported formats)",
"model": "cfm_model_v1_2.pt (DiffRhythm 1.2)",
"style_prompt": "Text description guiding the style and characteristics of generated music",
"unload_model": "Boolean to unload model after generation (default: true)",
"odeint_method": "ODE solver: euler, midpoint, rk4, implicit_adams (default: euler)",
"steps": "Number of diffusion steps: 1-100 (default: 30)",
"cfg": "Classifier-free guidance scale: 1-10 (default: 5 for reference-based)",
"quality_or_speed": "Generation mode: quality or speed (default: speed)",
"seed": "Random seed for reproducibility (default: 456)",
"edit": "Enable segment editing mode (default: false)",
"edit_segments": "Segments to edit when edit=true"
},
"use_cases": [
"Style transfer: Apply the style of the reference music to a new prompt",
"Variations: Create variations of existing compositions",
"Genre transformation: Transform music to a different genre while keeping its structure",
"Mood adaptation: Change the mood/emotion while maintaining musical elements"
],
"notes": [
"This workflow combines reference audio with text prompt guidance",
"The reference audio is connected to the style_audio_or_edit_song input",
"Higher cfg values (7-10) = closer adherence to both prompt and reference",
"Lower cfg values (2-4) = more creative interpretation",
"Reference audio should ideally be similar duration to target (95s for cfm_model_v1_2.pt)",
"Can use any format supported by ComfyUI's LoadAudio node"
]
}
},
"version": 0.4
}