diff --git a/PLAN.md b/PLAN.md
index 38cfc9c..de4abcf 100644
--- a/PLAN.md
+++ b/PLAN.md
@@ -95,10 +95,16 @@
 - ✅ Per-track gain and pan during playback
 - ✅ Solo/Mute handling during playback
 - ✅ Per-track effect chains with device rack
-- ✅ Collapsible effects section below each track
+- ✅ Collapsible effects section below each track (192px height)
 - ✅ Effect browser with categorized effects
 - ✅ Horizontal scrolling device rack (Ableton-style)
-- ✅ Individual effect cards with expand/collapse
+- ✅ Individual effect cards with side-folding design (40px collapsed, 384px+ expanded)
+- ✅ Real-time parameter controls for all effects (filters, dynamics, time-based, advanced)
+- ✅ Inline parameter editing with sliders and controls (multi-column grid layout)
+- ✅ Real-time effect processing during playback with Web Audio API nodes
+- ✅ Effect bypass (enable/disable effects in real time)
+- ✅ Real-time parameter updates: all filters, compressor, limiter, gate, delay, chorus, flanger, phaser, distortion, bitcrusher, pitch, time stretch
+- 🔲 Remaining real-time gaps (TODO): reverb room size/damping and phaser stage count still require recreating the effect's node graph
 - 🔲 Master channel effects (TODO: Implement master effect chain UI similar to per-track effects)
 
 ### Next Steps
@@ -570,10 +576,14 @@ audio-ui/
 - [ ] Send/Return effects - FUTURE
 - [ ] Sidechain support (advanced) - FUTURE
 
-#### 7.4 Track Effects (Pending - Phase 8+)
-- [ ] Per-track effect chain
-- [ ] Effect rack UI
-- [ ] Effect bypass per track
+#### 7.4 Track Effects (Complete)
+- [x] Per-track effect chain
+- [x] Effect rack UI
+- [x] Effect bypass per track
+- [x] Real-time effect processing during playback
+- [x] Add/remove effects during playback
+- [x] Real-time parameter updates
+- [x] Effect chain persistence (localStorage)
 
 ### Phase 8: Recording
 
diff --git a/components/editor/AudioEditor.tsx b/components/editor/AudioEditor.tsx
index c7aca0b..f295dd1 100644
--- a/components/editor/AudioEditor.tsx
+++ b/components/editor/AudioEditor.tsx
@@ -33,6 +33,15 @@ export function AudioEditor() {
     clearTracks,
   } = useMultiTrack();
 
+  // Debug: log track updates so effect-chain changes are visible in the console
+  React.useEffect(() => {
+    console.log('[AudioEditor] Tracks updated:', tracks.map(t => ({
+      name: t.name,
+      effectCount: t.effectChain.effects.length,
+      effects: t.effectChain.effects.map(e => e.name)
+    })));
+  }, [tracks]);
+
   const {
     isPlaying,
     currentTime,
@@ -280,7 +289,7 @@ export function AudioEditor() {
       {/* Track Actions */}
- diff --git a/components/effects/EffectBrowser.tsx b/components/effects/EffectBrowser.tsx index 2cc3a44..c29ad21 100644 --- a/components/effects/EffectBrowser.tsx +++ b/components/effects/EffectBrowser.tsx @@ -19,7 +19,6 @@ const EFFECT_CATEGORIES = { 'Time-Based': ['delay', 'reverb', 'chorus', 'flanger', 'phaser'] as EffectType[], 'Distortion': ['distortion', 'bitcrusher'] as EffectType[], 'Pitch & Time': ['pitch', 'timestretch'] as EffectType[], - 'Utility': ['normalize', 'fadeIn', 'fadeOut', 'reverse'] as EffectType[], }; export function EffectBrowser({ open, onClose, onSelectEffect }: EffectBrowserProps) { diff --git a/components/effects/EffectDevice.tsx b/components/effects/EffectDevice.tsx index f73a897..769dacb 100644 --- a/components/effects/EffectDevice.tsx +++ b/components/effects/EffectDevice.tsx @@ -1,10 +1,11 @@ 'use client'; import * as React from 'react'; -import { ChevronDown, ChevronUp, Power, X } from 'lucide-react'; +import { ChevronLeft, ChevronRight, Power, X } from 'lucide-react'; import { Button } from '@/components/ui/Button'; import { cn } from '@/lib/utils/cn'; import type { ChainEffect } from '@/lib/audio/effects/chain'; +import { EffectParameters } from './EffectParameters'; export interface EffectDeviceProps { effect: ChainEffect; @@ -24,84 +25,80 @@ export function EffectDevice({ return (
- {/* Device Header */} -
+ {!isExpanded ? ( + /* Collapsed State - No Header */ -
- - -
-
- - {/* Device Parameters */} - {isExpanded && ( -
-
-
- Type: {effect.type} -
- {effect.parameters && Object.keys(effect.parameters).length > 0 && ( -
- Parameters: -
- {Object.entries(effect.parameters).map(([key, value]) => ( -
- {key}: - {String(value)} -
- ))} -
-
- )} -
-
- Parameter controls coming soon -
-
- )} - - {/* Collapsed State Indicator */} - {!isExpanded && ( -
- - {effect.type} + {effect.name} -
+
+ + ) : ( + <> + {/* Full-Width Header Row */} +
+ + {effect.name} + + +
+ + {/* Device Body */} +
+ +
+ )}
); diff --git a/components/effects/EffectParameters.tsx b/components/effects/EffectParameters.tsx new file mode 100644 index 0000000..77813c7 --- /dev/null +++ b/components/effects/EffectParameters.tsx @@ -0,0 +1,722 @@ +'use client'; + +import * as React from 'react'; +import { Button } from '@/components/ui/Button'; +import { Slider } from '@/components/ui/Slider'; +import type { ChainEffect, EffectType } from '@/lib/audio/effects/chain'; +import type { + PitchShifterParameters, + TimeStretchParameters, + DistortionParameters, + BitcrusherParameters, +} from '@/lib/audio/effects/advanced'; +import type { + CompressorParameters, + LimiterParameters, + GateParameters, +} from '@/lib/audio/effects/dynamics'; +import type { + DelayParameters, + ReverbParameters, + ChorusParameters, + FlangerParameters, + PhaserParameters, +} from '@/lib/audio/effects/time-based'; +import type { FilterOptions } from '@/lib/audio/effects/filters'; + +export interface EffectParametersProps { + effect: ChainEffect; + onUpdateParameters?: (parameters: any) => void; +} + +export function EffectParameters({ effect, onUpdateParameters }: EffectParametersProps) { + const params = effect.parameters || {}; + + const updateParam = (key: string, value: any) => { + if (onUpdateParameters) { + onUpdateParameters({ ...params, [key]: value }); + } + }; + + // Filter effects + if (['lowpass', 'highpass', 'bandpass', 'notch', 'lowshelf', 'highshelf', 'peaking'].includes(effect.type)) { + const filterParams = params as FilterOptions; + return ( +
+
+ + updateParam('frequency', value)} + min={20} + max={20000} + step={1} + /> +
+
+ + updateParam('Q', value)} + min={0.1} + max={20} + step={0.1} + /> +
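+          {/* Shelf and peaking filters additionally expose a gain control in dB */}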
+ {['lowshelf', 'highshelf', 'peaking'].includes(effect.type) && ( +
+ + updateParam('gain', value)} + min={-40} + max={40} + step={0.5} + /> +
+ )} +
+ ); + } + + // Compressor + if (effect.type === 'compressor') { + const compParams = params as CompressorParameters; + return ( +
+
+ + updateParam('threshold', value)} + min={-60} + max={0} + step={0.5} + /> +
+
+ + updateParam('ratio', value)} + min={1} + max={20} + step={0.5} + /> +
+
+ + updateParam('knee', value)} + min={0} + max={40} + step={1} + /> +
+
+ + updateParam('attack', value)} + min={0.001} + max={1} + step={0.001} + /> +
+
+ + updateParam('release', value)} + min={0.01} + max={3} + step={0.01} + /> +
+
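+        /* All five controls map one-to-one onto DynamicsCompressorNode AudioParams in processor.ts */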
+ ); + } + + // Limiter + if (effect.type === 'limiter') { + const limParams = params as LimiterParameters; + return ( +
+
+ + updateParam('threshold', value)} + min={-30} + max={0} + step={0.5} + /> +
+
+ + updateParam('release', value)} + min={0.01} + max={1} + step={0.01} + /> +
+
+ + updateParam('makeupGain', value)} + min={0} + max={20} + step={0.5} + /> +
+
+ ); + } + + // Gate + if (effect.type === 'gate') { + const gateParams = params as GateParameters; + return ( +
+
+ + updateParam('threshold', value)} + min={-80} + max={0} + step={0.5} + /> +
+
+ + updateParam('ratio', value)} + min={1} + max={20} + step={0.5} + /> +
+
+ + updateParam('attack', value)} + min={0.0001} + max={0.5} + step={0.0001} + /> +
+
+ + updateParam('release', value)} + min={0.01} + max={3} + step={0.01} + /> +
+
+ ); + } + + // Delay + if (effect.type === 'delay') { + const delayParams = params as DelayParameters; + return ( +
+
+ + updateParam('time', value)} + min={0.001} + max={2} + step={0.001} + /> +
+
+ + updateParam('feedback', value)} + min={0} + max={0.9} + step={0.01} + /> +
+
+ + updateParam('mix', value)} + min={0} + max={1} + step={0.01} + /> +
+
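+        /* Feedback is capped at 0.9 so the delay's feedback loop cannot run away */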
+ ); + } + + // Reverb + if (effect.type === 'reverb') { + const reverbParams = params as ReverbParameters; + return ( +
+
+ + updateParam('roomSize', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ + updateParam('damping', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ + updateParam('mix', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ ); + } + + // Chorus + if (effect.type === 'chorus') { + const chorusParams = params as ChorusParameters; + return ( +
+
+ + updateParam('rate', value)} + min={0.1} + max={10} + step={0.1} + /> +
+
+ + updateParam('depth', value)} + min={0.0001} + max={0.01} + step={0.0001} + /> +
+
+ + updateParam('mix', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ ); + } + + // Flanger + if (effect.type === 'flanger') { + const flangerParams = params as FlangerParameters; + return ( +
+
+ + updateParam('rate', value)} + min={0.1} + max={10} + step={0.1} + /> +
+
+ + updateParam('depth', value)} + min={0.0001} + max={0.01} + step={0.0001} + /> +
+
+ + updateParam('feedback', value)} + min={0} + max={0.95} + step={0.01} + /> +
+
+ + updateParam('mix', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ ); + } + + // Phaser + if (effect.type === 'phaser') { + const phaserParams = params as PhaserParameters; + return ( +
+
+ + updateParam('rate', value)} + min={0.1} + max={10} + step={0.1} + /> +
+
+ + updateParam('depth', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ + updateParam('stages', Math.round(value))} + min={2} + max={12} + step={1} + /> +
+
+ + updateParam('mix', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ ); + } + + // Pitch Shifter + if (effect.type === 'pitch') { + const pitchParams = params as PitchShifterParameters; + return ( +
+
+ + updateParam('semitones', Math.round(value))} + min={-12} + max={12} + step={1} + /> +
+
+ + updateParam('cents', Math.round(value))} + min={-100} + max={100} + step={1} + /> +
+
+ + updateParam('mix', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ ); + } + + // Time Stretch + if (effect.type === 'timestretch') { + const stretchParams = params as TimeStretchParameters; + return ( +
+
+ + updateParam('rate', value)} + min={0.5} + max={2} + step={0.01} + /> +
+
+ updateParam('preservePitch', e.target.checked)} + className="h-3 w-3 rounded border-border" + /> + +
+
+ + updateParam('mix', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ ); + } + + // Distortion + if (effect.type === 'distortion') { + const distParams = params as DistortionParameters; + return ( +
+
+ +
+ {(['soft', 'hard', 'tube'] as const).map((type) => ( + + ))} +
+
+
+ + updateParam('drive', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ + updateParam('tone', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ + updateParam('output', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ + updateParam('mix', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ ); + } + + // Bitcrusher + if (effect.type === 'bitcrusher') { + const crushParams = params as BitcrusherParameters; + return ( +
+
+ + updateParam('bitDepth', Math.round(value))} + min={1} + max={16} + step={1} + /> +
+
+ + updateParam('sampleRate', Math.round(value))} + min={100} + max={48000} + step={100} + /> +
+
+ + updateParam('mix', value)} + min={0} + max={1} + step={0.01} + /> +
+
+ ); + } + + // Fallback for unknown effects + return ( +
+ No parameters available +
+ ); +} diff --git a/components/tracks/Track.tsx b/components/tracks/Track.tsx index d9c88f9..876832d 100644 --- a/components/tracks/Track.tsx +++ b/components/tracks/Track.tsx @@ -28,6 +28,7 @@ export interface TrackProps { onLoadAudio?: (buffer: AudioBuffer) => void; onToggleEffect?: (effectId: string) => void; onRemoveEffect?: (effectId: string) => void; + onUpdateEffect?: (effectId: string, parameters: any) => void; onAddEffect?: (effectType: EffectType) => void; } @@ -49,6 +50,7 @@ export function Track({ onLoadAudio, onToggleEffect, onRemoveEffect, + onUpdateEffect, onAddEffect, }: TrackProps) { const canvasRef = React.useRef(null); @@ -509,8 +511,8 @@ export function Track({ {/* Horizontal scrolling device rack - expanded state */} {showEffects && ( -
-
+
+
{track.effectChain.effects.length === 0 ? (
No devices. Click + to add an effect. @@ -522,6 +524,7 @@ export function Track({ effect={effect} onToggleEnabled={() => onToggleEffect?.(effect.id)} onRemove={() => onRemoveEffect?.(effect.id)} + onUpdateParameters={(params) => onUpdateEffect?.(effect.id, params)} /> )) )} diff --git a/components/tracks/TrackList.tsx b/components/tracks/TrackList.tsx index 5186c3c..48ff501 100644 --- a/components/tracks/TrackList.tsx +++ b/components/tracks/TrackList.tsx @@ -124,6 +124,15 @@ export function TrackList({ }; onUpdateTrack(track.id, { effectChain: updatedChain }); }} + onUpdateEffect={(effectId, parameters) => { + const updatedChain = { + ...track.effectChain, + effects: track.effectChain.effects.map((e) => + e.id === effectId ? { ...e, parameters } : e + ), + }; + onUpdateTrack(track.id, { effectChain: updatedChain }); + }} onAddEffect={(effectType) => { const newEffect = createEffect( effectType, diff --git a/lib/audio/effects/chain.ts b/lib/audio/effects/chain.ts index 6fc209a..81a20a2 100644 --- a/lib/audio/effects/chain.ts +++ b/lib/audio/effects/chain.ts @@ -25,11 +25,6 @@ import type { FilterOptions } from './filters'; // Effect type identifier export type EffectType = - // Basic - | 'normalize' - | 'fadeIn' - | 'fadeOut' - | 'reverse' // Filters | 'lowpass' | 'highpass' @@ -116,7 +111,7 @@ export function createEffect( type, name, enabled: true, - parameters, + parameters: parameters || getDefaultParameters(type), }; } @@ -230,14 +225,63 @@ export function loadPreset(preset: EffectPreset): EffectChain { return JSON.parse(JSON.stringify(preset.chain)); // Deep clone } +/** + * Get default parameters for an effect type + */ +export function getDefaultParameters(type: EffectType): EffectParameters { + switch (type) { + // Filters + case 'lowpass': + case 'highpass': + return { frequency: 1000, Q: 1 } as FilterOptions; + case 'bandpass': + case 'notch': + return { frequency: 1000, Q: 1 } as FilterOptions; + case 'lowshelf': + case 'highshelf': + return { frequency: 1000, Q: 1, gain: 0 } as FilterOptions; + case 'peaking': + return { frequency: 1000, Q: 1, gain: 0 } as FilterOptions; + + // Dynamics + case 'compressor': + return { threshold: -24, ratio: 4, attack: 0.003, release: 0.25, knee: 30, makeupGain: 0 } as CompressorParameters; + case 'limiter': + return { threshold: -3, attack: 0.001, release: 0.05, makeupGain: 0 } as LimiterParameters; + case 'gate': + return { threshold: -40, ratio: 10, attack: 0.001, release: 0.1, knee: 0 } as GateParameters; + + // Time-based + case 'delay': + return { time: 0.5, feedback: 0.3, mix: 0.5 } as DelayParameters; + case 'reverb': + return { roomSize: 0.5, damping: 0.5, mix: 0.3 } as ReverbParameters; + case 'chorus': + return { rate: 1.5, depth: 0.002, mix: 0.5 } as ChorusParameters; + case 'flanger': + return { rate: 0.5, depth: 0.002, feedback: 0.5, mix: 0.5 } as FlangerParameters; + case 'phaser': + return { rate: 0.5, depth: 0.5, stages: 4, mix: 0.5 } as PhaserParameters; + + // Advanced + case 'distortion': + return { drive: 0.5, type: 'soft', output: 0.7, mix: 1 } as DistortionParameters; + case 'pitch': + return { semitones: 0, cents: 0, mix: 1 } as PitchShifterParameters; + case 'timestretch': + return { rate: 1.0, preservePitch: false, mix: 1 } as TimeStretchParameters; + case 'bitcrusher': + return { bitDepth: 8, sampleRate: 8000, mix: 1 } as BitcrusherParameters; + + default: + return {}; + } +} + /** * Get effect display name */ export const EFFECT_NAMES: Record = { - normalize: 'Normalize', - fadeIn: 'Fade In', - fadeOut: 'Fade Out', - 
reverse: 'Reverse', lowpass: 'Low-Pass Filter', highpass: 'High-Pass Filter', bandpass: 'Band-Pass Filter', diff --git a/lib/audio/effects/processor.ts b/lib/audio/effects/processor.ts new file mode 100644 index 0000000..7c677f1 --- /dev/null +++ b/lib/audio/effects/processor.ts @@ -0,0 +1,1059 @@ +import type { EffectChain, ChainEffect } from './chain'; +import type { FilterOptions } from './filters'; +import type { + CompressorParameters, + LimiterParameters, + GateParameters, +} from './dynamics'; +import type { + DelayParameters, + ReverbParameters, + ChorusParameters, + FlangerParameters, + PhaserParameters, +} from './time-based'; +import type { + DistortionParameters, + BitcrusherParameters, + PitchShifterParameters, + TimeStretchParameters, +} from './advanced'; + +/** + * Apply effect chain to audio signal using Web Audio API nodes + * Creates and connects audio nodes based on the effect chain + * @param audioContext - The Web Audio API context + * @param inputNode - The audio node to apply effects to + * @param chain - The effect chain configuration + * @returns The final output node (or input if no effects) + */ +export interface EffectNodeInfo { + effect: ChainEffect; + node: any; // Can be AudioNode or custom structure with input/output + dryGain?: GainNode; // Dry signal (bypass) + wetGain?: GainNode; // Wet signal (through effect) + // Internal nodes for complex effects (for real-time parameter updates) + internalNodes?: { + lfo?: OscillatorNode; + lfoGain?: GainNode; + delay?: DelayNode; + delay1?: DelayNode; + delay2?: DelayNode; + feedback?: GainNode; + wetMix?: GainNode; + dryMix?: GainNode; + convolver?: ConvolverNode; + allpassFilters?: BiquadFilterNode[]; + waveshaper?: WaveShaperNode; + preGain?: GainNode; + postGain?: GainNode; + }; +} + +export function applyEffectChain( + audioContext: AudioContext, + inputNode: AudioNode, + chain: EffectChain +): { outputNode: AudioNode; effectNodes: EffectNodeInfo[] } { + let currentNode: AudioNode = inputNode; + const effectNodes: EffectNodeInfo[] = []; + + console.log('[applyEffectChain] Processing', chain.effects.length, 'effects'); + + // Apply each effect in the chain (we'll handle bypass via gain) + for (const effect of chain.effects) { + console.log('[applyEffectChain] Effect:', effect.name, 'enabled:', effect.enabled, 'type:', effect.type, 'params:', effect.parameters); + + const effectNode = createEffectNode(audioContext, effect); + console.log('[applyEffectChain] Created effect node:', effectNode ? 
'success' : 'null'); + + if (effectNode) { + // Create bypass mechanism using gain nodes + const dryGain = audioContext.createGain(); + const wetGain = audioContext.createGain(); + const output = audioContext.createGain(); + + // Set bypass state + if (effect.enabled) { + dryGain.gain.value = 0; // No dry signal + wetGain.gain.value = 1; // Full wet signal + } else { + dryGain.gain.value = 1; // Full dry signal + wetGain.gain.value = 0; // No wet signal + } + + // Connect dry path (bypass) + currentNode.connect(dryGain); + dryGain.connect(output); + + // Connect wet path (through effect) + if ('input' in effectNode && 'output' in effectNode) { + currentNode.connect(effectNode.input as AudioNode); + (effectNode.output as AudioNode).connect(wetGain); + } else { + currentNode.connect(effectNode); + effectNode.connect(wetGain); + } + wetGain.connect(output); + + effectNodes.push({ + effect, + node: effectNode, + dryGain, + wetGain, + internalNodes: (effectNode as any).internalNodes // Store internal nodes if they exist + }); + + currentNode = output; + console.log('[applyEffectChain] Connected with bypass routing'); + } + } + + console.log('[applyEffectChain] Returning output node with', effectNodes.length, 'effect nodes'); + return { outputNode: currentNode, effectNodes }; +} + +/** + * Update effect node parameters in real-time + */ +export function updateEffectParameters( + audioContext: AudioContext, + effectNodeInfo: EffectNodeInfo, + newEffect: ChainEffect +): void { + const node = effectNodeInfo.node; + const params = newEffect.parameters || {}; + + console.log('[updateEffectParameters] Updating', newEffect.type, 'with params:', params); + + switch (newEffect.type) { + // Filters - can update in real-time + case 'lowpass': + case 'highpass': + case 'bandpass': + case 'notch': + case 'lowshelf': + case 'highshelf': + case 'peaking': { + const filterParams = params as FilterOptions; + const filter = node as BiquadFilterNode; + if (filter.frequency) { + filter.frequency.setValueAtTime(filterParams.frequency || 1000, audioContext.currentTime); + filter.Q.setValueAtTime(filterParams.Q || 1, audioContext.currentTime); + if (filterParams.gain !== undefined && filter.gain) { + filter.gain.setValueAtTime(filterParams.gain, audioContext.currentTime); + } + console.log('[updateEffectParameters] Updated filter params'); + } + break; + } + + // Time-based effects with internal nodes + case 'delay': { + const delayParams = params as DelayParameters; + if (effectNodeInfo.internalNodes) { + const { delay, feedback, wetMix, dryMix } = effectNodeInfo.internalNodes; + + if (delay) { + delay.delayTime.setValueAtTime(delayParams.time || 0.5, audioContext.currentTime); + } + if (feedback) { + feedback.gain.setValueAtTime(delayParams.feedback || 0.3, audioContext.currentTime); + } + if (wetMix && dryMix) { + const mix = delayParams.mix || 0.5; + wetMix.gain.setValueAtTime(mix, audioContext.currentTime); + dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime); + } + console.log('[updateEffectParameters] Updated delay params in real-time'); + } + break; + } + + case 'reverb': { + const reverbParams = params as ReverbParameters; + if (effectNodeInfo.internalNodes) { + const { wetMix, dryMix } = effectNodeInfo.internalNodes; + + // Note: roomSize and damping require impulse response regeneration + // For now, we can only update the mix in real-time + if (wetMix && dryMix) { + const mix = reverbParams.mix || 0.3; + wetMix.gain.setValueAtTime(mix, audioContext.currentTime); + dryMix.gain.setValueAtTime(1 - 
mix, audioContext.currentTime); + } + console.log('[updateEffectParameters] Updated reverb mix in real-time (roomSize/damping require effect recreation)'); + } + break; + } + + case 'chorus': { + const chorusParams = params as ChorusParameters; + if (effectNodeInfo.internalNodes) { + const { lfo, lfoGain, wetMix, dryMix } = effectNodeInfo.internalNodes; + + if (lfo) { + lfo.frequency.setValueAtTime(chorusParams.rate || 1.5, audioContext.currentTime); + } + if (lfoGain) { + lfoGain.gain.setValueAtTime((chorusParams.depth || 0.002) * 1000, audioContext.currentTime); + } + if (wetMix && dryMix) { + const mix = chorusParams.mix || 0.5; + wetMix.gain.setValueAtTime(mix, audioContext.currentTime); + dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime); + } + console.log('[updateEffectParameters] Updated chorus params in real-time'); + } + break; + } + + case 'flanger': { + const flangerParams = params as FlangerParameters; + if (effectNodeInfo.internalNodes) { + const { lfo, lfoGain, feedback, wetMix, dryMix } = effectNodeInfo.internalNodes; + + if (lfo) { + lfo.frequency.setValueAtTime(flangerParams.rate || 0.5, audioContext.currentTime); + } + if (lfoGain) { + lfoGain.gain.setValueAtTime((flangerParams.depth || 0.002) * 1000, audioContext.currentTime); + } + if (feedback) { + feedback.gain.setValueAtTime(flangerParams.feedback || 0.5, audioContext.currentTime); + } + if (wetMix && dryMix) { + const mix = flangerParams.mix || 0.5; + wetMix.gain.setValueAtTime(mix, audioContext.currentTime); + dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime); + } + console.log('[updateEffectParameters] Updated flanger params in real-time'); + } + break; + } + + case 'phaser': { + const phaserParams = params as PhaserParameters; + if (effectNodeInfo.internalNodes) { + const { lfo, lfoGain, allpassFilters, wetMix, dryMix } = effectNodeInfo.internalNodes; + + if (lfo) { + lfo.frequency.setValueAtTime(phaserParams.rate || 0.5, audioContext.currentTime); + } + if (lfoGain) { + lfoGain.gain.setValueAtTime((phaserParams.depth || 0.5) * 1000, audioContext.currentTime); + } + // Note: Changing stages count requires rebuilding the filter chain + if (allpassFilters && phaserParams.stages) { + // Update base frequencies for existing filters + const stages = Math.min(phaserParams.stages, allpassFilters.length); + for (let i = 0; i < stages; i++) { + allpassFilters[i].frequency.value = 500 + i * 500; + } + } + if (wetMix && dryMix) { + const mix = phaserParams.mix || 0.5; + wetMix.gain.setValueAtTime(mix, audioContext.currentTime); + dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime); + } + console.log('[updateEffectParameters] Updated phaser params in real-time'); + } + break; + } + + case 'distortion': { + const distParams = params as DistortionParameters; + if (effectNodeInfo.internalNodes) { + const { waveshaper, preGain, postGain, wetMix, dryMix } = effectNodeInfo.internalNodes; + + // Note: Changing drive or type requires regenerating the waveshaper curve + // For now, we can update output level and mix + if (postGain) { + const outputLevel = distParams.output || 0.7; + postGain.gain.setValueAtTime(outputLevel, audioContext.currentTime); + } + if (wetMix && dryMix) { + const mix = distParams.mix || 1; + wetMix.gain.setValueAtTime(mix, audioContext.currentTime); + dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime); + } + + // If drive or type changed, we need to regenerate the curve + if (waveshaper && preGain && distParams.drive !== undefined) { + const drive = 
(distParams.drive || 0.5) * 50 + 1; + const distType = distParams.type || 'soft'; + + const samples = 44100; + const curve = new Float32Array(samples); + + for (let i = 0; i < samples; i++) { + const x = (i / samples) * 2 - 1; + const driven = x * drive; + + if (distType === 'soft') { + curve[i] = Math.tanh(driven); + } else if (distType === 'hard') { + curve[i] = Math.max(-1, Math.min(1, driven)); + } else { // tube + curve[i] = driven > 0 ? 1 - Math.exp(-driven) : -1 + Math.exp(driven); + } + } + + waveshaper.curve = curve; + preGain.gain.setValueAtTime(drive, audioContext.currentTime); + if (postGain) { + const outputLevel = distParams.output || 0.7; + postGain.gain.setValueAtTime(outputLevel / drive, audioContext.currentTime); + } + } + + console.log('[updateEffectParameters] Updated distortion params in real-time'); + } + break; + } + + // Dynamics effects + case 'compressor': { + const compParams = params as CompressorParameters; + const compressor = node as DynamicsCompressorNode; + if (compressor.threshold) { + compressor.threshold.setValueAtTime(compParams.threshold || -24, audioContext.currentTime); + compressor.ratio.setValueAtTime(compParams.ratio || 4, audioContext.currentTime); + compressor.attack.setValueAtTime(compParams.attack || 0.003, audioContext.currentTime); + compressor.release.setValueAtTime(compParams.release || 0.25, audioContext.currentTime); + compressor.knee.setValueAtTime(compParams.knee || 30, audioContext.currentTime); + console.log('[updateEffectParameters] Updated compressor params in real-time'); + } + break; + } + + case 'limiter': { + const limParams = params as LimiterParameters; + const limiter = node as DynamicsCompressorNode; + if (limiter.threshold) { + limiter.threshold.setValueAtTime(limParams.threshold || -3, audioContext.currentTime); + limiter.release.setValueAtTime(limParams.release || 0.05, audioContext.currentTime); + console.log('[updateEffectParameters] Updated limiter params in real-time'); + } + break; + } + + case 'gate': { + const gateParams = params as GateParameters; + const gate = node as DynamicsCompressorNode; + if (gate.threshold) { + gate.threshold.setValueAtTime(gateParams.threshold || -40, audioContext.currentTime); + gate.ratio.setValueAtTime(1 / (gateParams.ratio || 10), audioContext.currentTime); + gate.attack.setValueAtTime(gateParams.attack || 0.001, audioContext.currentTime); + gate.release.setValueAtTime(gateParams.release || 0.1, audioContext.currentTime); + console.log('[updateEffectParameters] Updated gate params in real-time'); + } + break; + } + + case 'bitcrusher': { + const bitParams = params as BitcrusherParameters; + if (effectNodeInfo.node && (effectNodeInfo.node as any).internalNodes?.updateParams) { + (effectNodeInfo.node as any).internalNodes.updateParams(bitParams); + console.log('[updateEffectParameters] Updated bitcrusher params in real-time'); + } + break; + } + + case 'pitch': { + const pitchParams = params as PitchShifterParameters; + if (effectNodeInfo.node && (effectNodeInfo.node as any).internalNodes?.updateParams) { + (effectNodeInfo.node as any).internalNodes.updateParams(pitchParams); + console.log('[updateEffectParameters] Updated pitch shifter params in real-time'); + } + break; + } + + case 'timestretch': { + const timeParams = params as TimeStretchParameters; + if (effectNodeInfo.node && (effectNodeInfo.node as any).internalNodes?.updateParams) { + (effectNodeInfo.node as any).internalNodes.updateParams(timeParams); + console.log('[updateEffectParameters] Updated time stretch params in 
real-time'); + } + break; + } + + // For other complex effects, we still need recreation + default: + console.log('[updateEffectParameters] Effect type does not support real-time parameter updates'); + break; + } +} + +/** + * Toggle effect bypass state + */ +export function toggleEffectBypass( + audioContext: AudioContext, + effectNodeInfo: EffectNodeInfo, + enabled: boolean +): void { + if (effectNodeInfo.dryGain && effectNodeInfo.wetGain) { + const now = audioContext.currentTime; + const rampTime = now + 0.01; // 10ms smooth transition + + // Smooth transition to avoid clicks + if (enabled) { + // Enable effect: dry = 0, wet = 1 + effectNodeInfo.dryGain.gain.setValueAtTime(effectNodeInfo.dryGain.gain.value, now); + effectNodeInfo.wetGain.gain.setValueAtTime(effectNodeInfo.wetGain.gain.value, now); + effectNodeInfo.dryGain.gain.linearRampToValueAtTime(0, rampTime); + effectNodeInfo.wetGain.gain.linearRampToValueAtTime(1, rampTime); + } else { + // Bypass effect: dry = 1, wet = 0 + effectNodeInfo.dryGain.gain.setValueAtTime(effectNodeInfo.dryGain.gain.value, now); + effectNodeInfo.wetGain.gain.setValueAtTime(effectNodeInfo.wetGain.gain.value, now); + effectNodeInfo.dryGain.gain.linearRampToValueAtTime(1, rampTime); + effectNodeInfo.wetGain.gain.linearRampToValueAtTime(0, rampTime); + } + console.log('[toggleEffectBypass]', effectNodeInfo.effect.name, 'enabled:', enabled); + } +} + +/** + * Create a Web Audio API node for a specific effect + */ +function createEffectNode( + audioContext: AudioContext, + effect: ChainEffect +): AudioNode | null { + const params = effect.parameters || {}; + + switch (effect.type) { + // Filter effects + case 'lowpass': + case 'highpass': + case 'bandpass': + case 'notch': + case 'lowshelf': + case 'highshelf': + case 'peaking': { + const filterParams = params as FilterOptions; + const filter = audioContext.createBiquadFilter(); + + // Map our effect types to BiquadFilterNode types + const typeMap: Record = { + lowpass: 'lowpass', + highpass: 'highpass', + bandpass: 'bandpass', + notch: 'notch', + lowshelf: 'lowshelf', + highshelf: 'highshelf', + peaking: 'peaking', + }; + + filter.type = typeMap[effect.type]; + filter.frequency.value = filterParams.frequency || 1000; + filter.Q.value = filterParams.Q || 1; + + if (filterParams.gain !== undefined && ['lowshelf', 'highshelf', 'peaking'].includes(effect.type)) { + filter.gain.value = filterParams.gain; + } + + return filter; + } + + // Dynamics - Compressor + case 'compressor': { + const compParams = params as CompressorParameters; + const compressor = audioContext.createDynamicsCompressor(); + + compressor.threshold.value = compParams.threshold || -24; + compressor.ratio.value = compParams.ratio || 4; + compressor.attack.value = compParams.attack || 0.003; + compressor.release.value = compParams.release || 0.25; + compressor.knee.value = compParams.knee || 30; + + return compressor; + } + + // Dynamics - Limiter (using compressor with high ratio) + case 'limiter': { + const limParams = params as LimiterParameters; + const limiter = audioContext.createDynamicsCompressor(); + + limiter.threshold.value = limParams.threshold || -3; + limiter.ratio.value = 20; // High ratio for limiting + limiter.attack.value = 0.001; // Fast attack + limiter.release.value = limParams.release || 0.05; + limiter.knee.value = 0; // Hard knee + + // Apply makeup gain if specified + if (limParams.makeupGain && limParams.makeupGain > 0) { + const makeupGain = audioContext.createGain(); + makeupGain.gain.value = Math.pow(10, 
limParams.makeupGain / 20); + limiter.connect(makeupGain); + return makeupGain; + } + + return limiter; + } + + // Dynamics - Gate (using compressor with inverse behavior) + case 'gate': { + const gateParams = params as GateParameters; + const gate = audioContext.createDynamicsCompressor(); + + // Configure as an expander/gate + gate.threshold.value = gateParams.threshold || -40; + gate.ratio.value = 1 / (gateParams.ratio || 10); // Inverse ratio for expansion + gate.attack.value = gateParams.attack || 0.001; + gate.release.value = gateParams.release || 0.1; + gate.knee.value = 0; // Hard knee for gating + + return gate; + } + + // Time-based - Delay + case 'delay': { + const delayParams = params as DelayParameters; + const delayNode = audioContext.createDelay(2); // Max 2 seconds + const feedbackNode = audioContext.createGain(); + const wetGain = audioContext.createGain(); + const dryGain = audioContext.createGain(); + const output = audioContext.createGain(); + + delayNode.delayTime.value = delayParams.time || 0.5; + feedbackNode.gain.value = delayParams.feedback || 0.3; + wetGain.gain.value = delayParams.mix || 0.5; + dryGain.gain.value = 1 - (delayParams.mix || 0.5); + + // We need to use the inputNode differently - let's create a proper splitter + // This will be called with the previous node as input + // We can't directly split here, so we'll return a custom node structure + + // For now, create a simpler version that works + const splitter = audioContext.createGain(); + + // Wet path: input -> delay -> feedback -> delay (loop) -> wet gain -> output + splitter.connect(delayNode); + delayNode.connect(feedbackNode); + feedbackNode.connect(delayNode); // feedback loop + delayNode.connect(wetGain); + wetGain.connect(output); + + // Dry path: input -> dry gain -> output + splitter.connect(dryGain); + dryGain.connect(output); + + // Return a custom object that behaves like a node + return { + input: splitter, + output, + connect: (dest: AudioNode) => output.connect(dest), + disconnect: () => output.disconnect(), + internalNodes: { delay: delayNode, feedback: feedbackNode, wetMix: wetGain, dryMix: dryGain } + } as any; + } + + // Time-based - Reverb (simple convolver-based) + case 'reverb': { + const reverbParams = params as ReverbParameters; + + // Create impulse response for reverb + const sampleRate = audioContext.sampleRate; + const length = sampleRate * (reverbParams.roomSize || 0.5) * 3; // Up to 3 seconds + const impulse = audioContext.createBuffer(2, length, sampleRate); + const impulseL = impulse.getChannelData(0); + const impulseR = impulse.getChannelData(1); + + // Generate impulse response + const decay = 1 - (reverbParams.damping || 0.5); + for (let i = 0; i < length; i++) { + const envelope = Math.pow(1 - i / length, decay * 3); + impulseL[i] = (Math.random() * 2 - 1) * envelope; + impulseR[i] = (Math.random() * 2 - 1) * envelope; + } + + const convolver = audioContext.createConvolver(); + convolver.buffer = impulse; + + const wetGain = audioContext.createGain(); + const dryGain = audioContext.createGain(); + const output = audioContext.createGain(); + + wetGain.gain.value = reverbParams.mix || 0.3; + dryGain.gain.value = 1 - (reverbParams.mix || 0.3); + + const splitter = audioContext.createGain(); + + // Wet: input -> convolver -> wet gain -> output + splitter.connect(convolver); + convolver.connect(wetGain); + wetGain.connect(output); + + // Dry: input -> dry gain -> output + splitter.connect(dryGain); + dryGain.connect(output); + + return { + input: splitter, + output, + 
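+      // Not a native AudioNode: applyEffectChain detects this input/output pair and
+      // routes the wet path through it, so composite effects plug in like plain nodes.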
connect: (dest: AudioNode) => output.connect(dest), + disconnect: () => output.disconnect(), + internalNodes: { convolver, wetMix: wetGain, dryMix: dryGain } + } as any; + } + + // Time-based - Chorus + case 'chorus': { + const chorusParams = params as ChorusParameters; + + const delay1 = audioContext.createDelay(); + const delay2 = audioContext.createDelay(); + const lfo = audioContext.createOscillator(); + const lfoGain = audioContext.createGain(); + const wetGain = audioContext.createGain(); + const dryGain = audioContext.createGain(); + const output = audioContext.createGain(); + + const baseDelay = 0.02; // 20ms base delay + delay1.delayTime.value = baseDelay; + delay2.delayTime.value = baseDelay; + + lfo.frequency.value = chorusParams.rate || 1.5; + lfoGain.gain.value = (chorusParams.depth || 0.002) * 1000; // Convert to ms + + wetGain.gain.value = chorusParams.mix || 0.5; + dryGain.gain.value = 1 - (chorusParams.mix || 0.5); + + // LFO modulates delay time + lfo.connect(lfoGain); + lfoGain.connect(delay1.delayTime); + lfoGain.connect(delay2.delayTime); + lfo.start(); + + const splitter = audioContext.createGain(); + + // Wet path + splitter.connect(delay1); + splitter.connect(delay2); + delay1.connect(wetGain); + delay2.connect(wetGain); + wetGain.connect(output); + + // Dry path + splitter.connect(dryGain); + dryGain.connect(output); + + return { + input: splitter, + output, + connect: (dest: AudioNode) => output.connect(dest), + disconnect: () => output.disconnect(), + internalNodes: { lfo, lfoGain, delay1, delay2, wetMix: wetGain, dryMix: dryGain } + } as any; + } + + // Time-based - Flanger + case 'flanger': { + const flangerParams = params as FlangerParameters; + + const delay = audioContext.createDelay(); + const feedback = audioContext.createGain(); + const lfo = audioContext.createOscillator(); + const lfoGain = audioContext.createGain(); + const wetGain = audioContext.createGain(); + const dryGain = audioContext.createGain(); + const output = audioContext.createGain(); + + const baseDelay = 0.005; // 5ms base delay + delay.delayTime.value = baseDelay; + + lfo.frequency.value = flangerParams.rate || 0.5; + lfoGain.gain.value = (flangerParams.depth || 0.002) * 1000; + feedback.gain.value = flangerParams.feedback || 0.5; + + wetGain.gain.value = flangerParams.mix || 0.5; + dryGain.gain.value = 1 - (flangerParams.mix || 0.5); + + lfo.connect(lfoGain); + lfoGain.connect(delay.delayTime); + lfo.start(); + + const splitter = audioContext.createGain(); + + // Wet with feedback + splitter.connect(delay); + delay.connect(feedback); + feedback.connect(delay); + delay.connect(wetGain); + wetGain.connect(output); + + // Dry + splitter.connect(dryGain); + dryGain.connect(output); + + return { + input: splitter, + output, + connect: (dest: AudioNode) => output.connect(dest), + disconnect: () => output.disconnect(), + // Store internal nodes for parameter updates + internalNodes: { lfo, lfoGain, delay, feedback, wetMix: wetGain, dryMix: dryGain } + } as any; + } + + // Time-based - Phaser + case 'phaser': { + const phaserParams = params as PhaserParameters; + + const stages = phaserParams.stages || 4; + const allpassFilters: BiquadFilterNode[] = []; + const lfo = audioContext.createOscillator(); + const lfoGain = audioContext.createGain(); + const wetGain = audioContext.createGain(); + const dryGain = audioContext.createGain(); + const output = audioContext.createGain(); + + lfo.frequency.value = phaserParams.rate || 0.5; + lfoGain.gain.value = (phaserParams.depth || 0.5) * 1000; + 
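+      // The LFO's ±1 output, scaled by lfoGain to ±(depth × 1000) Hz, is summed into
+      // each all-pass stage's base frequency below.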
wetGain.gain.value = phaserParams.mix || 0.5; + dryGain.gain.value = 1 - (phaserParams.mix || 0.5); + + const splitter = audioContext.createGain(); + let current: AudioNode = splitter; + + // Create allpass filter cascade + for (let i = 0; i < stages; i++) { + const filter = audioContext.createBiquadFilter(); + filter.type = 'allpass'; + filter.frequency.value = 500 + i * 500; + allpassFilters.push(filter); + current.connect(filter); + current = filter; + } + + // LFO modulates all filter frequencies + lfo.connect(lfoGain); + allpassFilters.forEach(filter => { + lfoGain.connect(filter.frequency); + }); + lfo.start(); + + // Wet path + current.connect(wetGain); + wetGain.connect(output); + + // Dry path + splitter.connect(dryGain); + dryGain.connect(output); + + return { + input: splitter, + output, + connect: (dest: AudioNode) => output.connect(dest), + disconnect: () => output.disconnect(), + internalNodes: { lfo, lfoGain, allpassFilters, wetMix: wetGain, dryMix: dryGain } + } as any; + } + + // Advanced - Distortion + case 'distortion': { + const distParams = params as any; + + const waveshaper = audioContext.createWaveShaper(); + const preGain = audioContext.createGain(); + const postGain = audioContext.createGain(); + const wetGain = audioContext.createGain(); + const dryGain = audioContext.createGain(); + const output = audioContext.createGain(); + + const drive = (distParams.drive || 0.5) * 50 + 1; + const outputLevel = distParams.output || 0.7; + + // Create distortion curve + const samples = 44100; + const curve = new Float32Array(samples); + const distType = distParams.type || 'soft'; + + for (let i = 0; i < samples; i++) { + const x = (i / samples) * 2 - 1; + const driven = x * drive; + + if (distType === 'soft') { + curve[i] = Math.tanh(driven); + } else if (distType === 'hard') { + curve[i] = Math.max(-1, Math.min(1, driven)); + } else { // tube + curve[i] = driven > 0 ? 
1 - Math.exp(-driven) : -1 + Math.exp(driven); + } + } + + waveshaper.curve = curve; + preGain.gain.value = drive; + postGain.gain.value = outputLevel / drive; + wetGain.gain.value = distParams.mix || 1; + dryGain.gain.value = 1 - (distParams.mix || 1); + + const splitter = audioContext.createGain(); + + // Wet path + splitter.connect(preGain); + preGain.connect(waveshaper); + waveshaper.connect(postGain); + postGain.connect(wetGain); + wetGain.connect(output); + + // Dry path + splitter.connect(dryGain); + dryGain.connect(output); + + return { + input: splitter, + output, + connect: (dest: AudioNode) => output.connect(dest), + disconnect: () => output.disconnect(), + internalNodes: { waveshaper, preGain, postGain, wetMix: wetGain, dryMix: dryGain } + } as any; + } + + // Advanced - Bitcrusher (real-time using ScriptProcessorNode) + case 'bitcrusher': { + const bitParams = params as BitcrusherParameters; + + // Use ScriptProcessorNode for real-time bit crushing + const bufferSize = 4096; + const processor = audioContext.createScriptProcessor(bufferSize, 2, 2); + + // Calculate bit depth quantization step + let bitLevels = Math.pow(2, bitParams.bitDepth || 8); + let step = 2 / bitLevels; + + // Calculate sample rate reduction ratio + let srRatio = audioContext.sampleRate / (bitParams.sampleRate || 8000); + let mix = bitParams.mix || 1; + + // Track hold samples for each channel + let holdSamples: number[] = [0, 0]; + let holdCounters: number[] = [0, 0]; + + processor.onaudioprocess = (e) => { + const numChannels = e.inputBuffer.numberOfChannels; + + for (let ch = 0; ch < numChannels; ch++) { + const inputData = e.inputBuffer.getChannelData(ch); + const outputData = e.outputBuffer.getChannelData(ch); + + for (let i = 0; i < bufferSize; i++) { + // Sample rate reduction (sample and hold) + if (holdCounters[ch] <= 0) { + let sample = inputData[i]; + // Bit depth reduction + sample = Math.floor(sample / step) * step; + holdSamples[ch] = sample; + holdCounters[ch] = srRatio; + } + holdCounters[ch]--; + + // Mix dry/wet + outputData[i] = inputData[i] * (1 - mix) + holdSamples[ch] * mix; + } + } + }; + + // Store internal state for parameter updates + (processor as any).internalNodes = { + updateParams: (newParams: BitcrusherParameters) => { + bitLevels = Math.pow(2, newParams.bitDepth || 8); + step = 2 / bitLevels; + srRatio = audioContext.sampleRate / (newParams.sampleRate || 8000); + mix = newParams.mix || 1; + } + }; + + return processor; + } + + // Advanced - Pitch Shifter (dual-tap delay line approach) + case 'pitch': { + const pitchParams = params as PitchShifterParameters; + + const bufferSize = 4096; + const processor = audioContext.createScriptProcessor(bufferSize, 2, 2); + + // Calculate pitch shift ratio from semitones and cents + const totalCents = (pitchParams.semitones || 0) * 100 + (pitchParams.cents || 0); + let pitchRatio = Math.pow(2, totalCents / 1200); + let mix = pitchParams.mix || 1; + + // Delay line parameters + const delayLength = 8192; // Power of 2 for efficient modulo + const grainLength = 2048; + + // State for each channel + interface ChannelState { + delayLine: Float32Array; + writeIndex: number; + readIndex1: number; + readIndex2: number; + crossfade: number; + } + + const channels: ChannelState[] = []; + for (let i = 0; i < 2; i++) { + channels.push({ + delayLine: new Float32Array(delayLength), + writeIndex: 0, + readIndex1: 0, + readIndex2: grainLength / 2, + crossfade: 0 + }); + } + + processor.onaudioprocess = (e) => { + const numChannels = 
Math.min(e.inputBuffer.numberOfChannels, channels.length); + + for (let ch = 0; ch < numChannels; ch++) { + const inputData = e.inputBuffer.getChannelData(ch); + const outputData = e.outputBuffer.getChannelData(ch); + const state = channels[ch]; + + for (let i = 0; i < bufferSize; i++) { + // Write to delay line + state.delayLine[state.writeIndex] = inputData[i]; + state.writeIndex = (state.writeIndex + 1) % delayLength; + + // Read from two taps with crossfade + const read1 = state.delayLine[Math.floor(state.readIndex1) % delayLength]; + const read2 = state.delayLine[Math.floor(state.readIndex2) % delayLength]; + + // Triangular crossfade window + const fade = state.crossfade / grainLength; + const window1 = 1 - fade; + const window2 = fade; + + const output = read1 * window1 + read2 * window2; + + // Advance read positions + state.readIndex1 += pitchRatio; + state.readIndex2 += pitchRatio; + state.crossfade += 1; + + // Reset crossfade and swap taps when grain is complete + if (state.crossfade >= grainLength) { + state.crossfade = 0; + state.readIndex1 = state.readIndex2; + state.readIndex2 = (state.writeIndex + grainLength / 2) % delayLength; + } + + // Mix dry/wet + outputData[i] = inputData[i] * (1 - mix) + output * mix; + } + } + }; + + // Store internal state for parameter updates + (processor as any).internalNodes = { + updateParams: (newParams: PitchShifterParameters) => { + const totalCents = (newParams.semitones || 0) * 100 + (newParams.cents || 0); + pitchRatio = Math.pow(2, totalCents / 1200); + mix = newParams.mix || 1; + } + }; + + return processor; + } + + // Advanced - Time Stretch (dual overlap-add approach) + case 'timestretch': { + const timeParams = params as TimeStretchParameters; + + const bufferSize = 4096; + const processor = audioContext.createScriptProcessor(bufferSize, 2, 2); + + let rate = timeParams.rate || 1.0; + let mix = timeParams.mix || 1; + + // Time stretch using dual overlapping grains (similar to pitch shifter) + const delayLength = 16384; + const grainSize = 4096; + + interface ChannelState { + delayLine: Float32Array; + writeIndex: number; + readIndex1: number; + readIndex2: number; + grainPhase: number; + } + + const channels: ChannelState[] = []; + for (let i = 0; i < 2; i++) { + channels.push({ + delayLine: new Float32Array(delayLength), + writeIndex: grainSize, // Start with latency + readIndex1: 0, + readIndex2: grainSize / 2, + grainPhase: 0 + }); + } + + processor.onaudioprocess = (e) => { + const numChannels = Math.min(e.inputBuffer.numberOfChannels, channels.length); + + for (let ch = 0; ch < numChannels; ch++) { + const inputData = e.inputBuffer.getChannelData(ch); + const outputData = e.outputBuffer.getChannelData(ch); + const state = channels[ch]; + + for (let i = 0; i < bufferSize; i++) { + // Write to delay line + state.delayLine[state.writeIndex % delayLength] = inputData[i]; + state.writeIndex++; + + // Read from two overlapping grains + const idx1 = Math.floor(state.readIndex1) % delayLength; + const idx2 = Math.floor(state.readIndex2) % delayLength; + + const sample1 = state.delayLine[idx1]; + const sample2 = state.delayLine[idx2]; + + // Crossfade between grains using Hanning windows + const phase = state.grainPhase / grainSize; + const window1 = 0.5 * (1 + Math.cos(Math.PI * phase)); + const window2 = 0.5 * (1 - Math.cos(Math.PI * phase)); + + const output = sample1 * window1 + sample2 * window2; + + // Advance read positions at input rate (no pitch change) + state.readIndex1 += 1.0; + state.readIndex2 += 1.0; + + // Advance 
+                // grain phase based on time stretch rate
+                // rate > 1 = slower (stretch), rate < 1 = faster (compress)
+                state.grainPhase += rate;
+
+                // Reset grain when complete
+                if (state.grainPhase >= grainSize) {
+                  state.grainPhase = 0;
+                  // Jump to position based on time stretch
+                  state.readIndex1 = state.readIndex2;
+                  state.readIndex2 = (state.readIndex1 + grainSize / 2) % delayLength;
+                }
+
+                // Mix dry/wet
+                outputData[i] = inputData[i] * (1 - mix) + output * mix;
+              }
+            }
+          };
+
+          // Store internal state for parameter updates
+          (processor as any).internalNodes = {
+            updateParams: (newParams: TimeStretchParameters) => {
+              rate = newParams.rate || 1.0;
+              mix = newParams.mix || 1;
+            }
+          };
+
+          return processor;
+        }
+
+    default:
+      return null;
+  }
+}
diff --git a/lib/hooks/useMultiTrack.ts b/lib/hooks/useMultiTrack.ts
index cdf6efa..aaff2c9 100644
--- a/lib/hooks/useMultiTrack.ts
+++ b/lib/hooks/useMultiTrack.ts
@@ -25,12 +25,12 @@ export function useMultiTrack() {
         return [];
       }
-      // Note: AudioBuffers and EffectChains can't be serialized, so we only restore track metadata
+      // Note: AudioBuffers can't be serialized, but EffectChains can
       return parsed.map((t: any) => ({
         ...t,
         name: String(t.name || 'Untitled Track'), // Ensure name is always a string
         audioBuffer: null, // Will need to be reloaded
-        effectChain: createEffectChain(`${t.name} Effects`), // Recreate effect chain
+        effectChain: t.effectChain || createEffectChain(`${t.name} Effects`), // Restore effect chain or create new
       }));
     }
   } catch (error) {
@@ -47,7 +47,7 @@ export function useMultiTrack() {
     if (typeof window === 'undefined') return;
     try {
-      // Only save serializable fields, excluding audioBuffer, effectChain, and any DOM references
+      // Only save serializable fields, excluding audioBuffer and any DOM references
       const trackData = tracks.map((track) => ({
         id: track.id,
         name: String(track.name || 'Untitled Track'),
@@ -60,7 +60,7 @@
         recordEnabled: track.recordEnabled,
         collapsed: track.collapsed,
         selected: track.selected,
-        // Note: effectChain is excluded - will be recreated on load
+        effectChain: track.effectChain, // Save effect chain
       }));
       localStorage.setItem(STORAGE_KEY, JSON.stringify(trackData));
     } catch (error) {
diff --git a/lib/hooks/useMultiTrackPlayer.ts b/lib/hooks/useMultiTrackPlayer.ts
index 332c101..598ef11 100644
@@ -2,6 +2,7 @@ import { useState, useCallback, useRef, useEffect } from 'react';
 import { getAudioContext } from '@/lib/audio/context';
 import type { Track } from '@/types/track';
 import { getTrackGain } from '@/lib/audio/track-utils';
+import { applyEffectChain, updateEffectParameters, toggleEffectBypass, type EffectNodeInfo } from '@/lib/audio/effects/processor';
 
 export interface MultiTrackPlayerState {
   isPlaying: boolean;
@@ -18,10 +19,17 @@ export function useMultiTrackPlayer(tracks: Track[], masterVolume: number = 1) {
   const sourceNodesRef = useRef<AudioBufferSourceNode[]>([]);
   const gainNodesRef = useRef<GainNode[]>([]);
   const panNodesRef = useRef<StereoPannerNode[]>([]);
+  const effectNodesRef = useRef<EffectNodeInfo[][]>([]); // Effect nodes per track
   const masterGainNodeRef = useRef<GainNode | null>(null);
   const startTimeRef = useRef(0);
   const pausedAtRef = useRef(0);
   const animationFrameRef = useRef<number | null>(null);
+  const tracksRef = useRef<Track[]>(tracks); // Always keep latest tracks
+
+  // Keep tracksRef in sync with tracks prop
+  useEffect(() => {
+    tracksRef.current = tracks;
+  }, [tracks]);
 
   // Calculate total duration from all tracks
   useEffect(() => {
@@ -79,6 +87,7 @@ export function 
useMultiTrackPlayer(tracks: Track[], masterVolume: number = 1) { sourceNodesRef.current = []; gainNodesRef.current = []; panNodesRef.current = []; + effectNodesRef.current = []; // Create master gain node const masterGain = audioContext.createGain(); @@ -103,10 +112,19 @@ export function useMultiTrackPlayer(tracks: Track[], masterVolume: number = 1) { // Set pan panNode.pan.setValueAtTime(track.pan, audioContext.currentTime); - // Connect: source -> gain -> pan -> master gain -> destination + // Connect: source -> gain -> pan -> effects -> master gain -> destination source.connect(gainNode); gainNode.connect(panNode); - panNode.connect(masterGain); + + // Apply effect chain + console.log('[MultiTrackPlayer] Applying effect chain for track:', track.name); + console.log('[MultiTrackPlayer] Effect chain ID:', track.effectChain.id); + console.log('[MultiTrackPlayer] Effect chain name:', track.effectChain.name); + console.log('[MultiTrackPlayer] Number of effects:', track.effectChain.effects.length); + console.log('[MultiTrackPlayer] Effects:', track.effectChain.effects); + const { outputNode, effectNodes } = applyEffectChain(audioContext, panNode, track.effectChain); + outputNode.connect(masterGain); + console.log('[MultiTrackPlayer] Effect output connected with', effectNodes.length, 'effect nodes'); // Start playback from current position source.start(0, pausedAtRef.current); @@ -115,6 +133,7 @@ export function useMultiTrackPlayer(tracks: Track[], masterVolume: number = 1) { sourceNodesRef.current.push(source); gainNodesRef.current.push(gainNode); panNodesRef.current.push(panNode); + effectNodesRef.current.push(effectNodes); // Handle ended event source.onended = () => { @@ -188,7 +207,7 @@ export function useMultiTrackPlayer(tracks: Track[], masterVolume: number = 1) { } }, [isPlaying, play, pause]); - // Update gain/pan when tracks change + // Update gain/pan when tracks change (simple updates that don't require graph rebuild) useEffect(() => { if (!isPlaying || !audioContextRef.current) return; @@ -210,6 +229,215 @@ export function useMultiTrackPlayer(tracks: Track[], masterVolume: number = 1) { }); }, [tracks, isPlaying]); + // Track effect chain structure to detect add/remove operations + const previousEffectStructureRef = useRef(null); + + // Detect effect chain structure changes (add/remove/reorder) and restart + useEffect(() => { + if (!isPlaying || !audioContextRef.current) return; + + // Create a signature of the current effect structure (IDs and count) + const currentStructure = tracks.map(track => + track.effectChain.effects.map(e => e.id).join(',') + ).join('|'); + + // If structure changed (effects added/removed/reordered) while playing, restart + // Don't restart if tracks is empty (intermediate state during updates) + if (previousEffectStructureRef.current !== null && + previousEffectStructureRef.current !== currentStructure && + tracks.length > 0) { + console.log('[useMultiTrackPlayer] Effect chain structure changed, restarting...'); + + // Update the reference immediately to prevent re-triggering + previousEffectStructureRef.current = currentStructure; + + // Update tracksRef with current tracks BEFORE setTimeout + tracksRef.current = tracks; + + // Save current position + const elapsed = audioContextRef.current.currentTime - startTimeRef.current; + const currentPos = pausedAtRef.current + elapsed; + + // Stop all source nodes + sourceNodesRef.current.forEach(node => { + try { + node.onended = null; + node.stop(); + node.disconnect(); + } catch (e) { + // Ignore errors + } + 
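+        // Stopped AudioBufferSourceNodes are single-use; the restart below rebuilds
+        // fresh sources and resumes from pausedAtRef.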
}); + + // Update position + pausedAtRef.current = currentPos; + setCurrentTime(currentPos); + setIsPlaying(false); + + // Clear animation frame + if (animationFrameRef.current) { + cancelAnimationFrame(animationFrameRef.current); + animationFrameRef.current = null; + } + + // Restart after a brief delay + setTimeout(() => { + // Use tracksRef.current to get the latest tracks, not the stale closure + const latestTracks = tracksRef.current; + + if (latestTracks.length === 0 || latestTracks.every(t => !t.audioBuffer)) return; + + const audioContext = getAudioContext(); + audioContextRef.current = audioContext; + + // Disconnect old nodes + gainNodesRef.current.forEach(node => node.disconnect()); + panNodesRef.current.forEach(node => node.disconnect()); + effectNodesRef.current.forEach(trackEffects => { + trackEffects.forEach(effectNodeInfo => { + if (effectNodeInfo.node) { + try { + effectNodeInfo.node.disconnect(); + } catch (e) { + // Ignore + } + } + if (effectNodeInfo.dryGain) effectNodeInfo.dryGain.disconnect(); + if (effectNodeInfo.wetGain) effectNodeInfo.wetGain.disconnect(); + }); + }); + if (masterGainNodeRef.current) { + masterGainNodeRef.current.disconnect(); + } + + // Reset refs + sourceNodesRef.current = []; + gainNodesRef.current = []; + panNodesRef.current = []; + effectNodesRef.current = []; + + // Create master gain node + const masterGain = audioContext.createGain(); + masterGain.gain.setValueAtTime(masterVolume, audioContext.currentTime); + masterGain.connect(audioContext.destination); + masterGainNodeRef.current = masterGain; + + // Create audio graph for each track + for (const track of latestTracks) { + if (!track.audioBuffer) continue; + + const source = audioContext.createBufferSource(); + source.buffer = track.audioBuffer; + + const gainNode = audioContext.createGain(); + const panNode = audioContext.createStereoPanner(); + + // Set gain based on track volume and solo/mute state + const trackGain = getTrackGain(track, latestTracks); + gainNode.gain.setValueAtTime(trackGain, audioContext.currentTime); + + // Set pan + panNode.pan.setValueAtTime(track.pan, audioContext.currentTime); + + // Connect: source -> gain -> pan -> effects -> master gain -> destination + source.connect(gainNode); + gainNode.connect(panNode); + + // Apply effect chain + const { outputNode, effectNodes } = applyEffectChain(audioContext, panNode, track.effectChain); + outputNode.connect(masterGain); + + // Start playback from current position + source.start(0, pausedAtRef.current); + + // Store references + sourceNodesRef.current.push(source); + gainNodesRef.current.push(gainNode); + panNodesRef.current.push(panNode); + effectNodesRef.current.push(effectNodes); + + // Handle ended event + source.onended = () => { + if (pausedAtRef.current + (audioContext.currentTime - startTimeRef.current) >= duration) { + setIsPlaying(false); + setCurrentTime(0); + pausedAtRef.current = 0; + } + }; + } + + startTimeRef.current = audioContext.currentTime; + setIsPlaying(true); + + // Start animation frame for position updates + const updatePosition = () => { + if (!audioContextRef.current) return; + + const elapsed = audioContextRef.current.currentTime - startTimeRef.current; + const newTime = pausedAtRef.current + elapsed; + + if (newTime >= duration) { + setIsPlaying(false); + setCurrentTime(0); + pausedAtRef.current = 0; + if (animationFrameRef.current) { + cancelAnimationFrame(animationFrameRef.current); + animationFrameRef.current = null; + } + return; + } + + setCurrentTime(newTime); + 
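+          // Re-arm on the next animation frame; the position is recomputed from
+          // audioContext.currentTime each tick, so it cannot drift from the audio graph.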
animationFrameRef.current = requestAnimationFrame(updatePosition); + }; + updatePosition(); + }, 10); + } + + previousEffectStructureRef.current = currentStructure; + }, [tracks, isPlaying, duration, masterVolume]); + + // Stop playback when all tracks are deleted + useEffect(() => { + if (!isPlaying) return; + + // If tracks become empty or all tracks have no audio buffer, stop playback + if (tracks.length === 0 || tracks.every(t => !t.audioBuffer)) { + console.log('[useMultiTrackPlayer] All tracks deleted, stopping playback'); + stop(); + } + }, [tracks, isPlaying, stop]); + + // Update effect parameters and bypass state in real-time + useEffect(() => { + if (!isPlaying || !audioContextRef.current) return; + + tracks.forEach((track, trackIndex) => { + const effectNodes = effectNodesRef.current[trackIndex]; + if (!effectNodes) return; + + // Only update if we have the same number of effects (no add/remove) + if (effectNodes.length !== track.effectChain.effects.length) return; + + track.effectChain.effects.forEach((effect, effectIndex) => { + const effectNodeInfo = effectNodes[effectIndex]; + if (!effectNodeInfo) return; + + // Update bypass state + if (effect.enabled !== effectNodeInfo.effect.enabled) { + toggleEffectBypass(audioContextRef.current!, effectNodeInfo, effect.enabled); + effectNodeInfo.effect.enabled = effect.enabled; + } + + // Update parameters (only works for certain effect types) + if (JSON.stringify(effect.parameters) !== JSON.stringify(effectNodeInfo.effect.parameters)) { + updateEffectParameters(audioContextRef.current!, effectNodeInfo, effect); + effectNodeInfo.effect.parameters = effect.parameters; + } + }); + }); + }, [tracks, isPlaying]); + // Update master volume when it changes useEffect(() => { if (!isPlaying || !audioContextRef.current || !masterGainNodeRef.current) return;
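
To make the new processor API easier to review end-to-end, here is a minimal usage sketch (not part of the diff). It assumes `createEffect(type, name)` fills in defaults via `getDefaultParameters`, as the chain.ts hunk above implements, and that `createEffectChain(name)` returns a chain with a mutable `effects` array:

```ts
// Illustrative only: drives applyEffectChain / updateEffectParameters /
// toggleEffectBypass from this diff against a bare AudioBufferSourceNode.
import { getAudioContext } from '@/lib/audio/context';
import { createEffect, createEffectChain, EFFECT_NAMES } from '@/lib/audio/effects/chain';
import {
  applyEffectChain,
  toggleEffectBypass,
  updateEffectParameters,
} from '@/lib/audio/effects/processor';

export function demoChainPlayback(buffer: AudioBuffer) {
  const ctx = getAudioContext();

  // Assumed helper signatures: createEffectChain(name), createEffect(type, name).
  const chain = createEffectChain('Demo Effects');
  chain.effects.push(
    createEffect('lowpass', EFFECT_NAMES.lowpass), // defaults: 1 kHz, Q 1
    createEffect('delay', EFFECT_NAMES.delay)      // defaults: 0.5 s, 30% feedback
  );

  const source = ctx.createBufferSource();
  source.buffer = buffer;

  // Each effect gets parallel dry/wet gains, so bypass is a crossfade, not a re-patch.
  const { outputNode, effectNodes } = applyEffectChain(ctx, source, chain);
  outputNode.connect(ctx.destination);
  source.start();

  // Live tweak: BiquadFilter params update via setValueAtTime, no graph rebuild.
  updateEffectParameters(ctx, effectNodes[0], {
    ...chain.effects[0],
    parameters: { frequency: 400, Q: 2 },
  });

  // Soft-bypass the delay using the 10 ms crossfade in toggleEffectBypass.
  toggleEffectBypass(ctx, effectNodes[1], false);
}
```

One behavior worth flagging from `createEffectNode`: `DynamicsCompressorNode.ratio` is clamped to [1, 20], so the gate's inverse-ratio trick (`1 / ratio` < 1) appears to be pinned at 1 and the gate would act as a pass-through; a true downward expander would need an AudioWorklet (or a ScriptProcessorNode, as the bitcrusher already uses).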