import type { EffectChain, ChainEffect } from './chain';
import type { FilterOptions } from './filters';
import type {
  CompressorParameters,
  LimiterParameters,
  GateParameters,
} from './dynamics';
import type {
  DelayParameters,
  ReverbParameters,
  ChorusParameters,
  FlangerParameters,
  PhaserParameters,
} from './time-based';
import type {
  DistortionParameters,
  BitcrusherParameters,
  PitchShifterParameters,
  TimeStretchParameters,
} from './advanced';

export interface EffectNodeInfo {
  effect: ChainEffect;
  node: any; // Can be an AudioNode or a custom structure with input/output
  dryGain?: GainNode; // Dry signal (bypass)
  wetGain?: GainNode; // Wet signal (through effect)
  // Internal nodes for complex effects (for real-time parameter updates)
  internalNodes?: {
    lfo?: OscillatorNode;
    lfoGain?: GainNode;
    delay?: DelayNode;
    delay1?: DelayNode;
    delay2?: DelayNode;
    feedback?: GainNode;
    wetMix?: GainNode;
    dryMix?: GainNode;
    convolver?: ConvolverNode;
    allpassFilters?: BiquadFilterNode[];
    waveshaper?: WaveShaperNode;
    preGain?: GainNode;
    postGain?: GainNode;
  };
}

/**
 * Apply an effect chain to an audio signal using Web Audio API nodes.
 * Creates and connects audio nodes based on the effect chain.
 * @param audioContext - The Web Audio API context
 * @param inputNode - The audio node to apply effects to
 * @param chain - The effect chain configuration
 * @returns The final output node (or the input if the chain is empty)
 */
export function applyEffectChain(
  audioContext: AudioContext,
  inputNode: AudioNode,
  chain: EffectChain
): { outputNode: AudioNode; effectNodes: EffectNodeInfo[] } {
  let currentNode: AudioNode = inputNode;
  const effectNodes: EffectNodeInfo[] = [];

  console.log('[applyEffectChain] Processing', chain.effects.length, 'effects');

  // Apply each effect in the chain (bypass is handled via gain nodes)
  for (const effect of chain.effects) {
    console.log(
      '[applyEffectChain] Effect:', effect.name,
      'enabled:', effect.enabled,
      'type:', effect.type,
      'params:', effect.parameters
    );
    const effectNode = createEffectNode(audioContext, effect);
    console.log('[applyEffectChain] Created effect node:', effectNode ? 'success' : 'null');

    if (effectNode) {
      // Create the bypass mechanism using gain nodes
      const dryGain = audioContext.createGain();
      const wetGain = audioContext.createGain();
      const output = audioContext.createGain();

      // Set bypass state
      if (effect.enabled) {
        dryGain.gain.value = 0; // No dry signal
        wetGain.gain.value = 1; // Full wet signal
      } else {
        dryGain.gain.value = 1; // Full dry signal
        wetGain.gain.value = 0; // No wet signal
      }

      // Connect dry path (bypass)
      currentNode.connect(dryGain);
      dryGain.connect(output);

      // Connect wet path (through the effect)
      if ('input' in effectNode && 'output' in effectNode) {
        currentNode.connect((effectNode as any).input as AudioNode);
        ((effectNode as any).output as AudioNode).connect(wetGain);
      } else {
        currentNode.connect(effectNode);
        effectNode.connect(wetGain);
      }
      wetGain.connect(output);

      effectNodes.push({
        effect,
        node: effectNode,
        dryGain,
        wetGain,
        internalNodes: (effectNode as any).internalNodes, // Store internal nodes if they exist
      });

      currentNode = output;
      console.log('[applyEffectChain] Connected with bypass routing');
    }
  }

  console.log('[applyEffectChain] Returning output node with', effectNodes.length, 'effect nodes');
  return { outputNode: currentNode, effectNodes };
}
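/*
 * Usage sketch (illustrative, not part of the module API). Assumes
 * `EffectChain` is shaped like `{ effects: ChainEffect[] }` and that each
 * ChainEffect carries `name`, `type`, `enabled`, and `parameters`, which is
 * what the loop above relies on.
 *
 *   const ctx = new AudioContext();
 *   const source = ctx.createOscillator();
 *   const chain = {
 *     effects: [
 *       { name: 'LP', type: 'lowpass', enabled: true, parameters: { frequency: 800, Q: 1 } },
 *     ],
 *   } as EffectChain;
 *   const { outputNode, effectNodes } = applyEffectChain(ctx, source, chain);
 *   outputNode.connect(ctx.destination);
 *   source.start();
 */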
/**
 * Update effect node parameters in real time.
 */
export function updateEffectParameters(
  audioContext: AudioContext,
  effectNodeInfo: EffectNodeInfo,
  newEffect: ChainEffect
): void {
  const node = effectNodeInfo.node;
  const params = newEffect.parameters || {};

  console.log('[updateEffectParameters] Updating', newEffect.type, 'with params:', params);

  switch (newEffect.type) {
    // Filters - can be updated in real time
    case 'lowpass':
    case 'highpass':
    case 'bandpass':
    case 'notch':
    case 'lowshelf':
    case 'highshelf':
    case 'peaking': {
      const filterParams = params as FilterOptions;
      const filter = node as BiquadFilterNode;
      if (filter.frequency) {
        filter.frequency.setValueAtTime(filterParams.frequency ?? 1000, audioContext.currentTime);
        filter.Q.setValueAtTime(filterParams.Q ?? 1, audioContext.currentTime);
        if (filterParams.gain !== undefined && filter.gain) {
          filter.gain.setValueAtTime(filterParams.gain, audioContext.currentTime);
        }
        console.log('[updateEffectParameters] Updated filter params');
      }
      break;
    }

    // Time-based effects with internal nodes
    case 'delay': {
      const delayParams = params as DelayParameters;
      if (effectNodeInfo.internalNodes) {
        const { delay, feedback, wetMix, dryMix } = effectNodeInfo.internalNodes;
        if (delay) {
          delay.delayTime.setValueAtTime(delayParams.time ?? 0.5, audioContext.currentTime);
        }
        if (feedback) {
          feedback.gain.setValueAtTime(delayParams.feedback ?? 0.3, audioContext.currentTime);
        }
        if (wetMix && dryMix) {
          const mix = delayParams.mix ?? 0.5;
          wetMix.gain.setValueAtTime(mix, audioContext.currentTime);
          dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime);
        }
        console.log('[updateEffectParameters] Updated delay params in real-time');
      }
      break;
    }

    case 'reverb': {
      const reverbParams = params as ReverbParameters;
      if (effectNodeInfo.internalNodes) {
        const { wetMix, dryMix } = effectNodeInfo.internalNodes;
        // Note: roomSize and damping require impulse response regeneration.
        // For now, only the mix can be updated in real time.
        if (wetMix && dryMix) {
          const mix = reverbParams.mix ?? 0.3;
          wetMix.gain.setValueAtTime(mix, audioContext.currentTime);
          dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime);
        }
        console.log('[updateEffectParameters] Updated reverb mix in real-time (roomSize/damping require effect recreation)');
      }
      break;
    }
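    // The modulated-delay cases below treat `depth` as a delay offset in
    // seconds, because DelayNode.delayTime is an AudioParam measured in
    // seconds. Worked example with depth = 0.002 (2 ms) and rate = 1.5 Hz:
    //
    //   delayTime(t) = base + depth * sin(2 * PI * rate * t)
    //                = 0.02 + 0.002 * sin(2 * PI * 1.5 * t)   // 20 ms +/- 2 ms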
    case 'chorus': {
      const chorusParams = params as ChorusParameters;
      if (effectNodeInfo.internalNodes) {
        const { lfo, lfoGain, wetMix, dryMix } = effectNodeInfo.internalNodes;
        if (lfo) {
          lfo.frequency.setValueAtTime(chorusParams.rate ?? 1.5, audioContext.currentTime);
        }
        if (lfoGain) {
          // depth is in seconds (delayTime is an AudioParam in seconds)
          lfoGain.gain.setValueAtTime(chorusParams.depth ?? 0.002, audioContext.currentTime);
        }
        if (wetMix && dryMix) {
          const mix = chorusParams.mix ?? 0.5;
          wetMix.gain.setValueAtTime(mix, audioContext.currentTime);
          dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime);
        }
        console.log('[updateEffectParameters] Updated chorus params in real-time');
      }
      break;
    }

    case 'flanger': {
      const flangerParams = params as FlangerParameters;
      if (effectNodeInfo.internalNodes) {
        const { lfo, lfoGain, feedback, wetMix, dryMix } = effectNodeInfo.internalNodes;
        if (lfo) {
          lfo.frequency.setValueAtTime(flangerParams.rate ?? 0.5, audioContext.currentTime);
        }
        if (lfoGain) {
          // depth is in seconds, as for the chorus
          lfoGain.gain.setValueAtTime(flangerParams.depth ?? 0.002, audioContext.currentTime);
        }
        if (feedback) {
          feedback.gain.setValueAtTime(flangerParams.feedback ?? 0.5, audioContext.currentTime);
        }
        if (wetMix && dryMix) {
          const mix = flangerParams.mix ?? 0.5;
          wetMix.gain.setValueAtTime(mix, audioContext.currentTime);
          dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime);
        }
        console.log('[updateEffectParameters] Updated flanger params in real-time');
      }
      break;
    }

    case 'phaser': {
      const phaserParams = params as PhaserParameters;
      if (effectNodeInfo.internalNodes) {
        const { lfo, lfoGain, allpassFilters, wetMix, dryMix } = effectNodeInfo.internalNodes;
        if (lfo) {
          lfo.frequency.setValueAtTime(phaserParams.rate ?? 0.5, audioContext.currentTime);
        }
        if (lfoGain) {
          // depth maps to a frequency sweep in Hz (filter.frequency is in Hz)
          lfoGain.gain.setValueAtTime((phaserParams.depth ?? 0.5) * 1000, audioContext.currentTime);
        }
        // Note: changing the stage count requires rebuilding the filter chain
        if (allpassFilters && phaserParams.stages) {
          // Update base frequencies for the existing filters
          const stages = Math.min(phaserParams.stages, allpassFilters.length);
          for (let i = 0; i < stages; i++) {
            allpassFilters[i].frequency.value = 500 + i * 500;
          }
        }
        if (wetMix && dryMix) {
          const mix = phaserParams.mix ?? 0.5;
          wetMix.gain.setValueAtTime(mix, audioContext.currentTime);
          dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime);
        }
        console.log('[updateEffectParameters] Updated phaser params in real-time');
      }
      break;
    }
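    // Regenerating a WaveShaper curve allocates a fresh Float32Array, so the
    // distortion case below only does it when `drive` is present. Worked
    // example of the drive mapping, assuming drive = 0.5:
    //
    //   preGain  = 0.5 * 50 + 1 = 26    // input boost into the shaper
    //   postGain = output / 26          // compensates the boost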
    case 'distortion': {
      const distParams = params as DistortionParameters;
      if (effectNodeInfo.internalNodes) {
        const { waveshaper, preGain, postGain, wetMix, dryMix } = effectNodeInfo.internalNodes;
        // Note: changing drive or type requires regenerating the waveshaper
        // curve; output level and mix can be updated directly.
        if (postGain) {
          const outputLevel = distParams.output ?? 0.7;
          postGain.gain.setValueAtTime(outputLevel, audioContext.currentTime);
        }
        if (wetMix && dryMix) {
          const mix = distParams.mix ?? 1;
          wetMix.gain.setValueAtTime(mix, audioContext.currentTime);
          dryMix.gain.setValueAtTime(1 - mix, audioContext.currentTime);
        }
        // If drive changed, regenerate the curve
        if (waveshaper && preGain && distParams.drive !== undefined) {
          const drive = (distParams.drive ?? 0.5) * 50 + 1;
          const distType = distParams.type || 'soft';
          const samples = 44100;
          const curve = new Float32Array(samples);
          for (let i = 0; i < samples; i++) {
            const x = (i / samples) * 2 - 1;
            const driven = x * drive;
            if (distType === 'soft') {
              curve[i] = Math.tanh(driven);
            } else if (distType === 'hard') {
              curve[i] = Math.max(-1, Math.min(1, driven));
            } else {
              // tube
              curve[i] = driven > 0 ? 1 - Math.exp(-driven) : -1 + Math.exp(driven);
            }
          }
          waveshaper.curve = curve;
          preGain.gain.setValueAtTime(drive, audioContext.currentTime);
          if (postGain) {
            const outputLevel = distParams.output ?? 0.7;
            postGain.gain.setValueAtTime(outputLevel / drive, audioContext.currentTime);
          }
        }
        console.log('[updateEffectParameters] Updated distortion params in real-time');
      }
      break;
    }

    // Dynamics effects
    case 'compressor': {
      const compParams = params as CompressorParameters;
      const compressor = node as DynamicsCompressorNode;
      if (compressor.threshold) {
        compressor.threshold.setValueAtTime(compParams.threshold ?? -24, audioContext.currentTime);
        compressor.ratio.setValueAtTime(compParams.ratio ?? 4, audioContext.currentTime);
        compressor.attack.setValueAtTime(compParams.attack ?? 0.003, audioContext.currentTime);
        compressor.release.setValueAtTime(compParams.release ?? 0.25, audioContext.currentTime);
        compressor.knee.setValueAtTime(compParams.knee ?? 30, audioContext.currentTime);
        console.log('[updateEffectParameters] Updated compressor params in real-time');
      }
      break;
    }

    case 'limiter': {
      const limParams = params as LimiterParameters;
      const limiter = node as DynamicsCompressorNode;
      // If the limiter was created with makeup gain it is wrapped in an
      // input/output pair, has no `threshold`, and is skipped here.
      if (limiter.threshold) {
        limiter.threshold.setValueAtTime(limParams.threshold ?? -3, audioContext.currentTime);
        limiter.release.setValueAtTime(limParams.release ?? 0.05, audioContext.currentTime);
        console.log('[updateEffectParameters] Updated limiter params in real-time');
      }
      break;
    }

    case 'gate': {
      const gateParams = params as GateParameters;
      const gate = node as DynamicsCompressorNode;
      if (gate.threshold) {
        gate.threshold.setValueAtTime(gateParams.threshold ?? -40, audioContext.currentTime);
        // Caveat: DynamicsCompressorNode clamps ratio to [1, 20], so the
        // inverse ratio below is clamped and the gating is only approximate.
        gate.ratio.setValueAtTime(1 / (gateParams.ratio ?? 10), audioContext.currentTime);
        gate.attack.setValueAtTime(gateParams.attack ?? 0.001, audioContext.currentTime);
        gate.release.setValueAtTime(gateParams.release ?? 0.1, audioContext.currentTime);
        console.log('[updateEffectParameters] Updated gate params in real-time');
      }
      break;
    }

    case 'bitcrusher': {
      const bitParams = params as BitcrusherParameters;
      if (effectNodeInfo.node && (effectNodeInfo.node as any).internalNodes?.updateParams) {
        (effectNodeInfo.node as any).internalNodes.updateParams(bitParams);
        console.log('[updateEffectParameters] Updated bitcrusher params in real-time');
      }
      break;
    }

    case 'pitch': {
      const pitchParams = params as PitchShifterParameters;
      if (effectNodeInfo.node && (effectNodeInfo.node as any).internalNodes?.updateParams) {
        (effectNodeInfo.node as any).internalNodes.updateParams(pitchParams);
        console.log('[updateEffectParameters] Updated pitch shifter params in real-time');
      }
      break;
    }

    case 'timestretch': {
      const timeParams = params as TimeStretchParameters;
      if (effectNodeInfo.node && (effectNodeInfo.node as any).internalNodes?.updateParams) {
        (effectNodeInfo.node as any).internalNodes.updateParams(timeParams);
        console.log('[updateEffectParameters] Updated time stretch params in real-time');
      }
      break;
    }

    // Other complex effects still require recreation
    default:
      console.log('[updateEffectParameters] Effect type does not support real-time parameter updates');
      break;
  }
}
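/*
 * Usage sketch (illustrative): push a new cutoff to a running filter without
 * rebuilding the graph. Assumes `ctx` and `effectNodes` come from the
 * applyEffectChain() example above.
 *
 *   const info = effectNodes[0]; // the 'lowpass' effect
 *   updateEffectParameters(ctx, info, {
 *     ...info.effect,
 *     parameters: { frequency: 2500, Q: 0.7 },
 *   });
 */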
/**
 * Toggle effect bypass state.
 */
export function toggleEffectBypass(
  audioContext: AudioContext,
  effectNodeInfo: EffectNodeInfo,
  enabled: boolean
): void {
  if (effectNodeInfo.dryGain && effectNodeInfo.wetGain) {
    const now = audioContext.currentTime;
    const rampTime = now + 0.01; // 10 ms smooth transition

    // Smooth transition to avoid clicks
    if (enabled) {
      // Enable effect: dry = 0, wet = 1
      effectNodeInfo.dryGain.gain.setValueAtTime(effectNodeInfo.dryGain.gain.value, now);
      effectNodeInfo.wetGain.gain.setValueAtTime(effectNodeInfo.wetGain.gain.value, now);
      effectNodeInfo.dryGain.gain.linearRampToValueAtTime(0, rampTime);
      effectNodeInfo.wetGain.gain.linearRampToValueAtTime(1, rampTime);
    } else {
      // Bypass effect: dry = 1, wet = 0
      effectNodeInfo.dryGain.gain.setValueAtTime(effectNodeInfo.dryGain.gain.value, now);
      effectNodeInfo.wetGain.gain.setValueAtTime(effectNodeInfo.wetGain.gain.value, now);
      effectNodeInfo.dryGain.gain.linearRampToValueAtTime(1, rampTime);
      effectNodeInfo.wetGain.gain.linearRampToValueAtTime(0, rampTime);
    }

    console.log('[toggleEffectBypass]', effectNodeInfo.effect.name, 'enabled:', enabled);
  }
}
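/*
 * Usage sketch (illustrative): an A/B toggle wired to a checkbox. Assumes
 * `ctx` and `effectNodes` come from the applyEffectChain() example above.
 *
 *   checkbox.addEventListener('change', () => {
 *     toggleEffectBypass(ctx, effectNodes[0], checkbox.checked);
 *   });
 */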
/**
 * Create a Web Audio API node for a specific effect.
 */
function createEffectNode(
  audioContext: AudioContext,
  effect: ChainEffect
): AudioNode | null {
  const params = effect.parameters || {};

  switch (effect.type) {
    // Filter effects
    case 'lowpass':
    case 'highpass':
    case 'bandpass':
    case 'notch':
    case 'lowshelf':
    case 'highshelf':
    case 'peaking': {
      const filterParams = params as FilterOptions;
      const filter = audioContext.createBiquadFilter();
      // Map our effect types to BiquadFilterNode types
      const typeMap: Record<string, BiquadFilterType> = {
        lowpass: 'lowpass',
        highpass: 'highpass',
        bandpass: 'bandpass',
        notch: 'notch',
        lowshelf: 'lowshelf',
        highshelf: 'highshelf',
        peaking: 'peaking',
      };
      filter.type = typeMap[effect.type];
      filter.frequency.value = filterParams.frequency ?? 1000;
      filter.Q.value = filterParams.Q ?? 1;
      if (filterParams.gain !== undefined && ['lowshelf', 'highshelf', 'peaking'].includes(effect.type)) {
        filter.gain.value = filterParams.gain;
      }
      return filter;
    }

    // Dynamics - Compressor
    case 'compressor': {
      const compParams = params as CompressorParameters;
      const compressor = audioContext.createDynamicsCompressor();
      compressor.threshold.value = compParams.threshold ?? -24;
      compressor.ratio.value = compParams.ratio ?? 4;
      compressor.attack.value = compParams.attack ?? 0.003;
      compressor.release.value = compParams.release ?? 0.25;
      compressor.knee.value = compParams.knee ?? 30;
      return compressor;
    }

    // Dynamics - Limiter (a compressor with a high ratio)
    case 'limiter': {
      const limParams = params as LimiterParameters;
      const limiter = audioContext.createDynamicsCompressor();
      limiter.threshold.value = limParams.threshold ?? -3;
      limiter.ratio.value = 20;     // High ratio for limiting
      limiter.attack.value = 0.001; // Fast attack
      limiter.release.value = limParams.release ?? 0.05;
      limiter.knee.value = 0;       // Hard knee
      // Apply makeup gain if specified. Return an input/output pair so the
      // caller connects into the limiter rather than into the makeup gain.
      if (limParams.makeupGain && limParams.makeupGain > 0) {
        const makeupGain = audioContext.createGain();
        makeupGain.gain.value = Math.pow(10, limParams.makeupGain / 20);
        limiter.connect(makeupGain);
        return {
          input: limiter,
          output: makeupGain,
          connect: (dest: AudioNode) => makeupGain.connect(dest),
          disconnect: () => makeupGain.disconnect(),
        } as any;
      }
      return limiter;
    }

    // Dynamics - Gate (approximated with a compressor)
    case 'gate': {
      const gateParams = params as GateParameters;
      const gate = audioContext.createDynamicsCompressor();
      // Configure as an expander/gate. Caveat: DynamicsCompressorNode clamps
      // ratio to [1, 20], so the inverse ratio below is clamped and the
      // gating is only approximate; a true gate needs a custom processor.
      gate.threshold.value = gateParams.threshold ?? -40;
      gate.ratio.value = 1 / (gateParams.ratio ?? 10); // Inverse ratio for expansion
      gate.attack.value = gateParams.attack ?? 0.001;
      gate.release.value = gateParams.release ?? 0.1;
      gate.knee.value = 0; // Hard knee for gating
      return gate;
    }

    // Time-based - Delay
    case 'delay': {
      const delayParams = params as DelayParameters;
      const delayNode = audioContext.createDelay(2); // Max 2 seconds
      const feedbackNode = audioContext.createGain();
      const wetGain = audioContext.createGain();
      const dryGain = audioContext.createGain();
      const output = audioContext.createGain();

      delayNode.delayTime.value = delayParams.time ?? 0.5;
      feedbackNode.gain.value = delayParams.feedback ?? 0.3; // Keep below 1 so the loop decays
      wetGain.gain.value = delayParams.mix ?? 0.5;
      dryGain.gain.value = 1 - (delayParams.mix ?? 0.5);

      // The effect needs its own entry point, so use a gain node as a
      // splitter and return an input/output pair instead of a single node.
      const splitter = audioContext.createGain();

      // Wet path: input -> delay -> feedback -> delay (loop) -> wet gain -> output
      splitter.connect(delayNode);
      delayNode.connect(feedbackNode);
      feedbackNode.connect(delayNode); // feedback loop
      delayNode.connect(wetGain);
      wetGain.connect(output);

      // Dry path: input -> dry gain -> output
      splitter.connect(dryGain);
      dryGain.connect(output);

      // Return a custom object that behaves like a node
      return {
        input: splitter,
        output,
        connect: (dest: AudioNode) => output.connect(dest),
        disconnect: () => output.disconnect(),
        internalNodes: { delay: delayNode, feedback: feedbackNode, wetMix: wetGain, dryMix: dryGain },
      } as any;
    }
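    // The reverb below synthesizes its impulse response as exponentially
    // decaying noise. Worked example, assuming roomSize = 0.5 at a 48 kHz
    // context: length = 48000 * 0.5 * 3 = 72000 samples, i.e. a 1.5 s tail,
    // shaped by envelope(i) = (1 - i / length) ^ (decay * 3).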
    // Time-based - Reverb (simple convolver-based)
    case 'reverb': {
      const reverbParams = params as ReverbParameters;
      // Create an impulse response for the reverb
      const sampleRate = audioContext.sampleRate;
      const length = Math.floor(sampleRate * (reverbParams.roomSize ?? 0.5) * 3); // Up to 3 seconds
      const impulse = audioContext.createBuffer(2, length, sampleRate);
      const impulseL = impulse.getChannelData(0);
      const impulseR = impulse.getChannelData(1);

      // Generate the impulse response
      const decay = 1 - (reverbParams.damping ?? 0.5);
      for (let i = 0; i < length; i++) {
        const envelope = Math.pow(1 - i / length, decay * 3);
        impulseL[i] = (Math.random() * 2 - 1) * envelope;
        impulseR[i] = (Math.random() * 2 - 1) * envelope;
      }

      const convolver = audioContext.createConvolver();
      convolver.buffer = impulse;

      const wetGain = audioContext.createGain();
      const dryGain = audioContext.createGain();
      const output = audioContext.createGain();
      wetGain.gain.value = reverbParams.mix ?? 0.3;
      dryGain.gain.value = 1 - (reverbParams.mix ?? 0.3);

      const splitter = audioContext.createGain();
      // Wet: input -> convolver -> wet gain -> output
      splitter.connect(convolver);
      convolver.connect(wetGain);
      wetGain.connect(output);
      // Dry: input -> dry gain -> output
      splitter.connect(dryGain);
      dryGain.connect(output);

      return {
        input: splitter,
        output,
        connect: (dest: AudioNode) => output.connect(dest),
        disconnect: () => output.disconnect(),
        internalNodes: { convolver, wetMix: wetGain, dryMix: dryGain },
      } as any;
    }

    // Time-based - Chorus
    case 'chorus': {
      const chorusParams = params as ChorusParameters;
      const delay1 = audioContext.createDelay();
      const delay2 = audioContext.createDelay();
      const lfo = audioContext.createOscillator();
      const lfoGain = audioContext.createGain();
      const wetGain = audioContext.createGain();
      const dryGain = audioContext.createGain();
      const output = audioContext.createGain();

      const baseDelay = 0.02; // 20 ms base delay
      delay1.delayTime.value = baseDelay;
      delay2.delayTime.value = baseDelay;
      lfo.frequency.value = chorusParams.rate ?? 1.5;
      lfoGain.gain.value = chorusParams.depth ?? 0.002; // depth in seconds (delayTime is in seconds)
      wetGain.gain.value = chorusParams.mix ?? 0.5;
      dryGain.gain.value = 1 - (chorusParams.mix ?? 0.5);

      // LFO modulates the delay times
      lfo.connect(lfoGain);
      lfoGain.connect(delay1.delayTime);
      lfoGain.connect(delay2.delayTime);
      lfo.start();

      const splitter = audioContext.createGain();
      // Wet path
      splitter.connect(delay1);
      splitter.connect(delay2);
      delay1.connect(wetGain);
      delay2.connect(wetGain);
      wetGain.connect(output);
      // Dry path
      splitter.connect(dryGain);
      dryGain.connect(output);

      return {
        input: splitter,
        output,
        connect: (dest: AudioNode) => output.connect(dest),
        disconnect: () => output.disconnect(),
        internalNodes: { lfo, lfoGain, delay1, delay2, wetMix: wetGain, dryMix: dryGain },
      } as any;
    }

    // Time-based - Flanger
    case 'flanger': {
      const flangerParams = params as FlangerParameters;
      const delay = audioContext.createDelay();
      const feedback = audioContext.createGain();
      const lfo = audioContext.createOscillator();
      const lfoGain = audioContext.createGain();
      const wetGain = audioContext.createGain();
      const dryGain = audioContext.createGain();
      const output = audioContext.createGain();

      const baseDelay = 0.005; // 5 ms base delay
      delay.delayTime.value = baseDelay;
      lfo.frequency.value = flangerParams.rate ?? 0.5;
      lfoGain.gain.value = flangerParams.depth ?? 0.002; // depth in seconds
      feedback.gain.value = flangerParams.feedback ?? 0.5;
      wetGain.gain.value = flangerParams.mix ?? 0.5;
      dryGain.gain.value = 1 - (flangerParams.mix ?? 0.5);

      lfo.connect(lfoGain);
      lfoGain.connect(delay.delayTime);
      lfo.start();

      const splitter = audioContext.createGain();
      // Wet path with feedback
      splitter.connect(delay);
      delay.connect(feedback);
      feedback.connect(delay);
      delay.connect(wetGain);
      wetGain.connect(output);
      // Dry path
      splitter.connect(dryGain);
      dryGain.connect(output);

      return {
        input: splitter,
        output,
        connect: (dest: AudioNode) => output.connect(dest),
        disconnect: () => output.disconnect(),
        // Store internal nodes for parameter updates
        internalNodes: { lfo, lfoGain, delay, feedback, wetMix: wetGain, dryMix: dryGain },
      } as any;
    }

    // Time-based - Phaser
    case 'phaser': {
      const phaserParams = params as PhaserParameters;
      const stages = phaserParams.stages || 4;
      const allpassFilters: BiquadFilterNode[] = [];
      const lfo = audioContext.createOscillator();
      const lfoGain = audioContext.createGain();
      const wetGain = audioContext.createGain();
      const dryGain = audioContext.createGain();
      const output = audioContext.createGain();

      lfo.frequency.value = phaserParams.rate ?? 0.5;
      lfoGain.gain.value = (phaserParams.depth ?? 0.5) * 1000; // sweep in Hz
      wetGain.gain.value = phaserParams.mix ?? 0.5;
      dryGain.gain.value = 1 - (phaserParams.mix ?? 0.5);

      const splitter = audioContext.createGain();
      let current: AudioNode = splitter;

      // Create the allpass filter cascade
      for (let i = 0; i < stages; i++) {
        const filter = audioContext.createBiquadFilter();
        filter.type = 'allpass';
        filter.frequency.value = 500 + i * 500;
        allpassFilters.push(filter);
        current.connect(filter);
        current = filter;
      }

      // LFO modulates all filter frequencies
      lfo.connect(lfoGain);
      allpassFilters.forEach(filter => {
        lfoGain.connect(filter.frequency);
      });
      lfo.start();

      // Wet path
      current.connect(wetGain);
      wetGain.connect(output);
      // Dry path
      splitter.connect(dryGain);
      dryGain.connect(output);

      return {
        input: splitter,
        output,
        connect: (dest: AudioNode) => output.connect(dest),
        disconnect: () => output.disconnect(),
        internalNodes: { lfo, lfoGain, allpassFilters, wetMix: wetGain, dryMix: dryGain },
      } as any;
    }
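    // The distortion below precomputes a transfer curve sampled at 44100
    // points over x in [-1, 1]. For the 'soft' type the curve is
    // y = tanh(drive * x); e.g. drive = 26 maps x = 0.1 to tanh(2.6) ~= 0.989,
    // which is where the soft clipping comes from.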
    // Advanced - Distortion
    case 'distortion': {
      const distParams = params as DistortionParameters;
      const waveshaper = audioContext.createWaveShaper();
      const preGain = audioContext.createGain();
      const postGain = audioContext.createGain();
      const wetGain = audioContext.createGain();
      const dryGain = audioContext.createGain();
      const output = audioContext.createGain();

      const drive = (distParams.drive ?? 0.5) * 50 + 1;
      const outputLevel = distParams.output ?? 0.7;

      // Create the distortion curve
      const samples = 44100;
      const curve = new Float32Array(samples);
      const distType = distParams.type || 'soft';
      for (let i = 0; i < samples; i++) {
        const x = (i / samples) * 2 - 1;
        const driven = x * drive;
        if (distType === 'soft') {
          curve[i] = Math.tanh(driven);
        } else if (distType === 'hard') {
          curve[i] = Math.max(-1, Math.min(1, driven));
        } else {
          // tube
          curve[i] = driven > 0 ? 1 - Math.exp(-driven) : -1 + Math.exp(driven);
        }
      }

      waveshaper.curve = curve;
      preGain.gain.value = drive;
      postGain.gain.value = outputLevel / drive;
      wetGain.gain.value = distParams.mix ?? 1;
      dryGain.gain.value = 1 - (distParams.mix ?? 1);

      const splitter = audioContext.createGain();
      // Wet path
      splitter.connect(preGain);
      preGain.connect(waveshaper);
      waveshaper.connect(postGain);
      postGain.connect(wetGain);
      wetGain.connect(output);
      // Dry path
      splitter.connect(dryGain);
      dryGain.connect(output);

      return {
        input: splitter,
        output,
        connect: (dest: AudioNode) => output.connect(dest),
        disconnect: () => output.disconnect(),
        internalNodes: { waveshaper, preGain, postGain, wetMix: wetGain, dryMix: dryGain },
      } as any;
    }

    // Advanced - Bitcrusher (real-time, using a ScriptProcessorNode;
    // ScriptProcessorNode is deprecated in favor of AudioWorklet, but it
    // keeps this module self-contained)
    case 'bitcrusher': {
      const bitParams = params as BitcrusherParameters;
      const bufferSize = 4096;
      const processor = audioContext.createScriptProcessor(bufferSize, 2, 2);

      // Bit-depth quantization step
      let bitLevels = Math.pow(2, bitParams.bitDepth ?? 8);
      let step = 2 / bitLevels;
      // Sample-rate reduction ratio
      let srRatio = audioContext.sampleRate / (bitParams.sampleRate ?? 8000);
      let mix = bitParams.mix ?? 1;

      // Sample-and-hold state per channel
      const holdSamples: number[] = [0, 0];
      const holdCounters: number[] = [0, 0];

      processor.onaudioprocess = (e) => {
        const numChannels = e.inputBuffer.numberOfChannels;
        for (let ch = 0; ch < numChannels; ch++) {
          const inputData = e.inputBuffer.getChannelData(ch);
          const outputData = e.outputBuffer.getChannelData(ch);
          for (let i = 0; i < bufferSize; i++) {
            // Sample rate reduction (sample and hold)
            if (holdCounters[ch] <= 0) {
              let sample = inputData[i];
              // Bit depth reduction
              sample = Math.floor(sample / step) * step;
              holdSamples[ch] = sample;
              holdCounters[ch] = srRatio;
            }
            holdCounters[ch]--;
            // Mix dry/wet
            outputData[i] = inputData[i] * (1 - mix) + holdSamples[ch] * mix;
          }
        }
      };

      // Store an update hook for real-time parameter changes
      (processor as any).internalNodes = {
        updateParams: (newParams: BitcrusherParameters) => {
          bitLevels = Math.pow(2, newParams.bitDepth ?? 8);
          step = 2 / bitLevels;
          srRatio = audioContext.sampleRate / (newParams.sampleRate ?? 8000);
          mix = newParams.mix ?? 1;
        },
      };

      return processor;
    }
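    // The pitch shifter below derives its playback ratio from equal
    // temperament: ratio = 2^(cents / 1200). Worked example for +7 semitones:
    //
    //   totalCents = 7 * 100 + 0 = 700
    //   pitchRatio = 2^(700 / 1200) ~= 1.498   // a perfect fifth up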
    // Advanced - Pitch Shifter (dual-tap delay line approach)
    case 'pitch': {
      const pitchParams = params as PitchShifterParameters;
      const bufferSize = 4096;
      const processor = audioContext.createScriptProcessor(bufferSize, 2, 2);

      // Pitch shift ratio from semitones and cents
      const totalCents = (pitchParams.semitones ?? 0) * 100 + (pitchParams.cents ?? 0);
      let pitchRatio = Math.pow(2, totalCents / 1200);
      let mix = pitchParams.mix ?? 1;

      // Delay line parameters
      const delayLength = 8192; // Power of 2 for efficient modulo
      const grainLength = 2048;

      // State for each channel
      interface ChannelState {
        delayLine: Float32Array;
        writeIndex: number;
        readIndex1: number;
        readIndex2: number;
        crossfade: number;
      }

      const channels: ChannelState[] = [];
      for (let i = 0; i < 2; i++) {
        channels.push({
          delayLine: new Float32Array(delayLength),
          writeIndex: 0,
          readIndex1: 0,
          readIndex2: grainLength / 2,
          crossfade: 0,
        });
      }

      processor.onaudioprocess = (e) => {
        const numChannels = Math.min(e.inputBuffer.numberOfChannels, channels.length);
        for (let ch = 0; ch < numChannels; ch++) {
          const inputData = e.inputBuffer.getChannelData(ch);
          const outputData = e.outputBuffer.getChannelData(ch);
          const state = channels[ch];
          for (let i = 0; i < bufferSize; i++) {
            // Write to the delay line
            state.delayLine[state.writeIndex] = inputData[i];
            state.writeIndex = (state.writeIndex + 1) % delayLength;

            // Read from two taps with crossfade
            const read1 = state.delayLine[Math.floor(state.readIndex1) % delayLength];
            const read2 = state.delayLine[Math.floor(state.readIndex2) % delayLength];

            // Triangular crossfade window
            const fade = state.crossfade / grainLength;
            const window1 = 1 - fade;
            const window2 = fade;
            const output = read1 * window1 + read2 * window2;

            // Advance the read positions
            state.readIndex1 += pitchRatio;
            state.readIndex2 += pitchRatio;
            state.crossfade += 1;

            // Reset the crossfade and swap taps when the grain is complete
            if (state.crossfade >= grainLength) {
              state.crossfade = 0;
              state.readIndex1 = state.readIndex2;
              state.readIndex2 = (state.writeIndex + grainLength / 2) % delayLength;
            }

            // Mix dry/wet
            outputData[i] = inputData[i] * (1 - mix) + output * mix;
          }
        }
      };

      // Store an update hook for real-time parameter changes
      (processor as any).internalNodes = {
        updateParams: (newParams: PitchShifterParameters) => {
          const newCents = (newParams.semitones ?? 0) * 100 + (newParams.cents ?? 0);
          pitchRatio = Math.pow(2, newCents / 1200);
          mix = newParams.mix ?? 1;
        },
      };

      return processor;
    }
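    // The time stretch below crossfades two overlapping grains with
    // raised-cosine windows. The pair always sums to unity, so steady-state
    // level is preserved:
    //
    //   0.5 * (1 + cos(pi * p)) + 0.5 * (1 - cos(pi * p)) = 1   for p in [0, 1]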
    // Advanced - Time Stretch (dual overlap-add approach)
    case 'timestretch': {
      const timeParams = params as TimeStretchParameters;
      const bufferSize = 4096;
      const processor = audioContext.createScriptProcessor(bufferSize, 2, 2);

      let rate = timeParams.rate ?? 1.0;
      let mix = timeParams.mix ?? 1;

      // Time stretch using dual overlapping grains (similar to the pitch shifter)
      const delayLength = 16384;
      const grainSize = 4096;

      interface ChannelState {
        delayLine: Float32Array;
        writeIndex: number;
        readIndex1: number;
        readIndex2: number;
        grainPhase: number;
      }

      const channels: ChannelState[] = [];
      for (let i = 0; i < 2; i++) {
        channels.push({
          delayLine: new Float32Array(delayLength),
          writeIndex: grainSize, // Start with latency
          readIndex1: 0,
          readIndex2: grainSize / 2,
          grainPhase: 0,
        });
      }

      processor.onaudioprocess = (e) => {
        const numChannels = Math.min(e.inputBuffer.numberOfChannels, channels.length);
        for (let ch = 0; ch < numChannels; ch++) {
          const inputData = e.inputBuffer.getChannelData(ch);
          const outputData = e.outputBuffer.getChannelData(ch);
          const state = channels[ch];
          for (let i = 0; i < bufferSize; i++) {
            // Write to the delay line
            state.delayLine[state.writeIndex % delayLength] = inputData[i];
            state.writeIndex++;

            // Read from two overlapping grains
            const idx1 = Math.floor(state.readIndex1) % delayLength;
            const idx2 = Math.floor(state.readIndex2) % delayLength;
            const sample1 = state.delayLine[idx1];
            const sample2 = state.delayLine[idx2];

            // Crossfade between grains using raised-cosine windows
            const phase = state.grainPhase / grainSize;
            const window1 = 0.5 * (1 + Math.cos(Math.PI * phase));
            const window2 = 0.5 * (1 - Math.cos(Math.PI * phase));
            const output = sample1 * window1 + sample2 * window2;

            // Advance the read positions at the input rate (no pitch change)
            state.readIndex1 += 1.0;
            state.readIndex2 += 1.0;

            // Advance the grain phase based on the stretch rate:
            // rate > 1 = slower (stretch), rate < 1 = faster (compress)
            state.grainPhase += rate;

            // Reset the grain when complete
            if (state.grainPhase >= grainSize) {
              state.grainPhase = 0;
              // Jump to a position based on the time stretch
              state.readIndex1 = state.readIndex2;
              state.readIndex2 = (state.readIndex1 + grainSize / 2) % delayLength;
            }

            // Mix dry/wet
            outputData[i] = inputData[i] * (1 - mix) + output * mix;
          }
        }
      };

      // Store an update hook for real-time parameter changes
      (processor as any).internalNodes = {
        updateParams: (newParams: TimeStretchParameters) => {
          rate = newParams.rate ?? 1.0;
          mix = newParams.mix ?? 1;
        },
      };

      return processor;
    }

    default:
      return null;
  }
}
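/*
 * End-to-end sketch (illustrative; assumes the EffectChain shape used above
 * and a user gesture to resume the AudioContext):
 *
 *   const ctx = new AudioContext();
 *   const source = ctx.createMediaStreamSource(micStream); // micStream from getUserMedia
 *   const chain = {
 *     effects: [
 *       { name: 'Comp', type: 'compressor', enabled: true, parameters: { threshold: -18, ratio: 3 } },
 *       { name: 'Echo', type: 'delay', enabled: true, parameters: { time: 0.35, feedback: 0.4, mix: 0.3 } },
 *     ],
 *   } as EffectChain;
 *   const { outputNode, effectNodes } = applyEffectChain(ctx, source, chain);
 *   outputNode.connect(ctx.destination);
 *
 *   // Later: live tweaks and bypass without rebuilding the graph
 *   updateEffectParameters(ctx, effectNodes[1], {
 *     ...effectNodes[1].effect,
 *     parameters: { time: 0.5, feedback: 0.4, mix: 0.3 },
 *   });
 *   toggleEffectBypass(ctx, effectNodes[0], false);
 */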