/*
 * NOTE(review): the text below is a commit message (plus "484 lines · 16 KiB ·
 * TypeScript" file-metadata artifacts) that was accidentally pasted into the
 * source file. It is preserved here inside a comment so the file parses:
 *
 * Implemented comprehensive real-time effect processing for multi-track audio:
 * Core Features: - Per-track effect chains with drag-and-drop reordering
 * - Effect bypass/enable toggle per effect - Real-time parameter updates
 * (filters, dynamics, time-based, distortion, bitcrusher, pitch, timestretch)
 * - Add/remove effects during playback without interruption - Effect chain
 * persistence via localStorage - Automatic playback stop when tracks are
 * deleted. Technical Implementation: - Effect processor with dry/wet routing
 * for bypass functionality - Real-time effect parameter updates using
 * AudioParam setValueAtTime - Structure change detection for add/remove/reorder
 * operations - Stale closure fix using refs for latest track state
 * - ScriptProcessorNode for bitcrusher, pitch shifter, and time stretch
 * - Dual-tap delay line for pitch shifting - Overlap-add synthesis for time
 * stretching. UI Components: - EffectBrowser dialog with categorized effects
 * - EffectDevice component with parameter controls - EffectParameters for all
 * 19 real-time effect types - Device rack with horizontal scrolling
 * (Ableton-style). Removed offline-only effects (normalize, fadeIn, fadeOut,
 * reverse) as they don't fit the real-time processing model. Completed all
 * items in Phase 7.4: - [x] Per-track effect chain - [x] Effect rack UI
 * - [x] Effect bypass per track - [x] Real-time effect processing during
 * playback - [x] Add/remove effects during playback - [x] Real-time parameter
 * updates - [x] Effect chain persistence
 *
 * Generated with [Claude Code](https://claude.com/claude-code)
 * Co-Authored-By: Claude <noreply@anthropic.com>
 */
import { useState, useCallback, useRef, useEffect } from 'react';
|
|
import { getAudioContext } from '@/lib/audio/context';
|
|
import type { Track } from '@/types/track';
|
|
import { getTrackGain } from '@/lib/audio/track-utils';
|
|
import { applyEffectChain, updateEffectParameters, toggleEffectBypass, type EffectNodeInfo } from '@/lib/audio/effects/processor';
|
|
|
|
/**
 * Snapshot of the multi-track transport state exposed by
 * {@link useMultiTrackPlayer}.
 *
 * NOTE(review): exported but not visibly referenced elsewhere in this file —
 * presumably consumed by callers of the hook; confirm before removing.
 */
export interface MultiTrackPlayerState {
  /** Whether playback is currently running. */
  isPlaying: boolean;
  /** Current playhead position, in seconds. */
  currentTime: number;
  /** Total duration in seconds (length of the longest track). */
  duration: number;
}
|
|
|
|
export function useMultiTrackPlayer(tracks: Track[], masterVolume: number = 1) {
|
|
const [isPlaying, setIsPlaying] = useState(false);
|
|
const [currentTime, setCurrentTime] = useState(0);
|
|
const [duration, setDuration] = useState(0);
|
|
|
|
const audioContextRef = useRef<AudioContext | null>(null);
|
|
const sourceNodesRef = useRef<AudioBufferSourceNode[]>([]);
|
|
const gainNodesRef = useRef<GainNode[]>([]);
|
|
const panNodesRef = useRef<StereoPannerNode[]>([]);
|
|
const effectNodesRef = useRef<EffectNodeInfo[][]>([]); // Effect nodes per track
|
|
const masterGainNodeRef = useRef<GainNode | null>(null);
|
|
const startTimeRef = useRef<number>(0);
|
|
const pausedAtRef = useRef<number>(0);
|
|
const animationFrameRef = useRef<number | null>(null);
|
|
const tracksRef = useRef<Track[]>(tracks); // Always keep latest tracks
|
|
|
|
// Keep tracksRef in sync with tracks prop
|
|
useEffect(() => {
|
|
tracksRef.current = tracks;
|
|
}, [tracks]);
|
|
|
|
// Calculate total duration from all tracks
|
|
useEffect(() => {
|
|
let maxDuration = 0;
|
|
for (const track of tracks) {
|
|
if (track.audioBuffer) {
|
|
maxDuration = Math.max(maxDuration, track.audioBuffer.duration);
|
|
}
|
|
}
|
|
setDuration(maxDuration);
|
|
}, [tracks]);
|
|
|
|
const updatePlaybackPosition = useCallback(() => {
|
|
if (!audioContextRef.current) return;
|
|
|
|
const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
|
|
const newTime = pausedAtRef.current + elapsed;
|
|
|
|
if (newTime >= duration) {
|
|
setIsPlaying(false);
|
|
setCurrentTime(0);
|
|
pausedAtRef.current = 0;
|
|
if (animationFrameRef.current) {
|
|
cancelAnimationFrame(animationFrameRef.current);
|
|
animationFrameRef.current = null;
|
|
}
|
|
return;
|
|
}
|
|
|
|
setCurrentTime(newTime);
|
|
animationFrameRef.current = requestAnimationFrame(updatePlaybackPosition);
|
|
}, [duration]);
|
|
|
|
const play = useCallback(() => {
|
|
if (tracks.length === 0 || tracks.every(t => !t.audioBuffer)) return;
|
|
|
|
const audioContext = getAudioContext();
|
|
audioContextRef.current = audioContext;
|
|
|
|
// Stop any existing playback
|
|
sourceNodesRef.current.forEach(node => {
|
|
try {
|
|
node.stop();
|
|
node.disconnect();
|
|
} catch (e) {
|
|
// Ignore errors from already stopped nodes
|
|
}
|
|
});
|
|
gainNodesRef.current.forEach(node => node.disconnect());
|
|
panNodesRef.current.forEach(node => node.disconnect());
|
|
if (masterGainNodeRef.current) {
|
|
masterGainNodeRef.current.disconnect();
|
|
}
|
|
|
|
sourceNodesRef.current = [];
|
|
gainNodesRef.current = [];
|
|
panNodesRef.current = [];
|
|
effectNodesRef.current = [];
|
|
|
|
// Create master gain node
|
|
const masterGain = audioContext.createGain();
|
|
masterGain.gain.setValueAtTime(masterVolume, audioContext.currentTime);
|
|
masterGain.connect(audioContext.destination);
|
|
masterGainNodeRef.current = masterGain;
|
|
|
|
// Create audio graph for each track
|
|
for (const track of tracks) {
|
|
if (!track.audioBuffer) continue;
|
|
|
|
const source = audioContext.createBufferSource();
|
|
source.buffer = track.audioBuffer;
|
|
|
|
const gainNode = audioContext.createGain();
|
|
const panNode = audioContext.createStereoPanner();
|
|
|
|
// Set gain based on track volume and solo/mute state
|
|
const trackGain = getTrackGain(track, tracks);
|
|
gainNode.gain.setValueAtTime(trackGain, audioContext.currentTime);
|
|
|
|
// Set pan
|
|
panNode.pan.setValueAtTime(track.pan, audioContext.currentTime);
|
|
|
|
// Connect: source -> gain -> pan -> effects -> master gain -> destination
|
|
source.connect(gainNode);
|
|
gainNode.connect(panNode);
|
|
|
|
// Apply effect chain
|
|
console.log('[MultiTrackPlayer] Applying effect chain for track:', track.name);
|
|
console.log('[MultiTrackPlayer] Effect chain ID:', track.effectChain.id);
|
|
console.log('[MultiTrackPlayer] Effect chain name:', track.effectChain.name);
|
|
console.log('[MultiTrackPlayer] Number of effects:', track.effectChain.effects.length);
|
|
console.log('[MultiTrackPlayer] Effects:', track.effectChain.effects);
|
|
const { outputNode, effectNodes } = applyEffectChain(audioContext, panNode, track.effectChain);
|
|
outputNode.connect(masterGain);
|
|
console.log('[MultiTrackPlayer] Effect output connected with', effectNodes.length, 'effect nodes');
|
|
|
|
// Start playback from current position
|
|
source.start(0, pausedAtRef.current);
|
|
|
|
// Store references
|
|
sourceNodesRef.current.push(source);
|
|
gainNodesRef.current.push(gainNode);
|
|
panNodesRef.current.push(panNode);
|
|
effectNodesRef.current.push(effectNodes);
|
|
|
|
// Handle ended event
|
|
source.onended = () => {
|
|
if (pausedAtRef.current + (audioContext.currentTime - startTimeRef.current) >= duration) {
|
|
setIsPlaying(false);
|
|
setCurrentTime(0);
|
|
pausedAtRef.current = 0;
|
|
}
|
|
};
|
|
}
|
|
|
|
startTimeRef.current = audioContext.currentTime;
|
|
setIsPlaying(true);
|
|
updatePlaybackPosition();
|
|
}, [tracks, duration, masterVolume, updatePlaybackPosition]);
|
|
|
|
const pause = useCallback(() => {
|
|
if (!audioContextRef.current || !isPlaying) return;
|
|
|
|
// Stop all source nodes
|
|
sourceNodesRef.current.forEach(node => {
|
|
try {
|
|
node.stop();
|
|
node.disconnect();
|
|
} catch (e) {
|
|
// Ignore errors
|
|
}
|
|
});
|
|
|
|
// Update paused position
|
|
const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
|
|
pausedAtRef.current = Math.min(pausedAtRef.current + elapsed, duration);
|
|
setCurrentTime(pausedAtRef.current);
|
|
|
|
setIsPlaying(false);
|
|
|
|
if (animationFrameRef.current) {
|
|
cancelAnimationFrame(animationFrameRef.current);
|
|
animationFrameRef.current = null;
|
|
}
|
|
}, [isPlaying, duration]);
|
|
|
|
const stop = useCallback(() => {
|
|
pause();
|
|
pausedAtRef.current = 0;
|
|
setCurrentTime(0);
|
|
}, [pause]);
|
|
|
|
const seek = useCallback((time: number) => {
|
|
const wasPlaying = isPlaying;
|
|
|
|
if (wasPlaying) {
|
|
pause();
|
|
}
|
|
|
|
const clampedTime = Math.max(0, Math.min(time, duration));
|
|
pausedAtRef.current = clampedTime;
|
|
setCurrentTime(clampedTime);
|
|
|
|
if (wasPlaying) {
|
|
// Small delay to ensure state is updated
|
|
setTimeout(() => play(), 10);
|
|
}
|
|
}, [isPlaying, duration, pause, play]);
|
|
|
|
const togglePlayPause = useCallback(() => {
|
|
if (isPlaying) {
|
|
pause();
|
|
} else {
|
|
play();
|
|
}
|
|
}, [isPlaying, play, pause]);
|
|
|
|
// Update gain/pan when tracks change (simple updates that don't require graph rebuild)
|
|
useEffect(() => {
|
|
if (!isPlaying || !audioContextRef.current) return;
|
|
|
|
tracks.forEach((track, index) => {
|
|
if (gainNodesRef.current[index]) {
|
|
const trackGain = getTrackGain(track, tracks);
|
|
gainNodesRef.current[index].gain.setValueAtTime(
|
|
trackGain,
|
|
audioContextRef.current!.currentTime
|
|
);
|
|
}
|
|
|
|
if (panNodesRef.current[index]) {
|
|
panNodesRef.current[index].pan.setValueAtTime(
|
|
track.pan,
|
|
audioContextRef.current!.currentTime
|
|
);
|
|
}
|
|
});
|
|
}, [tracks, isPlaying]);
|
|
|
|
// Track effect chain structure to detect add/remove operations
|
|
const previousEffectStructureRef = useRef<string | null>(null);
|
|
|
|
// Detect effect chain structure changes (add/remove/reorder) and restart
|
|
useEffect(() => {
|
|
if (!isPlaying || !audioContextRef.current) return;
|
|
|
|
// Create a signature of the current effect structure (IDs and count)
|
|
const currentStructure = tracks.map(track =>
|
|
track.effectChain.effects.map(e => e.id).join(',')
|
|
).join('|');
|
|
|
|
// If structure changed (effects added/removed/reordered) while playing, restart
|
|
// Don't restart if tracks is empty (intermediate state during updates)
|
|
if (previousEffectStructureRef.current !== null &&
|
|
previousEffectStructureRef.current !== currentStructure &&
|
|
tracks.length > 0) {
|
|
console.log('[useMultiTrackPlayer] Effect chain structure changed, restarting...');
|
|
|
|
// Update the reference immediately to prevent re-triggering
|
|
previousEffectStructureRef.current = currentStructure;
|
|
|
|
// Update tracksRef with current tracks BEFORE setTimeout
|
|
tracksRef.current = tracks;
|
|
|
|
// Save current position
|
|
const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
|
|
const currentPos = pausedAtRef.current + elapsed;
|
|
|
|
// Stop all source nodes
|
|
sourceNodesRef.current.forEach(node => {
|
|
try {
|
|
node.onended = null;
|
|
node.stop();
|
|
node.disconnect();
|
|
} catch (e) {
|
|
// Ignore errors
|
|
}
|
|
});
|
|
|
|
// Update position
|
|
pausedAtRef.current = currentPos;
|
|
setCurrentTime(currentPos);
|
|
setIsPlaying(false);
|
|
|
|
// Clear animation frame
|
|
if (animationFrameRef.current) {
|
|
cancelAnimationFrame(animationFrameRef.current);
|
|
animationFrameRef.current = null;
|
|
}
|
|
|
|
// Restart after a brief delay
|
|
setTimeout(() => {
|
|
// Use tracksRef.current to get the latest tracks, not the stale closure
|
|
const latestTracks = tracksRef.current;
|
|
|
|
if (latestTracks.length === 0 || latestTracks.every(t => !t.audioBuffer)) return;
|
|
|
|
const audioContext = getAudioContext();
|
|
audioContextRef.current = audioContext;
|
|
|
|
// Disconnect old nodes
|
|
gainNodesRef.current.forEach(node => node.disconnect());
|
|
panNodesRef.current.forEach(node => node.disconnect());
|
|
effectNodesRef.current.forEach(trackEffects => {
|
|
trackEffects.forEach(effectNodeInfo => {
|
|
if (effectNodeInfo.node) {
|
|
try {
|
|
effectNodeInfo.node.disconnect();
|
|
} catch (e) {
|
|
// Ignore
|
|
}
|
|
}
|
|
if (effectNodeInfo.dryGain) effectNodeInfo.dryGain.disconnect();
|
|
if (effectNodeInfo.wetGain) effectNodeInfo.wetGain.disconnect();
|
|
});
|
|
});
|
|
if (masterGainNodeRef.current) {
|
|
masterGainNodeRef.current.disconnect();
|
|
}
|
|
|
|
// Reset refs
|
|
sourceNodesRef.current = [];
|
|
gainNodesRef.current = [];
|
|
panNodesRef.current = [];
|
|
effectNodesRef.current = [];
|
|
|
|
// Create master gain node
|
|
const masterGain = audioContext.createGain();
|
|
masterGain.gain.setValueAtTime(masterVolume, audioContext.currentTime);
|
|
masterGain.connect(audioContext.destination);
|
|
masterGainNodeRef.current = masterGain;
|
|
|
|
// Create audio graph for each track
|
|
for (const track of latestTracks) {
|
|
if (!track.audioBuffer) continue;
|
|
|
|
const source = audioContext.createBufferSource();
|
|
source.buffer = track.audioBuffer;
|
|
|
|
const gainNode = audioContext.createGain();
|
|
const panNode = audioContext.createStereoPanner();
|
|
|
|
// Set gain based on track volume and solo/mute state
|
|
const trackGain = getTrackGain(track, latestTracks);
|
|
gainNode.gain.setValueAtTime(trackGain, audioContext.currentTime);
|
|
|
|
// Set pan
|
|
panNode.pan.setValueAtTime(track.pan, audioContext.currentTime);
|
|
|
|
// Connect: source -> gain -> pan -> effects -> master gain -> destination
|
|
source.connect(gainNode);
|
|
gainNode.connect(panNode);
|
|
|
|
// Apply effect chain
|
|
const { outputNode, effectNodes } = applyEffectChain(audioContext, panNode, track.effectChain);
|
|
outputNode.connect(masterGain);
|
|
|
|
// Start playback from current position
|
|
source.start(0, pausedAtRef.current);
|
|
|
|
// Store references
|
|
sourceNodesRef.current.push(source);
|
|
gainNodesRef.current.push(gainNode);
|
|
panNodesRef.current.push(panNode);
|
|
effectNodesRef.current.push(effectNodes);
|
|
|
|
// Handle ended event
|
|
source.onended = () => {
|
|
if (pausedAtRef.current + (audioContext.currentTime - startTimeRef.current) >= duration) {
|
|
setIsPlaying(false);
|
|
setCurrentTime(0);
|
|
pausedAtRef.current = 0;
|
|
}
|
|
};
|
|
}
|
|
|
|
startTimeRef.current = audioContext.currentTime;
|
|
setIsPlaying(true);
|
|
|
|
// Start animation frame for position updates
|
|
const updatePosition = () => {
|
|
if (!audioContextRef.current) return;
|
|
|
|
const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
|
|
const newTime = pausedAtRef.current + elapsed;
|
|
|
|
if (newTime >= duration) {
|
|
setIsPlaying(false);
|
|
setCurrentTime(0);
|
|
pausedAtRef.current = 0;
|
|
if (animationFrameRef.current) {
|
|
cancelAnimationFrame(animationFrameRef.current);
|
|
animationFrameRef.current = null;
|
|
}
|
|
return;
|
|
}
|
|
|
|
setCurrentTime(newTime);
|
|
animationFrameRef.current = requestAnimationFrame(updatePosition);
|
|
};
|
|
updatePosition();
|
|
}, 10);
|
|
}
|
|
|
|
previousEffectStructureRef.current = currentStructure;
|
|
}, [tracks, isPlaying, duration, masterVolume]);
|
|
|
|
// Stop playback when all tracks are deleted
|
|
useEffect(() => {
|
|
if (!isPlaying) return;
|
|
|
|
// If tracks become empty or all tracks have no audio buffer, stop playback
|
|
if (tracks.length === 0 || tracks.every(t => !t.audioBuffer)) {
|
|
console.log('[useMultiTrackPlayer] All tracks deleted, stopping playback');
|
|
stop();
|
|
}
|
|
}, [tracks, isPlaying, stop]);
|
|
|
|
// Update effect parameters and bypass state in real-time
|
|
useEffect(() => {
|
|
if (!isPlaying || !audioContextRef.current) return;
|
|
|
|
tracks.forEach((track, trackIndex) => {
|
|
const effectNodes = effectNodesRef.current[trackIndex];
|
|
if (!effectNodes) return;
|
|
|
|
// Only update if we have the same number of effects (no add/remove)
|
|
if (effectNodes.length !== track.effectChain.effects.length) return;
|
|
|
|
track.effectChain.effects.forEach((effect, effectIndex) => {
|
|
const effectNodeInfo = effectNodes[effectIndex];
|
|
if (!effectNodeInfo) return;
|
|
|
|
// Update bypass state
|
|
if (effect.enabled !== effectNodeInfo.effect.enabled) {
|
|
toggleEffectBypass(audioContextRef.current!, effectNodeInfo, effect.enabled);
|
|
effectNodeInfo.effect.enabled = effect.enabled;
|
|
}
|
|
|
|
// Update parameters (only works for certain effect types)
|
|
if (JSON.stringify(effect.parameters) !== JSON.stringify(effectNodeInfo.effect.parameters)) {
|
|
updateEffectParameters(audioContextRef.current!, effectNodeInfo, effect);
|
|
effectNodeInfo.effect.parameters = effect.parameters;
|
|
}
|
|
});
|
|
});
|
|
}, [tracks, isPlaying]);
|
|
|
|
// Update master volume when it changes
|
|
useEffect(() => {
|
|
if (!isPlaying || !audioContextRef.current || !masterGainNodeRef.current) return;
|
|
|
|
masterGainNodeRef.current.gain.setValueAtTime(
|
|
masterVolume,
|
|
audioContextRef.current.currentTime
|
|
);
|
|
}, [masterVolume, isPlaying]);
|
|
|
|
// Cleanup on unmount
|
|
useEffect(() => {
|
|
return () => {
|
|
if (animationFrameRef.current) {
|
|
cancelAnimationFrame(animationFrameRef.current);
|
|
}
|
|
sourceNodesRef.current.forEach(node => {
|
|
try {
|
|
node.stop();
|
|
node.disconnect();
|
|
} catch (e) {
|
|
// Ignore
|
|
}
|
|
});
|
|
gainNodesRef.current.forEach(node => node.disconnect());
|
|
panNodesRef.current.forEach(node => node.disconnect());
|
|
if (masterGainNodeRef.current) {
|
|
masterGainNodeRef.current.disconnect();
|
|
}
|
|
};
|
|
}, []);
|
|
|
|
return {
|
|
isPlaying,
|
|
currentTime,
|
|
duration,
|
|
play,
|
|
pause,
|
|
stop,
|
|
seek,
|
|
togglePlayPause,
|
|
};
|
|
}
|