Added complete loop functionality with UI controls:

- Loop state management in useMultiTrackPlayer (loopEnabled, loopStart, loopEnd)
- Automatic restart from loop start when reaching loop end during playback
- Loop toggle button in PlaybackControls with Repeat icon
- Loop points UI shown when loop is enabled (similar to punch in/out)
- Manual loop point adjustment with number inputs
- Quick-set buttons to set loop points to the current time
- Wired loop functionality through the AudioEditor component

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
928 lines
32 KiB
TypeScript
import { useState, useCallback, useRef, useEffect } from 'react';
import { getAudioContext } from '@/lib/audio/context';
import type { Track } from '@/types/track';
import { getTrackGain } from '@/lib/audio/track-utils';
import { applyEffectChain, updateEffectParameters, toggleEffectBypass, type EffectNodeInfo } from '@/lib/audio/effects/processor';
import { evaluateAutomationLinear } from '@/lib/audio/automation-utils';

export interface MultiTrackPlayerState {
  isPlaying: boolean;
  currentTime: number;
  duration: number;
  loopEnabled: boolean;
  loopStart: number;
  loopEnd: number;
}

export interface TrackLevel {
  trackId: string;
  level: number;
}

export interface AutomationRecordingCallback {
  (trackId: string, laneId: string, currentTime: number, value: number): void;
}
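
/**
 * Multi-track playback over a shared Web Audio graph, with per-track and
 * master metering, automation read/write, and loop playback.
 *
 * Usage sketch (hypothetical consumer; `tracks`, `masterVolume`, and
 * `handleRecordAutomation` stand in for whatever the caller provides):
 *
 * @example
 * const { togglePlayPause, toggleLoop, setLoopPoints, loopEnabled } =
 *   useMultiTrackPlayer(tracks, masterVolume, handleRecordAutomation);
 * setLoopPoints(2, 6);            // loop the 2s-6s region
 * if (!loopEnabled) toggleLoop(); // arm the loop
 */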
export function useMultiTrackPlayer(
  tracks: Track[],
  masterVolume: number = 1,
  onRecordAutomation?: AutomationRecordingCallback
) {
  const [isPlaying, setIsPlaying] = useState(false);
  const [currentTime, setCurrentTime] = useState(0);
  const [duration, setDuration] = useState(0);
  const [trackLevels, setTrackLevels] = useState<Record<string, number>>({});
  const [masterPeakLevel, setMasterPeakLevel] = useState(0);
  const [masterRmsLevel, setMasterRmsLevel] = useState(0);
  const [masterIsClipping, setMasterIsClipping] = useState(false);
  const [loopEnabled, setLoopEnabled] = useState(false);
  const [loopStart, setLoopStart] = useState(0);
  const [loopEnd, setLoopEnd] = useState(0);

  const audioContextRef = useRef<AudioContext | null>(null);
  const sourceNodesRef = useRef<AudioBufferSourceNode[]>([]);
  const gainNodesRef = useRef<GainNode[]>([]);
  const panNodesRef = useRef<StereoPannerNode[]>([]);
  const analyserNodesRef = useRef<AnalyserNode[]>([]);
  const effectNodesRef = useRef<EffectNodeInfo[][]>([]); // Effect nodes per track
  const masterGainNodeRef = useRef<GainNode | null>(null);
  const masterAnalyserRef = useRef<AnalyserNode | null>(null);
  const masterLevelMonitorFrameRef = useRef<number | null>(null);
  const startTimeRef = useRef<number>(0);
  const pausedAtRef = useRef<number>(0);
  const animationFrameRef = useRef<number | null>(null);
  const levelMonitorFrameRef = useRef<number | null>(null);
  const automationFrameRef = useRef<number | null>(null);
  const isMonitoringLevelsRef = useRef<boolean>(false);
  const tracksRef = useRef<Track[]>(tracks); // Always keep latest tracks
  const lastRecordedValuesRef = useRef<Map<string, number>>(new Map()); // Track last recorded values to detect changes
  const onRecordAutomationRef = useRef<AutomationRecordingCallback | undefined>(onRecordAutomation);
  const loopEnabledRef = useRef<boolean>(false);
  const loopStartRef = useRef<number>(0);
  const loopEndRef = useRef<number>(0);
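  // The loop refs mirror the loop state so that long-lived requestAnimationFrame
  // callbacks (created once with empty deps) always read the latest loop
  // settings without having to be re-created on every change.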

  // Keep tracksRef in sync with tracks prop
  useEffect(() => {
    tracksRef.current = tracks;
  }, [tracks]);

  // Keep loop refs in sync with state
  useEffect(() => {
    loopEnabledRef.current = loopEnabled;
    loopStartRef.current = loopStart;
    loopEndRef.current = loopEnd;
  }, [loopEnabled, loopStart, loopEnd]);

  // Keep onRecordAutomationRef in sync
  useEffect(() => {
    onRecordAutomationRef.current = onRecordAutomation;
  }, [onRecordAutomation]);

  // Calculate total duration from all tracks
  useEffect(() => {
    let maxDuration = 0;
    for (const track of tracks) {
      if (track.audioBuffer) {
        maxDuration = Math.max(maxDuration, track.audioBuffer.duration);
      }
    }
    setDuration(maxDuration);
    // Initialize loop end to duration when duration changes
    if (loopEnd === 0 || loopEnd > maxDuration) {
      setLoopEnd(maxDuration);
    }
  }, [tracks, loopEnd]);

  // Convert linear amplitude to dB scale normalized to 0-1 range
  const linearToDbScale = (linear: number): number => {
    if (linear === 0) return 0;

    // Convert to dB (20 * log10(linear))
    const db = 20 * Math.log10(linear);
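    // Worked example: linear 0.5 -> 20 * log10(0.5) ≈ -6.02 dB -> (-6.02 + 60) / 60 ≈ 0.90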

    // Normalize -60dB to 0dB range to 0-1
    // -60dB or lower = 0%, 0dB = 100%
    const minDb = -60;
    const maxDb = 0;
    const normalized = (db - minDb) / (maxDb - minDb);

    // Clamp to 0-1 range
    return Math.max(0, Math.min(1, normalized));
  };

  // Monitor playback levels for all tracks
  const monitorPlaybackLevels = useCallback(() => {
    if (!isMonitoringLevelsRef.current || analyserNodesRef.current.length === 0) return;

    const levels: Record<string, number> = {};

    analyserNodesRef.current.forEach((analyser, index) => {
      const track = tracksRef.current[index];
      if (!track) return;

      const dataArray = new Float32Array(analyser.fftSize);
      analyser.getFloatTimeDomainData(dataArray);

      // Calculate peak level using float data (-1 to +1 range)
      let peak = 0;
      for (let i = 0; i < dataArray.length; i++) {
        const abs = Math.abs(dataArray[i]);
        if (abs > peak) {
          peak = abs;
        }
      }

      // Store raw linear peak (will be converted to dB in the fader component)
      levels[track.id] = peak;
    });

    setTrackLevels(levels);

    levelMonitorFrameRef.current = requestAnimationFrame(monitorPlaybackLevels);
  }, []);

  // Monitor master output levels (peak and RMS)
  const monitorMasterLevels = useCallback(() => {
    if (!masterAnalyserRef.current) {
      return;
    }

    const analyser = masterAnalyserRef.current;
    const bufferLength = analyser.fftSize;
    const dataArray = new Float32Array(bufferLength);

    analyser.getFloatTimeDomainData(dataArray);

    // Calculate peak level (max absolute value)
    let peak = 0;
    for (let i = 0; i < bufferLength; i++) {
      const abs = Math.abs(dataArray[i]);
      if (abs > peak) {
        peak = abs;
      }
    }

    // Calculate RMS level (root mean square)
    let sumSquares = 0;
    for (let i = 0; i < bufferLength; i++) {
      sumSquares += dataArray[i] * dataArray[i];
    }
    const rms = Math.sqrt(sumSquares / bufferLength);
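    // Peak captures the instantaneous maximum; RMS averages signal energy over
    // the analysis window, which tracks perceived loudness more closely.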

    // Detect clipping (signal >= 1.0)
    const isClipping = peak >= 1.0;

    setMasterPeakLevel(peak);
    setMasterRmsLevel(rms);
    if (isClipping) {
      setMasterIsClipping(true);
    }

    masterLevelMonitorFrameRef.current = requestAnimationFrame(monitorMasterLevels);
  }, []);

  // Apply automation values during playback
  const applyAutomation = useCallback(() => {
    if (!audioContextRef.current) return;

    const currentTime = pausedAtRef.current + (audioContextRef.current.currentTime - startTimeRef.current);

    tracksRef.current.forEach((track, index) => {
      // Apply volume automation
      const volumeLane = track.automation.lanes.find(lane => lane.parameterId === 'volume');
      if (volumeLane) {
        let volumeValue: number | undefined;

        // In write mode, record current track volume (only if value changed)
        if (volumeLane.mode === 'write' && onRecordAutomationRef.current) {
          volumeValue = track.volume;
          const lastValue = lastRecordedValuesRef.current.get(`${track.id}-volume`);

          // Only record if value has changed
          if (lastValue === undefined || Math.abs(lastValue - volumeValue) > 0.001) {
            lastRecordedValuesRef.current.set(`${track.id}-volume`, volumeValue);
            onRecordAutomationRef.current(track.id, volumeLane.id, currentTime, volumeValue);
          }
        } else if (volumeLane.points.length > 0) {
          // Otherwise play back automation
          volumeValue = evaluateAutomationLinear(volumeLane.points, currentTime);
        }

        if (volumeValue !== undefined && gainNodesRef.current[index]) {
          // Read tracks through the ref: this callback is created once (empty
          // deps), so the `tracks` prop captured in its closure would be stale.
          const trackGain = getTrackGain(track, tracksRef.current);
          // Apply both track gain (mute/solo) and automated volume
          gainNodesRef.current[index].gain.setValueAtTime(
            trackGain * volumeValue,
            audioContextRef.current!.currentTime
          );
        }
      }

      // Apply pan automation
      const panLane = track.automation.lanes.find(lane => lane.parameterId === 'pan');
      if (panLane) {
        let automatedValue: number | undefined;

        // In write mode, record current track pan (only if value changed)
        if (panLane.mode === 'write' && onRecordAutomationRef.current) {
          automatedValue = (track.pan + 1) / 2; // Convert -1 to 1 -> 0 to 1
          const lastValue = lastRecordedValuesRef.current.get(`${track.id}-pan`);

          // Only record if value has changed
          if (lastValue === undefined || Math.abs(lastValue - automatedValue) > 0.001) {
            lastRecordedValuesRef.current.set(`${track.id}-pan`, automatedValue);
            onRecordAutomationRef.current(track.id, panLane.id, currentTime, automatedValue);
          }
        } else if (panLane.points.length > 0) {
          // Otherwise play back automation
          automatedValue = evaluateAutomationLinear(panLane.points, currentTime);
        }

        if (automatedValue !== undefined && panNodesRef.current[index]) {
          // Pan automation values are 0-1, but StereoPannerNode expects -1 to 1
          const panValue = (automatedValue * 2) - 1;
          panNodesRef.current[index].pan.setValueAtTime(
            panValue,
            audioContextRef.current!.currentTime
          );
        }
      }

      // Apply effect parameter automation
      track.automation.lanes.forEach(lane => {
        // Check if this is an effect parameter (format: effect.{effectId}.{parameterName})
        if (lane.parameterId.startsWith('effect.')) {
          const parts = lane.parameterId.split('.');
          if (parts.length === 3) {
            const effectId = parts[1];
            const paramName = parts[2];
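            // e.g. "effect.eq-1.frequency" -> effectId "eq-1", paramName "frequency"
            // (IDs here are illustrative; real IDs come from the track's effect chain)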

            // Find the effect in the track's effect chain
            const effectIndex = track.effectChain.effects.findIndex(e => e.id === effectId);
            const effect = track.effectChain.effects[effectIndex];

            if (effectIndex >= 0 && effect) {
              let automatedValue: number | undefined;

              // In write mode, record current effect parameter value (only if value changed)
              if (lane.mode === 'write' && onRecordAutomationRef.current && effect.parameters) {
                const currentValue = (effect.parameters as any)[paramName];
                if (currentValue !== undefined) {
                  // Normalize value to 0-1 range
                  const range = lane.valueRange.max - lane.valueRange.min;
                  const normalizedValue = (currentValue - lane.valueRange.min) / range;

                  const lastValue = lastRecordedValuesRef.current.get(`${track.id}-effect-${effectId}-${paramName}`);

                  // Only record if value has changed
                  if (lastValue === undefined || Math.abs(lastValue - normalizedValue) > 0.001) {
                    lastRecordedValuesRef.current.set(`${track.id}-effect-${effectId}-${paramName}`, normalizedValue);
                    onRecordAutomationRef.current(track.id, lane.id, currentTime, normalizedValue);
                  }
                }
              } else if (lane.points.length > 0) {
                // Otherwise play back automation
                automatedValue = evaluateAutomationLinear(lane.points, currentTime);
              }

              // Apply the automated value to the effect
              if (automatedValue !== undefined && effectNodesRef.current[index] && effectNodesRef.current[index][effectIndex]) {
                const effectNodeInfo = effectNodesRef.current[index][effectIndex];

                // Convert normalized 0-1 value to actual parameter range
                const actualValue = lane.valueRange.min + (automatedValue * (lane.valueRange.max - lane.valueRange.min));

                // Update the effect parameter
                if (effect.parameters) {
                  const updatedParams = { ...effect.parameters, [paramName]: actualValue } as any;
                  updateEffectParameters(audioContextRef.current!, effectNodeInfo, {
                    ...effect,
                    parameters: updatedParams
                  });
                }
              }
            }
          }
        }
      });
    });

    automationFrameRef.current = requestAnimationFrame(applyAutomation);
  }, []);

  const updatePlaybackPosition = useCallback(() => {
    if (!audioContextRef.current) return;

    const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
    const newTime = pausedAtRef.current + elapsed;

    // Check if loop is enabled and we've reached the loop end
    if (loopEnabledRef.current && loopEndRef.current > loopStartRef.current && newTime >= loopEndRef.current) {
      // Loop back to start
      pausedAtRef.current = loopStartRef.current;
      startTimeRef.current = audioContextRef.current.currentTime;
      setCurrentTime(loopStartRef.current);
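
      // Note: tearing down and re-creating BufferSources leaves a small
      // scheduling gap at the loop boundary. Sample-accurate looping would use
      // the native AudioBufferSourceNode.loop/loopStart/loopEnd properties
      // instead, at the cost of restructuring how sources are created.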
      // Restart all sources from loop start
      sourceNodesRef.current.forEach(node => {
        try {
          node.onended = null; // keep the end-of-playback handler from firing on a loop restart
          node.stop();
          node.disconnect();
        } catch (e) {
          // Ignore errors from already stopped nodes
        }
      });

      // Re-trigger play from loop start
      const tracks = tracksRef.current;
      const audioContext = audioContextRef.current;

      // Clear old sources
      sourceNodesRef.current = [];

      // Create new sources starting from loop start.
      // analyserNodesRef only holds entries for tracks that had an audioBuffer
      // when the graph was built, so index it with a separate counter rather
      // than the track's position in the full tracks array.
      let nodeIndex = 0;
      for (const track of tracks) {
        if (!track.audioBuffer) continue;

        const source = audioContext.createBufferSource();
        source.buffer = track.audioBuffer;

        // Connect to existing nodes (gain, pan, effects are still connected)
        source.connect(analyserNodesRef.current[nodeIndex]);
        nodeIndex++;

        // Start from loop start position
        source.start(0, loopStartRef.current);
        sourceNodesRef.current.push(source);
      }

      animationFrameRef.current = requestAnimationFrame(updatePlaybackPosition);
      return;
    }

    if (newTime >= duration) {
      setIsPlaying(false);
      isMonitoringLevelsRef.current = false;
      setCurrentTime(0);
      pausedAtRef.current = 0;
      setTrackLevels({});
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
        animationFrameRef.current = null;
      }
      if (levelMonitorFrameRef.current) {
        cancelAnimationFrame(levelMonitorFrameRef.current);
        levelMonitorFrameRef.current = null;
      }
      // Also stop the master meter; otherwise it keeps animating after playback ends
      if (masterLevelMonitorFrameRef.current) {
        cancelAnimationFrame(masterLevelMonitorFrameRef.current);
        masterLevelMonitorFrameRef.current = null;
      }
      if (automationFrameRef.current) {
        cancelAnimationFrame(automationFrameRef.current);
        automationFrameRef.current = null;
      }
      return;
    }

    setCurrentTime(newTime);
    animationFrameRef.current = requestAnimationFrame(updatePlaybackPosition);
  }, [duration]);

  const play = useCallback(() => {
    if (tracks.length === 0 || tracks.every(t => !t.audioBuffer)) return;

    const audioContext = getAudioContext();
    audioContextRef.current = audioContext;

    // Stop any existing playback
    sourceNodesRef.current.forEach(node => {
      try {
        node.stop();
        node.disconnect();
      } catch (e) {
        // Ignore errors from already stopped nodes
      }
    });
    gainNodesRef.current.forEach(node => node.disconnect());
    panNodesRef.current.forEach(node => node.disconnect());
    if (masterGainNodeRef.current) {
      masterGainNodeRef.current.disconnect();
    }

    sourceNodesRef.current = [];
    gainNodesRef.current = [];
    panNodesRef.current = [];
    analyserNodesRef.current = [];
    effectNodesRef.current = [];

    // Create master gain node with analyser for metering
    const masterGain = audioContext.createGain();
    masterGain.gain.setValueAtTime(masterVolume, audioContext.currentTime);

    const masterAnalyser = audioContext.createAnalyser();
    masterAnalyser.fftSize = 256;
    masterAnalyser.smoothingTimeConstant = 0.8;

    // Connect: masterGain -> analyser -> destination
    masterGain.connect(masterAnalyser);
    masterAnalyser.connect(audioContext.destination);

    masterGainNodeRef.current = masterGain;
    masterAnalyserRef.current = masterAnalyser;

    // Create audio graph for each track
    for (const track of tracks) {
      if (!track.audioBuffer) continue;

      const source = audioContext.createBufferSource();
      source.buffer = track.audioBuffer;

      const gainNode = audioContext.createGain();
      const panNode = audioContext.createStereoPanner();
      const analyserNode = audioContext.createAnalyser();
      analyserNode.fftSize = 256;
      analyserNode.smoothingTimeConstant = 0.8;

      // Set gain based on track volume and solo/mute state
      const trackGain = getTrackGain(track, tracks);
      gainNode.gain.setValueAtTime(trackGain, audioContext.currentTime);

      // Set pan
      panNode.pan.setValueAtTime(track.pan, audioContext.currentTime);

      // Connect: source -> analyser -> gain -> pan -> effects -> master gain -> destination
      // Analyser is before gain so it shows raw audio levels independent of volume fader
      source.connect(analyserNode);
      analyserNode.connect(gainNode);
      gainNode.connect(panNode);

      // Apply effect chain
      console.log('[MultiTrackPlayer] Applying effect chain for track:', track.name, {
        chainId: track.effectChain.id,
        chainName: track.effectChain.name,
        effectCount: track.effectChain.effects.length,
        effects: track.effectChain.effects,
      });
      const { outputNode, effectNodes } = applyEffectChain(audioContext, panNode, track.effectChain);

      // Connect to master gain
      outputNode.connect(masterGain);
      console.log('[MultiTrackPlayer] Effect output connected with', effectNodes.length, 'effect nodes');

      // Start playback from current position
      source.start(0, pausedAtRef.current);

      // Store references
      sourceNodesRef.current.push(source);
      gainNodesRef.current.push(gainNode);
      panNodesRef.current.push(panNode);
      analyserNodesRef.current.push(analyserNode);
      effectNodesRef.current.push(effectNodes);

      // Handle ended event
      source.onended = () => {
        if (pausedAtRef.current + (audioContext.currentTime - startTimeRef.current) >= duration) {
          setIsPlaying(false);
          isMonitoringLevelsRef.current = false;
          setCurrentTime(0);
          pausedAtRef.current = 0;
          setTrackLevels({});
        }
      };
    }

    startTimeRef.current = audioContext.currentTime;
    setIsPlaying(true);
    updatePlaybackPosition();

    // Start level monitoring
    isMonitoringLevelsRef.current = true;
    monitorPlaybackLevels();
    monitorMasterLevels();

    // Start automation
    applyAutomation();
  }, [tracks, duration, masterVolume, updatePlaybackPosition, monitorPlaybackLevels, monitorMasterLevels, applyAutomation]);

  const pause = useCallback(() => {
    if (!audioContextRef.current || !isPlaying) return;

    // Stop all source nodes
    sourceNodesRef.current.forEach(node => {
      try {
        node.stop();
        node.disconnect();
      } catch (e) {
        // Ignore errors
      }
    });

    // Update paused position
    const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
    pausedAtRef.current = Math.min(pausedAtRef.current + elapsed, duration);
    setCurrentTime(pausedAtRef.current);

    setIsPlaying(false);

    // Stop level monitoring
    isMonitoringLevelsRef.current = false;

    if (animationFrameRef.current) {
      cancelAnimationFrame(animationFrameRef.current);
      animationFrameRef.current = null;
    }

    if (levelMonitorFrameRef.current) {
      cancelAnimationFrame(levelMonitorFrameRef.current);
      levelMonitorFrameRef.current = null;
    }

    if (masterLevelMonitorFrameRef.current) {
      cancelAnimationFrame(masterLevelMonitorFrameRef.current);
      masterLevelMonitorFrameRef.current = null;
    }

    if (automationFrameRef.current) {
      cancelAnimationFrame(automationFrameRef.current);
      automationFrameRef.current = null;
    }

    // Clear track levels
    setTrackLevels({});
  }, [isPlaying, duration]);

  const stop = useCallback(() => {
    pause();
    pausedAtRef.current = 0;
    setCurrentTime(0);
    // Clear last recorded values when stopping
    lastRecordedValuesRef.current.clear();
  }, [pause]);

  const seek = useCallback((time: number) => {
    const wasPlaying = isPlaying;

    if (wasPlaying) {
      pause();
    }

    const clampedTime = Math.max(0, Math.min(time, duration));
    pausedAtRef.current = clampedTime;
    setCurrentTime(clampedTime);

    if (wasPlaying) {
      // Small delay to ensure state is updated
      setTimeout(() => play(), 10);
    }
  }, [isPlaying, duration, pause, play]);

  const togglePlayPause = useCallback(() => {
    if (isPlaying) {
      pause();
    } else {
      play();
    }
  }, [isPlaying, play, pause]);

  // Update gain/pan when tracks change (simple updates that don't require graph rebuild)
  useEffect(() => {
    if (!isPlaying || !audioContextRef.current) return;

    tracks.forEach((track, index) => {
      if (gainNodesRef.current[index]) {
        const trackGain = getTrackGain(track, tracks);
        gainNodesRef.current[index].gain.setValueAtTime(
          trackGain,
          audioContextRef.current!.currentTime
        );
      }

      if (panNodesRef.current[index]) {
        panNodesRef.current[index].pan.setValueAtTime(
          track.pan,
          audioContextRef.current!.currentTime
        );
      }
    });
  }, [tracks, isPlaying]);

  // Track effect chain structure to detect add/remove operations
  const previousEffectStructureRef = useRef<string | null>(null);

  // Detect effect chain structure changes (add/remove/reorder) and restart
  useEffect(() => {
    if (!isPlaying || !audioContextRef.current) return;

    // Create a signature of the current effect structure (IDs and count)
    const currentStructure = tracks.map(track =>
      track.effectChain.effects.map(e => e.id).join(',')
    ).join('|');
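    // e.g. for two tracks this might produce "eq-1,comp-1|reverb-2" (IDs illustrative)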

    // If structure changed (effects added/removed/reordered) while playing, restart
    // Don't restart if tracks is empty (intermediate state during updates)
    if (previousEffectStructureRef.current !== null &&
        previousEffectStructureRef.current !== currentStructure &&
        tracks.length > 0) {
      console.log('[useMultiTrackPlayer] Effect chain structure changed, restarting...');

      // Update the reference immediately to prevent re-triggering
      previousEffectStructureRef.current = currentStructure;

      // Update tracksRef with current tracks BEFORE setTimeout
      tracksRef.current = tracks;

      // Save current position
      const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
      const currentPos = pausedAtRef.current + elapsed;

      // Stop all source nodes
      sourceNodesRef.current.forEach(node => {
        try {
          node.onended = null;
          node.stop();
          node.disconnect();
        } catch (e) {
          // Ignore errors
        }
      });

      // Update position
      pausedAtRef.current = currentPos;
      setCurrentTime(currentPos);
      setIsPlaying(false);

      // Clear animation frame
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
        animationFrameRef.current = null;
      }

      // Restart after a brief delay
      setTimeout(() => {
        // Use tracksRef.current to get the latest tracks, not the stale closure
        const latestTracks = tracksRef.current;

        if (latestTracks.length === 0 || latestTracks.every(t => !t.audioBuffer)) return;

        const audioContext = getAudioContext();
        audioContextRef.current = audioContext;

        // Disconnect old nodes
        gainNodesRef.current.forEach(node => node.disconnect());
        panNodesRef.current.forEach(node => node.disconnect());
        effectNodesRef.current.forEach(trackEffects => {
          trackEffects.forEach(effectNodeInfo => {
            if (effectNodeInfo.node) {
              try {
                effectNodeInfo.node.disconnect();
              } catch (e) {
                // Ignore
              }
            }
            if (effectNodeInfo.dryGain) effectNodeInfo.dryGain.disconnect();
            if (effectNodeInfo.wetGain) effectNodeInfo.wetGain.disconnect();
          });
        });
        if (masterGainNodeRef.current) {
          masterGainNodeRef.current.disconnect();
        }

        // Reset refs
        sourceNodesRef.current = [];
        gainNodesRef.current = [];
        panNodesRef.current = [];
        analyserNodesRef.current = [];
        effectNodesRef.current = [];

        // Create master gain node; rebuild the master analyser as well so
        // metering keeps working (reusing the old analyser would read silence,
        // since nothing feeds it after the rebuild)
        const masterGain = audioContext.createGain();
        masterGain.gain.setValueAtTime(masterVolume, audioContext.currentTime);
        const masterAnalyser = audioContext.createAnalyser();
        masterAnalyser.fftSize = 256;
        masterAnalyser.smoothingTimeConstant = 0.8;
        masterGain.connect(masterAnalyser);
        masterAnalyser.connect(audioContext.destination);
        masterGainNodeRef.current = masterGain;
        masterAnalyserRef.current = masterAnalyser;

        // Create audio graph for each track
        for (const track of latestTracks) {
          if (!track.audioBuffer) continue;

          const source = audioContext.createBufferSource();
          source.buffer = track.audioBuffer;

          const gainNode = audioContext.createGain();
          const panNode = audioContext.createStereoPanner();
          const analyserNode = audioContext.createAnalyser();
          analyserNode.fftSize = 256;
          analyserNode.smoothingTimeConstant = 0.8;

          // Set gain based on track volume and solo/mute state
          const trackGain = getTrackGain(track, latestTracks);
          gainNode.gain.setValueAtTime(trackGain, audioContext.currentTime);

          // Set pan
          panNode.pan.setValueAtTime(track.pan, audioContext.currentTime);

          // Connect: source -> analyser -> gain -> pan -> effects -> master gain -> destination
          // Analyser is before gain so it shows raw audio levels independent of volume fader
          source.connect(analyserNode);
          analyserNode.connect(gainNode);
          gainNode.connect(panNode);

          // Apply effect chain
          const { outputNode, effectNodes } = applyEffectChain(audioContext, panNode, track.effectChain);
          outputNode.connect(masterGain);

          // Start playback from current position
          source.start(0, pausedAtRef.current);

          // Store references
          sourceNodesRef.current.push(source);
          gainNodesRef.current.push(gainNode);
          panNodesRef.current.push(panNode);
          analyserNodesRef.current.push(analyserNode);
          effectNodesRef.current.push(effectNodes);

          // Handle ended event
          source.onended = () => {
            if (pausedAtRef.current + (audioContext.currentTime - startTimeRef.current) >= duration) {
              setIsPlaying(false);
              isMonitoringLevelsRef.current = false;
              setCurrentTime(0);
              pausedAtRef.current = 0;
              setTrackLevels({});
            }
          };
        }

        startTimeRef.current = audioContext.currentTime;
        setIsPlaying(true);

        // Start level monitoring
        isMonitoringLevelsRef.current = true;

        // Start animation frame for position updates
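        // Note: unlike updatePlaybackPosition, this local updater does not check
        // loop points, so looping resumes only on the next explicit play().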
        const updatePosition = () => {
          if (!audioContextRef.current) return;

          const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
          const newTime = pausedAtRef.current + elapsed;

          if (newTime >= duration) {
            setIsPlaying(false);
            isMonitoringLevelsRef.current = false;
            setCurrentTime(0);
            pausedAtRef.current = 0;
            if (animationFrameRef.current) {
              cancelAnimationFrame(animationFrameRef.current);
              animationFrameRef.current = null;
            }
            if (levelMonitorFrameRef.current) {
              cancelAnimationFrame(levelMonitorFrameRef.current);
              levelMonitorFrameRef.current = null;
            }
            if (masterLevelMonitorFrameRef.current) {
              cancelAnimationFrame(masterLevelMonitorFrameRef.current);
              masterLevelMonitorFrameRef.current = null;
            }
            if (automationFrameRef.current) {
              cancelAnimationFrame(automationFrameRef.current);
              automationFrameRef.current = null;
            }
            return;
          }

          setCurrentTime(newTime);
          animationFrameRef.current = requestAnimationFrame(updatePosition);
        };
        updatePosition();
        monitorPlaybackLevels();
        monitorMasterLevels();
        applyAutomation();
      }, 10);
    }

    previousEffectStructureRef.current = currentStructure;
  }, [tracks, isPlaying, duration, masterVolume, monitorPlaybackLevels, monitorMasterLevels, applyAutomation]);

  // Stop playback when all tracks are deleted
  useEffect(() => {
    if (!isPlaying) return;

    // If tracks become empty or all tracks have no audio buffer, stop playback
    if (tracks.length === 0 || tracks.every(t => !t.audioBuffer)) {
      console.log('[useMultiTrackPlayer] All tracks deleted, stopping playback');
      stop();
    }
  }, [tracks, isPlaying, stop]);

  // Update effect parameters and bypass state in real-time
  useEffect(() => {
    if (!isPlaying || !audioContextRef.current) return;

    tracks.forEach((track, trackIndex) => {
      const effectNodes = effectNodesRef.current[trackIndex];
      if (!effectNodes) return;

      // Only update if we have the same number of effects (no add/remove)
      if (effectNodes.length !== track.effectChain.effects.length) return;

      track.effectChain.effects.forEach((effect, effectIndex) => {
        const effectNodeInfo = effectNodes[effectIndex];
        if (!effectNodeInfo) return;

        // Update bypass state
        if (effect.enabled !== effectNodeInfo.effect.enabled) {
          toggleEffectBypass(audioContextRef.current!, effectNodeInfo, effect.enabled);
          effectNodeInfo.effect.enabled = effect.enabled;
        }

        // Update parameters (only works for certain effect types)
        if (JSON.stringify(effect.parameters) !== JSON.stringify(effectNodeInfo.effect.parameters)) {
          updateEffectParameters(audioContextRef.current!, effectNodeInfo, effect);
          effectNodeInfo.effect.parameters = effect.parameters;
        }
      });
    });
  }, [tracks, isPlaying]);

  // Update master volume when it changes
  useEffect(() => {
    if (!isPlaying || !audioContextRef.current || !masterGainNodeRef.current) return;

    masterGainNodeRef.current.gain.setValueAtTime(
      masterVolume,
      audioContextRef.current.currentTime
    );
  }, [masterVolume, isPlaying]);

  // Cleanup on unmount
  useEffect(() => {
    return () => {
      isMonitoringLevelsRef.current = false;
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
      if (levelMonitorFrameRef.current) {
        cancelAnimationFrame(levelMonitorFrameRef.current);
      }
      if (masterLevelMonitorFrameRef.current) {
        cancelAnimationFrame(masterLevelMonitorFrameRef.current);
      }
      if (automationFrameRef.current) {
        cancelAnimationFrame(automationFrameRef.current);
      }
      sourceNodesRef.current.forEach(node => {
        try {
          node.stop();
          node.disconnect();
        } catch (e) {
          // Ignore
        }
      });
      gainNodesRef.current.forEach(node => node.disconnect());
      panNodesRef.current.forEach(node => node.disconnect());
      analyserNodesRef.current.forEach(node => node.disconnect());
      if (masterGainNodeRef.current) {
        masterGainNodeRef.current.disconnect();
      }
    };
  }, []);

  const resetClipIndicator = useCallback(() => {
    setMasterIsClipping(false);
  }, []);

  const toggleLoop = useCallback(() => {
    setLoopEnabled(prev => !prev);
  }, []);

  const setLoopPoints = useCallback((start: number, end: number) => {
    setLoopStart(Math.max(0, start));
    setLoopEnd(Math.min(duration, Math.max(start, end)));
  }, [duration]);

  const setLoopFromSelection = useCallback((selectionStart: number, selectionEnd: number) => {
    if (selectionStart < selectionEnd) {
      setLoopPoints(selectionStart, selectionEnd);
      setLoopEnabled(true);
    }
  }, [setLoopPoints]);

  return {
    isPlaying,
    currentTime,
    duration,
    trackLevels,
    masterPeakLevel,
    masterRmsLevel,
    masterIsClipping,
    masterAnalyser: masterAnalyserRef.current,
    resetClipIndicator,
    play,
    pause,
    stop,
    seek,
    togglePlayPause,
    loopEnabled,
    loopStart,
    loopEnd,
    toggleLoop,
    setLoopPoints,
    setLoopFromSelection,
  };
}