Files
audio-ui/lib/hooks/useMultiTrackPlayer.ts
Sebastian Krüger 3e6fbda755 fix: move analyser before gain node to show true audio levels
Repositioned analyser nodes in audio graph to measure raw audio
levels before volume/gain adjustments.

The Problem:
- Analyser was after gain node in signal chain
- Track volume defaults to 0.8 (80%)
- Audio was scaled down before measurement
- Meters only showed ~50% of actual audio peaks

The Solution:
- Moved analyser to immediately after source
- Now measures raw audio before any processing
- Shows true audio content independent of fader position

Audio Graph Changes:
Before: source -> gain -> pan -> effects -> analyser -> master
After:  source -> analyser -> gain -> pan -> effects -> master

Benefits:
- Meters show full 0-100% range based on audio content
- Meter reading independent of volume fader position
- Accurate representation of track audio levels
- Increased smoothingTimeConstant to 0.8 for smoother motion

This matches how professional DAWs meter: level meters show the
audio content (pre-fader), not the output level after the fader.
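
A minimal sketch of the reordering in plain Web Audio API calls (the
standalone context and node names here are illustrative, not the hook's
actual code):

    const ctx = new AudioContext();
    const masterGain = ctx.createGain();
    masterGain.connect(ctx.destination);

    const source = ctx.createBufferSource();
    const analyser = ctx.createAnalyser();
    analyser.fftSize = 256;
    analyser.smoothingTimeConstant = 0.8; // smoother meter motion
    const gain = ctx.createGain();
    gain.gain.value = 0.8; // default track volume

    // Before: source -> gain -> ... -> analyser (meter scaled by the fader)
    // After:  source -> analyser -> gain -> ... (meter sees raw audio)
    source.connect(analyser);
    analyser.connect(gain);
    gain.connect(masterGain);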

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-18 15:17:42 +01:00

579 lines
19 KiB
TypeScript

import { useState, useCallback, useRef, useEffect } from 'react';
import { getAudioContext } from '@/lib/audio/context';
import type { Track } from '@/types/track';
import { getTrackGain } from '@/lib/audio/track-utils';
import { applyEffectChain, updateEffectParameters, toggleEffectBypass, type EffectNodeInfo } from '@/lib/audio/effects/processor';

export interface MultiTrackPlayerState {
  isPlaying: boolean;
  currentTime: number;
  duration: number;
}

export interface TrackLevel {
  trackId: string;
  level: number;
}
export function useMultiTrackPlayer(tracks: Track[], masterVolume: number = 1) {
  const [isPlaying, setIsPlaying] = useState(false);
  const [currentTime, setCurrentTime] = useState(0);
  const [duration, setDuration] = useState(0);
  const [trackLevels, setTrackLevels] = useState<Record<string, number>>({});

  const audioContextRef = useRef<AudioContext | null>(null);
  const sourceNodesRef = useRef<AudioBufferSourceNode[]>([]);
  const gainNodesRef = useRef<GainNode[]>([]);
  const panNodesRef = useRef<StereoPannerNode[]>([]);
  const analyserNodesRef = useRef<AnalyserNode[]>([]);
  const effectNodesRef = useRef<EffectNodeInfo[][]>([]); // Effect nodes per track
  const masterGainNodeRef = useRef<GainNode | null>(null);
  const startTimeRef = useRef<number>(0);
  const pausedAtRef = useRef<number>(0);
  const animationFrameRef = useRef<number | null>(null);
  const levelMonitorFrameRef = useRef<number | null>(null);
  const isMonitoringLevelsRef = useRef<boolean>(false);
  const tracksRef = useRef<Track[]>(tracks); // Always keep latest tracks

  // Keep tracksRef in sync with tracks prop
  useEffect(() => {
    tracksRef.current = tracks;
  }, [tracks]);

  // Calculate total duration from all tracks
  useEffect(() => {
    let maxDuration = 0;
    for (const track of tracks) {
      if (track.audioBuffer) {
        maxDuration = Math.max(maxDuration, track.audioBuffer.duration);
      }
    }
    setDuration(maxDuration);
  }, [tracks]);

  // Monitor playback levels for all tracks
  const monitorPlaybackLevels = useCallback(() => {
    if (!isMonitoringLevelsRef.current || analyserNodesRef.current.length === 0) return;
    const levels: Record<string, number> = {};
    analyserNodesRef.current.forEach((analyser, index) => {
      const track = tracksRef.current[index];
      if (!track) return;
      const dataArray = new Uint8Array(analyser.frequencyBinCount);
      analyser.getByteTimeDomainData(dataArray);
      // Calculate peak level (more responsive than RMS for visual meters).
      // getByteTimeDomainData yields samples in 0..255 centered at 128,
      // so (value - 128) / 128 normalizes to roughly -1..1.
      let peak = 0;
      for (let i = 0; i < dataArray.length; i++) {
        const normalized = Math.abs((dataArray[i] - 128) / 128);
        if (normalized > peak) {
          peak = normalized;
        }
      }
      levels[track.id] = peak;
    });
    setTrackLevels(levels);
    levelMonitorFrameRef.current = requestAnimationFrame(monitorPlaybackLevels);
  }, []);
  const updatePlaybackPosition = useCallback(() => {
    if (!audioContextRef.current) return;
    const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
    const newTime = pausedAtRef.current + elapsed;
    if (newTime >= duration) {
      setIsPlaying(false);
      isMonitoringLevelsRef.current = false;
      setCurrentTime(0);
      pausedAtRef.current = 0;
      setTrackLevels({});
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
        animationFrameRef.current = null;
      }
      if (levelMonitorFrameRef.current) {
        cancelAnimationFrame(levelMonitorFrameRef.current);
        levelMonitorFrameRef.current = null;
      }
      return;
    }
    setCurrentTime(newTime);
    animationFrameRef.current = requestAnimationFrame(updatePlaybackPosition);
  }, [duration]);
  const play = useCallback(() => {
    if (tracks.length === 0 || tracks.every(t => !t.audioBuffer)) return;
    const audioContext = getAudioContext();
    audioContextRef.current = audioContext;

    // Stop any existing playback
    sourceNodesRef.current.forEach(node => {
      try {
        node.stop();
        node.disconnect();
      } catch (e) {
        // Ignore errors from already stopped nodes
      }
    });
    gainNodesRef.current.forEach(node => node.disconnect());
    panNodesRef.current.forEach(node => node.disconnect());
    analyserNodesRef.current.forEach(node => node.disconnect()); // keep teardown consistent with unmount cleanup
    if (masterGainNodeRef.current) {
      masterGainNodeRef.current.disconnect();
    }
    sourceNodesRef.current = [];
    gainNodesRef.current = [];
    panNodesRef.current = [];
    analyserNodesRef.current = [];
    effectNodesRef.current = [];

    // Create master gain node
    const masterGain = audioContext.createGain();
    masterGain.gain.setValueAtTime(masterVolume, audioContext.currentTime);
    masterGain.connect(audioContext.destination);
    masterGainNodeRef.current = masterGain;

    // Create audio graph for each track
    for (const track of tracks) {
      if (!track.audioBuffer) continue;
      const source = audioContext.createBufferSource();
      source.buffer = track.audioBuffer;
      const gainNode = audioContext.createGain();
      const panNode = audioContext.createStereoPanner();
      const analyserNode = audioContext.createAnalyser();
      analyserNode.fftSize = 256;
      analyserNode.smoothingTimeConstant = 0.8;
      // Set gain based on track volume and solo/mute state
      const trackGain = getTrackGain(track, tracks);
      gainNode.gain.setValueAtTime(trackGain, audioContext.currentTime);
      // Set pan
      panNode.pan.setValueAtTime(track.pan, audioContext.currentTime);
      // Connect: source -> analyser -> gain -> pan -> effects -> master gain -> destination
      // Analyser is before gain so it shows raw audio levels independent of volume fader
      source.connect(analyserNode);
      analyserNode.connect(gainNode);
      gainNode.connect(panNode);
      // Apply effect chain
      console.log('[MultiTrackPlayer] Applying effect chain for track:', track.name);
      console.log('[MultiTrackPlayer] Effect chain ID:', track.effectChain.id);
      console.log('[MultiTrackPlayer] Effect chain name:', track.effectChain.name);
      console.log('[MultiTrackPlayer] Number of effects:', track.effectChain.effects.length);
      console.log('[MultiTrackPlayer] Effects:', track.effectChain.effects);
      const { outputNode, effectNodes } = applyEffectChain(audioContext, panNode, track.effectChain);
      // Connect to master gain
      outputNode.connect(masterGain);
      console.log('[MultiTrackPlayer] Effect output connected with', effectNodes.length, 'effect nodes');
      // Start playback from current position
      source.start(0, pausedAtRef.current);
      // Store references
      sourceNodesRef.current.push(source);
      gainNodesRef.current.push(gainNode);
      panNodesRef.current.push(panNode);
      analyserNodesRef.current.push(analyserNode);
      effectNodesRef.current.push(effectNodes);
      // Handle ended event
      source.onended = () => {
        if (pausedAtRef.current + (audioContext.currentTime - startTimeRef.current) >= duration) {
          setIsPlaying(false);
          isMonitoringLevelsRef.current = false;
          setCurrentTime(0);
          pausedAtRef.current = 0;
          setTrackLevels({});
        }
      };
    }

    startTimeRef.current = audioContext.currentTime;
    setIsPlaying(true);
    updatePlaybackPosition();
    // Start level monitoring
    isMonitoringLevelsRef.current = true;
    monitorPlaybackLevels();
  }, [tracks, duration, masterVolume, updatePlaybackPosition, monitorPlaybackLevels]);
  const pause = useCallback(() => {
    if (!audioContextRef.current || !isPlaying) return;
    // Stop all source nodes
    sourceNodesRef.current.forEach(node => {
      try {
        node.stop();
        node.disconnect();
      } catch (e) {
        // Ignore errors
      }
    });
    // Update paused position
    const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
    pausedAtRef.current = Math.min(pausedAtRef.current + elapsed, duration);
    setCurrentTime(pausedAtRef.current);
    setIsPlaying(false);
    // Stop level monitoring
    isMonitoringLevelsRef.current = false;
    if (animationFrameRef.current) {
      cancelAnimationFrame(animationFrameRef.current);
      animationFrameRef.current = null;
    }
    if (levelMonitorFrameRef.current) {
      cancelAnimationFrame(levelMonitorFrameRef.current);
      levelMonitorFrameRef.current = null;
    }
    // Clear track levels
    setTrackLevels({});
  }, [isPlaying, duration]);
  const stop = useCallback(() => {
    pause();
    pausedAtRef.current = 0;
    setCurrentTime(0);
  }, [pause]);

  const seek = useCallback((time: number) => {
    const wasPlaying = isPlaying;
    if (wasPlaying) {
      pause();
    }
    const clampedTime = Math.max(0, Math.min(time, duration));
    pausedAtRef.current = clampedTime;
    setCurrentTime(clampedTime);
    if (wasPlaying) {
      // Small delay to ensure state is updated
      setTimeout(() => play(), 10);
    }
  }, [isPlaying, duration, pause, play]);

  const togglePlayPause = useCallback(() => {
    if (isPlaying) {
      pause();
    } else {
      play();
    }
  }, [isPlaying, play, pause]);
  // Update gain/pan when tracks change (simple updates that don't require graph rebuild)
  useEffect(() => {
    if (!isPlaying || !audioContextRef.current) return;
    tracks.forEach((track, index) => {
      if (gainNodesRef.current[index]) {
        const trackGain = getTrackGain(track, tracks);
        gainNodesRef.current[index].gain.setValueAtTime(
          trackGain,
          audioContextRef.current!.currentTime
        );
      }
      if (panNodesRef.current[index]) {
        panNodesRef.current[index].pan.setValueAtTime(
          track.pan,
          audioContextRef.current!.currentTime
        );
      }
    });
  }, [tracks, isPlaying]);
  // Track effect chain structure to detect add/remove operations
  const previousEffectStructureRef = useRef<string | null>(null);

  // Detect effect chain structure changes (add/remove/reorder) and restart
  useEffect(() => {
    if (!isPlaying || !audioContextRef.current) return;
    // Create a signature of the current effect structure (IDs and count)
    const currentStructure = tracks.map(track =>
      track.effectChain.effects.map(e => e.id).join(',')
    ).join('|');
    // If structure changed (effects added/removed/reordered) while playing, restart
    // Don't restart if tracks is empty (intermediate state during updates)
    if (previousEffectStructureRef.current !== null &&
        previousEffectStructureRef.current !== currentStructure &&
        tracks.length > 0) {
      console.log('[useMultiTrackPlayer] Effect chain structure changed, restarting...');
      // Update the reference immediately to prevent re-triggering
      previousEffectStructureRef.current = currentStructure;
      // Update tracksRef with current tracks BEFORE setTimeout
      tracksRef.current = tracks;
      // Save current position
      const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
      const currentPos = pausedAtRef.current + elapsed;
      // Stop all source nodes
      sourceNodesRef.current.forEach(node => {
        try {
          node.onended = null;
          node.stop();
          node.disconnect();
        } catch (e) {
          // Ignore errors
        }
      });
      // Update position
      pausedAtRef.current = currentPos;
      setCurrentTime(currentPos);
      setIsPlaying(false);
      // Clear animation frame
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
        animationFrameRef.current = null;
      }
      // Restart after a brief delay
      setTimeout(() => {
        // Use tracksRef.current to get the latest tracks, not the stale closure
        const latestTracks = tracksRef.current;
        if (latestTracks.length === 0 || latestTracks.every(t => !t.audioBuffer)) return;
        const audioContext = getAudioContext();
        audioContextRef.current = audioContext;
        // Disconnect old nodes
        gainNodesRef.current.forEach(node => node.disconnect());
        panNodesRef.current.forEach(node => node.disconnect());
        analyserNodesRef.current.forEach(node => node.disconnect()); // keep teardown consistent with unmount cleanup
        effectNodesRef.current.forEach(trackEffects => {
          trackEffects.forEach(effectNodeInfo => {
            if (effectNodeInfo.node) {
              try {
                effectNodeInfo.node.disconnect();
              } catch (e) {
                // Ignore
              }
            }
            if (effectNodeInfo.dryGain) effectNodeInfo.dryGain.disconnect();
            if (effectNodeInfo.wetGain) effectNodeInfo.wetGain.disconnect();
          });
        });
        if (masterGainNodeRef.current) {
          masterGainNodeRef.current.disconnect();
        }
        // Reset refs
        sourceNodesRef.current = [];
        gainNodesRef.current = [];
        panNodesRef.current = [];
        analyserNodesRef.current = [];
        effectNodesRef.current = [];
        // Create master gain node
        const masterGain = audioContext.createGain();
        masterGain.gain.setValueAtTime(masterVolume, audioContext.currentTime);
        masterGain.connect(audioContext.destination);
        masterGainNodeRef.current = masterGain;
        // Create audio graph for each track
        for (const track of latestTracks) {
          if (!track.audioBuffer) continue;
          const source = audioContext.createBufferSource();
          source.buffer = track.audioBuffer;
          const gainNode = audioContext.createGain();
          const panNode = audioContext.createStereoPanner();
          const analyserNode = audioContext.createAnalyser();
          analyserNode.fftSize = 256;
          analyserNode.smoothingTimeConstant = 0.8;
          // Set gain based on track volume and solo/mute state
          const trackGain = getTrackGain(track, latestTracks);
          gainNode.gain.setValueAtTime(trackGain, audioContext.currentTime);
          // Set pan
          panNode.pan.setValueAtTime(track.pan, audioContext.currentTime);
          // Connect: source -> analyser -> gain -> pan -> effects -> master gain -> destination
          // Analyser is before gain so it shows raw audio levels independent of volume fader
          source.connect(analyserNode);
          analyserNode.connect(gainNode);
          gainNode.connect(panNode);
          // Apply effect chain
          const { outputNode, effectNodes } = applyEffectChain(audioContext, panNode, track.effectChain);
          outputNode.connect(masterGain);
          // Start playback from current position
          source.start(0, pausedAtRef.current);
          // Store references
          sourceNodesRef.current.push(source);
          gainNodesRef.current.push(gainNode);
          panNodesRef.current.push(panNode);
          analyserNodesRef.current.push(analyserNode);
          effectNodesRef.current.push(effectNodes);
          // Handle ended event
          source.onended = () => {
            if (pausedAtRef.current + (audioContext.currentTime - startTimeRef.current) >= duration) {
              setIsPlaying(false);
              isMonitoringLevelsRef.current = false;
              setCurrentTime(0);
              pausedAtRef.current = 0;
              setTrackLevels({});
            }
          };
        }
        startTimeRef.current = audioContext.currentTime;
        setIsPlaying(true);
        // Start level monitoring
        isMonitoringLevelsRef.current = true;
        // Start animation frame for position updates
        const updatePosition = () => {
          if (!audioContextRef.current) return;
          const elapsed = audioContextRef.current.currentTime - startTimeRef.current;
          const newTime = pausedAtRef.current + elapsed;
          if (newTime >= duration) {
            setIsPlaying(false);
            isMonitoringLevelsRef.current = false;
            setCurrentTime(0);
            pausedAtRef.current = 0;
            setTrackLevels({}); // clear meters at end of playback (matches updatePlaybackPosition)
            if (animationFrameRef.current) {
              cancelAnimationFrame(animationFrameRef.current);
              animationFrameRef.current = null;
            }
            if (levelMonitorFrameRef.current) {
              cancelAnimationFrame(levelMonitorFrameRef.current);
              levelMonitorFrameRef.current = null;
            }
            return;
          }
          setCurrentTime(newTime);
          animationFrameRef.current = requestAnimationFrame(updatePosition);
        };
        updatePosition();
        monitorPlaybackLevels();
      }, 10);
    }
    previousEffectStructureRef.current = currentStructure;
  }, [tracks, isPlaying, duration, masterVolume, monitorPlaybackLevels]);
  // Stop playback when all tracks are deleted
  useEffect(() => {
    if (!isPlaying) return;
    // If tracks become empty or all tracks have no audio buffer, stop playback
    if (tracks.length === 0 || tracks.every(t => !t.audioBuffer)) {
      console.log('[useMultiTrackPlayer] All tracks deleted, stopping playback');
      stop();
    }
  }, [tracks, isPlaying, stop]);

  // Update effect parameters and bypass state in real-time
  useEffect(() => {
    if (!isPlaying || !audioContextRef.current) return;
    tracks.forEach((track, trackIndex) => {
      const effectNodes = effectNodesRef.current[trackIndex];
      if (!effectNodes) return;
      // Only update if we have the same number of effects (no add/remove)
      if (effectNodes.length !== track.effectChain.effects.length) return;
      track.effectChain.effects.forEach((effect, effectIndex) => {
        const effectNodeInfo = effectNodes[effectIndex];
        if (!effectNodeInfo) return;
        // Update bypass state
        if (effect.enabled !== effectNodeInfo.effect.enabled) {
          toggleEffectBypass(audioContextRef.current!, effectNodeInfo, effect.enabled);
          effectNodeInfo.effect.enabled = effect.enabled;
        }
        // Update parameters (only works for certain effect types)
        if (JSON.stringify(effect.parameters) !== JSON.stringify(effectNodeInfo.effect.parameters)) {
          updateEffectParameters(audioContextRef.current!, effectNodeInfo, effect);
          effectNodeInfo.effect.parameters = effect.parameters;
        }
      });
    });
  }, [tracks, isPlaying]);

  // Update master volume when it changes
  useEffect(() => {
    if (!isPlaying || !audioContextRef.current || !masterGainNodeRef.current) return;
    masterGainNodeRef.current.gain.setValueAtTime(
      masterVolume,
      audioContextRef.current.currentTime
    );
  }, [masterVolume, isPlaying]);

  // Cleanup on unmount
  useEffect(() => {
    return () => {
      isMonitoringLevelsRef.current = false;
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
      if (levelMonitorFrameRef.current) {
        cancelAnimationFrame(levelMonitorFrameRef.current);
      }
      sourceNodesRef.current.forEach(node => {
        try {
          node.stop();
          node.disconnect();
        } catch (e) {
          // Ignore
        }
      });
      gainNodesRef.current.forEach(node => node.disconnect());
      panNodesRef.current.forEach(node => node.disconnect());
      analyserNodesRef.current.forEach(node => node.disconnect());
      if (masterGainNodeRef.current) {
        masterGainNodeRef.current.disconnect();
      }
    };
  }, []);

  return {
    isPlaying,
    currentTime,
    duration,
    trackLevels,
    play,
    pause,
    stop,
    seek,
    togglePlayPause,
  };
}
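
For reference, a minimal sketch of how a component might consume this hook;
the component, its props, and the 0.9 master volume are illustrative
assumptions, not code from this repository:

    import { useMultiTrackPlayer } from '@/lib/hooks/useMultiTrackPlayer';
    import type { Track } from '@/types/track';

    // Hypothetical transport UI; `tracks` would come from app state.
    function TransportBar({ tracks }: { tracks: Track[] }) {
      const { isPlaying, currentTime, duration, trackLevels, togglePlayPause, seek } =
        useMultiTrackPlayer(tracks, 0.9);
      return (
        <div>
          <button onClick={togglePlayPause}>{isPlaying ? 'Pause' : 'Play'}</button>
          <input
            type="range"
            min={0}
            max={duration}
            step={0.01}
            value={currentTime}
            onChange={e => seek(Number(e.target.value))}
          />
          {tracks.map(t => (
            // trackLevels[t.id] is a 0..1 pre-fader peak, usable as a meter width
            <div key={t.id} style={{ width: `${(trackLevels[t.id] ?? 0) * 100}%` }} />
          ))}
        </div>
      );
    }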