audio-ui/lib/hooks/useRecording.ts
Sebastian Krüger 8367cbf6e7 fix: switch from RMS to peak detection for more accurate level meters
Changed level calculation from RMS to peak detection to show
more realistic and responsive meter values.

The Problem:
- RMS calculation produced values typically in the 0-30% range
- Audio signals have a low average RMS (roughly 0.1-0.3 for music)
- Meters appeared broken, never reaching higher levels

The Solution:
- Switched to peak detection (max absolute value)
- Peak values now span the full 0-100% range
- More responsive to transients and dynamics
- Matches typical DAW meter behavior

Algorithm Change:
Before (RMS):
  rms = sqrt(sum(normalized²) / length)

After (Peak):
  peak = max(abs(normalized))
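
As a standalone sketch (not code from this commit), the two calculations
over one AnalyserNode time-domain buffer (bytes 0-255, centered on 128)
compare like this; the helper names are illustrative only:

  function rmsLevel(data: Uint8Array): number {
    let sum = 0;
    for (let i = 0; i < data.length; i++) {
      const n = (data[i] - 128) / 128;
      sum += n * n;
    }
    return Math.sqrt(sum / data.length); // ~0.1-0.3 for typical music
  }

  function peakLevel(data: Uint8Array): number {
    let peak = 0;
    for (let i = 0; i < data.length; i++) {
      const n = Math.abs((data[i] - 128) / 128);
      if (n > peak) peak = n;
    }
    return peak; // reaches the full 0-1 range on transients
  }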

Applied to Both:
- Recording input level monitoring (useRecording)
- Playback output level monitoring (useMultiTrackPlayer)

Benefits:
- Full 0-100% range utilization
- More responsive visual feedback
- Accurate representation of audio peaks
- Consistent with professional audio software
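
As an illustrative consumer (hypothetical, not part of this commit), a
meter component can map the 0-1 peak value straight to a percentage width:

  function InputMeter() {
    const { state } = useRecording();
    const percent = Math.round(state.inputLevel * 100);
    return (
      <div style={{ width: 200, height: 8, background: '#222' }}>
        <div
          style={{
            width: `${percent}%`,
            height: '100%',
            background: state.inputLevel > 0.9 ? 'red' : 'limegreen',
          }}
        />
      </div>
    );
  }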

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-18 15:15:16 +01:00


'use client';

import * as React from 'react';

export interface RecordingState {
  isRecording: boolean;
  isPaused: boolean;
  duration: number;
  inputLevel: number;
}

export interface UseRecordingReturn {
  state: RecordingState;
  startRecording: () => Promise<void>;
  stopRecording: () => Promise<AudioBuffer | null>;
  pauseRecording: () => void;
  resumeRecording: () => void;
  getInputDevices: () => Promise<MediaDeviceInfo[]>;
  selectInputDevice: (deviceId: string) => Promise<void>;
  requestPermission: () => Promise<boolean>;
}
export function useRecording(): UseRecordingReturn {
  const [state, setState] = React.useState<RecordingState>({
    isRecording: false,
    isPaused: false,
    duration: 0,
    inputLevel: 0,
  });

  const mediaRecorderRef = React.useRef<MediaRecorder | null>(null);
  const audioContextRef = React.useRef<AudioContext | null>(null);
  const analyserRef = React.useRef<AnalyserNode | null>(null);
  const streamRef = React.useRef<MediaStream | null>(null);
  const chunksRef = React.useRef<Blob[]>([]);
  const startTimeRef = React.useRef<number>(0);
  // Timestamp of the most recent pause; used to exclude paused time from duration
  const pausedAtRef = React.useRef<number>(0);
  const animationFrameRef = React.useRef<number>(0);
  const selectedDeviceIdRef = React.useRef<string>('');
  const isMonitoringRef = React.useRef<boolean>(false);
  // Request microphone permission
  const requestPermission = React.useCallback(async (): Promise<boolean> => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      stream.getTracks().forEach((track) => track.stop());
      return true;
    } catch (error) {
      console.error('Microphone permission denied:', error);
      return false;
    }
  }, []);

  // Get available input devices
  const getInputDevices = React.useCallback(async (): Promise<MediaDeviceInfo[]> => {
    try {
      const devices = await navigator.mediaDevices.enumerateDevices();
      return devices.filter((device) => device.kind === 'audioinput');
    } catch (error) {
      console.error('Failed to enumerate devices:', error);
      return [];
    }
  }, []);
  // Select input device (stored for use by the next startRecording call)
  const selectInputDevice = React.useCallback(async (deviceId: string): Promise<void> => {
    selectedDeviceIdRef.current = deviceId;
  }, []);
  // Monitor input level
  const monitorInputLevel = React.useCallback(() => {
    if (!analyserRef.current) return;
    const analyser = analyserRef.current;
    // Use fftSize (not frequencyBinCount) so the full time-domain window is read
    const dataArray = new Uint8Array(analyser.fftSize);
    const updateLevel = () => {
      if (!isMonitoringRef.current) return;
      analyser.getByteTimeDomainData(dataArray);
      // Calculate peak level (more responsive than RMS for visual meters);
      // samples are 0-255 bytes centered on 128, normalized to 0-1
      let peak = 0;
      for (let i = 0; i < dataArray.length; i++) {
        const normalized = Math.abs((dataArray[i] - 128) / 128);
        if (normalized > peak) {
          peak = normalized;
        }
      }
      setState((prev) => ({ ...prev, inputLevel: peak }));
      animationFrameRef.current = requestAnimationFrame(updateLevel);
    };
    updateLevel();
  }, []);
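
  // For comparison, the RMS level this replaced was computed as
  // sqrt(sum(normalized^2) / length); on typical program material that
  // sits around 0.1-0.3, which is why the meter rarely moved past ~30%
  // before this change (see the commit message above).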
  // Start recording
  const startRecording = React.useCallback(async (): Promise<void> => {
    try {
      // Get user media with selected device
      const constraints: MediaStreamConstraints = {
        audio: selectedDeviceIdRef.current
          ? { deviceId: { exact: selectedDeviceIdRef.current } }
          : true,
      };
      const stream = await navigator.mediaDevices.getUserMedia(constraints);
      streamRef.current = stream;

      // Create audio context and analyser for level monitoring
      const audioContext = new AudioContext();
      audioContextRef.current = audioContext;
      const source = audioContext.createMediaStreamSource(stream);
      const analyser = audioContext.createAnalyser();
      analyser.fftSize = 256;
      analyser.smoothingTimeConstant = 0.3;
      source.connect(analyser);
      analyserRef.current = analyser;
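      // Note: fftSize = 256 gives a 256-sample analysis window (~5 ms at
      // 48 kHz); smoothingTimeConstant only smooths the frequency-domain
      // getters, so the time-domain reads used for peak detection are raw.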
      // Create MediaRecorder
      const mediaRecorder = new MediaRecorder(stream);
      mediaRecorderRef.current = mediaRecorder;
      chunksRef.current = [];

      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data);
        }
      };

      // Start recording
      mediaRecorder.start();
      startTimeRef.current = Date.now();
      setState({
        isRecording: true,
        isPaused: false,
        duration: 0,
        inputLevel: 0,
      });

      // Start monitoring input level
      isMonitoringRef.current = true;
      monitorInputLevel();
    } catch (error) {
      console.error('Failed to start recording:', error);
      throw error;
    }
  }, [monitorInputLevel]);
  // Stop recording and return AudioBuffer
  const stopRecording = React.useCallback(async (): Promise<AudioBuffer | null> => {
    return new Promise((resolve) => {
      if (!mediaRecorderRef.current || !streamRef.current) {
        resolve(null);
        return;
      }
      const mediaRecorder = mediaRecorderRef.current;
      mediaRecorder.onstop = async () => {
        // Stop all tracks
        streamRef.current?.getTracks().forEach((track) => track.stop());

        // Clean up before decoding so monitoring stops even if decode fails
        isMonitoringRef.current = false;
        if (animationFrameRef.current) {
          cancelAnimationFrame(animationFrameRef.current);
        }
        if (audioContextRef.current) {
          await audioContextRef.current.close();
          audioContextRef.current = null;
        }
        setState({
          isRecording: false,
          isPaused: false,
          duration: 0,
          inputLevel: 0,
        });

        // Create blob from recorded chunks
        const blob = new Blob(chunksRef.current, { type: 'audio/webm' });

        // Convert blob to AudioBuffer
        const decodeContext = new AudioContext();
        try {
          const arrayBuffer = await blob.arrayBuffer();
          const audioBuffer = await decodeContext.decodeAudioData(arrayBuffer);
          resolve(audioBuffer);
        } catch (error) {
          console.error('Failed to decode recorded audio:', error);
          resolve(null);
        } finally {
          // Close the throwaway context used only for decoding
          await decodeContext.close();
        }
      };
      mediaRecorder.stop();
    });
  }, []);
  // Pause recording
  const pauseRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && !state.isPaused) {
      mediaRecorderRef.current.pause();
      setState((prev) => ({ ...prev, isPaused: true }));
      // Remember when the pause started so duration can exclude paused time
      pausedAtRef.current = Date.now();
      isMonitoringRef.current = false;
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    }
  }, [state.isRecording, state.isPaused]);

  // Resume recording
  const resumeRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && state.isPaused) {
      mediaRecorderRef.current.resume();
      // Shift the start time forward by the paused interval so the
      // duration timer keeps excluding time spent paused
      startTimeRef.current += Date.now() - pausedAtRef.current;
      setState((prev) => ({ ...prev, isPaused: false }));
      isMonitoringRef.current = true;
      monitorInputLevel();
    }
  }, [state.isRecording, state.isPaused, monitorInputLevel]);
  // Update duration
  React.useEffect(() => {
    if (!state.isRecording || state.isPaused) return;
    const interval = setInterval(() => {
      const elapsed = (Date.now() - startTimeRef.current) / 1000;
      setState((prev) => ({ ...prev, duration: elapsed }));
    }, 100);
    return () => clearInterval(interval);
  }, [state.isRecording, state.isPaused]);

  // Cleanup on unmount
  React.useEffect(() => {
    return () => {
      isMonitoringRef.current = false;
      if (streamRef.current) {
        streamRef.current.getTracks().forEach((track) => track.stop());
      }
      if (audioContextRef.current) {
        audioContextRef.current.close();
      }
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    };
  }, []);
  return {
    state,
    startRecording,
    stopRecording,
    pauseRecording,
    resumeRecording,
    getInputDevices,
    selectInputDevice,
    requestPermission,
  };
}