audio-ui/lib/hooks/useRecording.ts
Sebastian Krüger a0ce83a654 fix: use Float32Array for accurate full-range level measurement
Switched from Uint8Array to Float32Array for level monitoring
to get accurate, full-precision audio measurements.

The Problem:
- getByteTimeDomainData() returns Uint8Array values (0-255)
- Byte conversion: (value - 128) / 128 has an asymmetric range
- Positive peaks: (255 - 128) / 128 = 0.992 (never the full 1.0)
- Precision loss from 8-bit quantization
- Mastered tracks with peaks at 0 dBFS only showed ~50% on the meters (see the sketch below)
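For illustration, a minimal sketch of the byte-based metering this commit replaces, reconstructed from the conversion formula above (variable names are illustrative, not the removed code; assumes an AnalyserNode named analyser is in scope):

// Sketch of the old byte-based peak metering (names are assumptions).
const byteData = new Uint8Array(analyser.fftSize);
analyser.getByteTimeDomainData(byteData); // values 0-255, silence centered at 128
let peak = 0;
for (let i = 0; i < byteData.length; i++) {
  const sample = (byteData[i] - 128) / 128; // -1.0 ... +0.992; +1.0 is unreachable
  const abs = Math.abs(sample);
  if (abs > peak) peak = abs;
}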

The Solution:
- Switched to getFloatTimeDomainData() with Float32Array
- Returns actual sample values directly in the -1.0 to +1.0 range
- No conversion step, no quantization loss
- Accurate representation of audio peaks

Changes Applied:
- useMultiTrackPlayer: Float32Array with analyser.fftSize samples
- useRecording: Float32Array with analyser.fftSize samples
- Peak detection: Math.abs() on float values directly

Benefits:
- Full 0-100% range for properly mastered audio
- Higher precision (32-bit float vs 8-bit byte)
- Symmetric range (-1.0 to +1.0, not -1.0 to ~0.992)
- Accurate metering for professional audio files

Mastered tracks with peaks at 0 dBFS now correctly show ~100% on the
meters instead of being capped at ~50%; the worked example below shows
the peak-to-dBFS relation behind those numbers.
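As a sanity check, the meter percentage tracks the linear peak, and dBFS relates to it by dBFS = 20 * log10(peak). A tiny sketch (not part of the file):

// Linear peak <-> dBFS relation behind the metering claims above.
const toDbfs = (peak: number): number => 20 * Math.log10(peak);
toDbfs(1.0); // 0 dBFS   -> meter at 100%
toDbfs(0.5); // ~-6 dBFS -> meter at 50%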

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-18 15:21:31 +01:00

266 lines
7.7 KiB
TypeScript

'use client';

import * as React from 'react';

export interface RecordingState {
  isRecording: boolean;
  isPaused: boolean;
  duration: number;
  inputLevel: number;
}

export interface UseRecordingReturn {
  state: RecordingState;
  startRecording: () => Promise<void>;
  stopRecording: () => Promise<AudioBuffer | null>;
  pauseRecording: () => void;
  resumeRecording: () => void;
  getInputDevices: () => Promise<MediaDeviceInfo[]>;
  selectInputDevice: (deviceId: string) => Promise<void>;
  requestPermission: () => Promise<boolean>;
}

export function useRecording(): UseRecordingReturn {
  const [state, setState] = React.useState<RecordingState>({
    isRecording: false,
    isPaused: false,
    duration: 0,
    inputLevel: 0,
  });

  const mediaRecorderRef = React.useRef<MediaRecorder | null>(null);
  const audioContextRef = React.useRef<AudioContext | null>(null);
  const analyserRef = React.useRef<AnalyserNode | null>(null);
  const streamRef = React.useRef<MediaStream | null>(null);
  const chunksRef = React.useRef<Blob[]>([]);
  const startTimeRef = React.useRef<number>(0);
  const pausedAtRef = React.useRef<number>(0); // set on pause so paused time can be excluded from duration
  const animationFrameRef = React.useRef<number>(0);
  const selectedDeviceIdRef = React.useRef<string>('');
  const isMonitoringRef = React.useRef<boolean>(false);

  // Request microphone permission
  const requestPermission = React.useCallback(async (): Promise<boolean> => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      stream.getTracks().forEach((track) => track.stop());
      return true;
    } catch (error) {
      console.error('Microphone permission denied:', error);
      return false;
    }
  }, []);

  // Get available input devices
  const getInputDevices = React.useCallback(async (): Promise<MediaDeviceInfo[]> => {
    try {
      const devices = await navigator.mediaDevices.enumerateDevices();
      return devices.filter((device) => device.kind === 'audioinput');
    } catch (error) {
      console.error('Failed to enumerate devices:', error);
      return [];
    }
  }, []);

  // Select input device (takes effect on the next startRecording call)
  const selectInputDevice = React.useCallback(async (deviceId: string): Promise<void> => {
    selectedDeviceIdRef.current = deviceId;
  }, []);
  // Monitor input level
  const monitorInputLevel = React.useCallback(() => {
    if (!analyserRef.current) return;
    const analyser = analyserRef.current;
    const dataArray = new Float32Array(analyser.fftSize);

    const updateLevel = () => {
      if (!isMonitoringRef.current) return;
      analyser.getFloatTimeDomainData(dataArray);
      // Calculate peak level using float data (-1 to +1 range)
      let peak = 0;
      for (let i = 0; i < dataArray.length; i++) {
        const abs = Math.abs(dataArray[i]);
        if (abs > peak) {
          peak = abs;
        }
      }
      setState((prev) => ({ ...prev, inputLevel: peak }));
      animationFrameRef.current = requestAnimationFrame(updateLevel);
    };

    updateLevel();
  }, []);
  // Start recording
  const startRecording = React.useCallback(async (): Promise<void> => {
    try {
      // Get user media with the selected device (or the default input)
      const constraints: MediaStreamConstraints = {
        audio: selectedDeviceIdRef.current
          ? { deviceId: { exact: selectedDeviceIdRef.current } }
          : true,
      };
      const stream = await navigator.mediaDevices.getUserMedia(constraints);
      streamRef.current = stream;

      // Create audio context and analyser for level monitoring
      const audioContext = new AudioContext();
      audioContextRef.current = audioContext;
      const source = audioContext.createMediaStreamSource(stream);
      const analyser = audioContext.createAnalyser();
      analyser.fftSize = 256;
      analyser.smoothingTimeConstant = 0.3;
      source.connect(analyser);
      analyserRef.current = analyser;

      // Create MediaRecorder
      const mediaRecorder = new MediaRecorder(stream);
      mediaRecorderRef.current = mediaRecorder;
      chunksRef.current = [];
      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data);
        }
      };

      // Start recording
      mediaRecorder.start();
      startTimeRef.current = Date.now();
      setState({
        isRecording: true,
        isPaused: false,
        duration: 0,
        inputLevel: 0,
      });

      // Start monitoring input level
      isMonitoringRef.current = true;
      monitorInputLevel();
    } catch (error) {
      console.error('Failed to start recording:', error);
      throw error;
    }
  }, [monitorInputLevel]);
  // Stop recording and return AudioBuffer
  const stopRecording = React.useCallback(async (): Promise<AudioBuffer | null> => {
    return new Promise((resolve) => {
      if (!mediaRecorderRef.current || !streamRef.current) {
        resolve(null);
        return;
      }
      const mediaRecorder = mediaRecorderRef.current;
      mediaRecorder.onstop = async () => {
        // Stop all tracks
        streamRef.current?.getTracks().forEach((track) => track.stop());
        // Create blob from recorded chunks (the type is informational;
        // decodeAudioData sniffs the actual bytes)
        const blob = new Blob(chunksRef.current, { type: 'audio/webm' });
        // Convert blob to AudioBuffer
        try {
          const arrayBuffer = await blob.arrayBuffer();
          const decodeContext = new AudioContext();
          const audioBuffer = await decodeContext.decodeAudioData(arrayBuffer);
          await decodeContext.close(); // release the throwaway decoding context

          // Clean up
          isMonitoringRef.current = false;
          if (audioContextRef.current) {
            await audioContextRef.current.close();
            audioContextRef.current = null; // avoid re-closing on unmount
          }
          if (animationFrameRef.current) {
            cancelAnimationFrame(animationFrameRef.current);
          }
          setState({
            isRecording: false,
            isPaused: false,
            duration: 0,
            inputLevel: 0,
          });
          resolve(audioBuffer);
        } catch (error) {
          console.error('Failed to decode recorded audio:', error);
          resolve(null);
        }
      };
      mediaRecorder.stop();
    });
  }, []);
  // Pause recording
  const pauseRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && !state.isPaused) {
      mediaRecorderRef.current.pause();
      pausedAtRef.current = Date.now();
      setState((prev) => ({ ...prev, isPaused: true }));
      isMonitoringRef.current = false;
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    }
  }, [state.isRecording, state.isPaused]);

  // Resume recording
  const resumeRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && state.isPaused) {
      mediaRecorderRef.current.resume();
      // Shift the start time forward so the paused interval is not counted in duration
      startTimeRef.current += Date.now() - pausedAtRef.current;
      setState((prev) => ({ ...prev, isPaused: false }));
      isMonitoringRef.current = true;
      monitorInputLevel();
    }
  }, [state.isRecording, state.isPaused, monitorInputLevel]);
  // Update duration
  React.useEffect(() => {
    if (!state.isRecording || state.isPaused) return;
    const interval = setInterval(() => {
      const elapsed = (Date.now() - startTimeRef.current) / 1000;
      setState((prev) => ({ ...prev, duration: elapsed }));
    }, 100);
    return () => clearInterval(interval);
  }, [state.isRecording, state.isPaused]);

  // Cleanup on unmount
  React.useEffect(() => {
    return () => {
      isMonitoringRef.current = false;
      if (streamRef.current) {
        streamRef.current.getTracks().forEach((track) => track.stop());
      }
      if (audioContextRef.current) {
        audioContextRef.current.close();
      }
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    };
  }, []);
  return {
    state,
    startRecording,
    stopRecording,
    pauseRecording,
    resumeRecording,
    getInputDevices,
    selectInputDevice,
    requestPermission,
  };
}
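
A minimal usage sketch for the hook (hypothetical component and import path, not part of the repository):

'use client';

import * as React from 'react';
import { useRecording } from './useRecording'; // import path is an assumption

export function RecorderPanel() {
  const { state, startRecording, stopRecording } = useRecording();

  const handleStop = async () => {
    const buffer = await stopRecording();
    if (buffer) {
      console.log(`Recorded ${buffer.duration.toFixed(2)}s at ${buffer.sampleRate} Hz`);
    }
  };

  return (
    <div>
      <button onClick={() => startRecording()} disabled={state.isRecording}>Record</button>
      <button onClick={handleStop} disabled={!state.isRecording}>Stop</button>
      {/* inputLevel is the 0-1 float peak from the analyser */}
      <meter min={0} max={1} value={state.inputLevel} />
      <span>{state.duration.toFixed(1)}s</span>
    </div>
  );
}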