audio-ui/lib/hooks/useRecording.ts
Sebastian Krüger db01209f77 feat: implement professional logarithmic dB scale for level meters
Converted level meters from a linear to a logarithmic (dB) scale to
match professional audio software behavior and human loudness perception.

The Problem:
- Linear scale (0-100%) doesn't match perceived loudness
- Doesn't match professional DAW meter behavior
- Half-amplitude audio shows at 50% yet is only ~6 dB below full scale, while quiet-but-audible material is squashed near the bottom of the meter
- No industry-standard dB reference

The Solution:
- Convert linear amplitude to dB: 20 * log10(linear)
- Map the -60 dB to 0 dB range onto a 0-100% display
- Matches professional audio metering standards

dB Scale Mapping:
  0 dB (linear 1.0)    = 100% (full scale, clipping)
 -6 dB (linear ~0.5)   = 90%  (loud)
-12 dB (linear ~0.25)  = 80%  (normal)
-20 dB (linear ~0.1)   = 67%  (moderate)
-40 dB (linear ~0.01)  = 33%  (quiet)
-60 dB (linear ~0.001) = 0%   (silence threshold)
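
As a worked check of the -6 dB row: 20 * log10(0.5) ≈ -6.02 dB, and
(-6.02 - (-60)) / (0 - (-60)) = 53.98 / 60 ≈ 0.90, i.e. 90% on the meter.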

Implementation:
- Added linearToDbScale() function to both hooks
- useMultiTrackPlayer: playback level monitoring
- useRecording: input level monitoring
- Formula: (dB - minDb) / (maxDb - minDb)
- Range: -60dB (min) to 0dB (max)
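
Condensed to its two steps (a sketch; these helper names are illustrative,
the hooks combine them into a single linearToDbScale()):

  const MIN_DB = -60;
  const MAX_DB = 0;

  // 20 * log10(linear); -Infinity for silence so it clamps to 0 below
  const linearToDb = (linear: number): number =>
    linear > 0 ? 20 * Math.log10(linear) : -Infinity;

  // (dB - minDb) / (maxDb - minDb), clamped to the 0-1 meter range
  const dbToMeter = (db: number): number =>
    Math.max(0, Math.min(1, (db - MIN_DB) / (MAX_DB - MIN_DB)));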

Benefits:
- Professional audio metering standards
- Matches human perception of loudness
- Consistent with DAWs (Pro Tools, Logic, Ableton)
- Better visual feedback for mixing/mastering
- More responsive in the useful range (-20 dB to 0 dB)

Properly mastered tracks will now show levels in the 90-100% range,
matching what you'd see in professional software.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-18 15:23:33 +01:00

'use client';

import * as React from 'react';

export interface RecordingState {
  isRecording: boolean;
  isPaused: boolean;
  /** Elapsed recording time in seconds, excluding paused time */
  duration: number;
  /** Peak input level, dB-scaled and normalized to 0-1 for display */
  inputLevel: number;
}

export interface UseRecordingReturn {
  state: RecordingState;
  startRecording: () => Promise<void>;
  stopRecording: () => Promise<AudioBuffer | null>;
  pauseRecording: () => void;
  resumeRecording: () => void;
  getInputDevices: () => Promise<MediaDeviceInfo[]>;
  selectInputDevice: (deviceId: string) => Promise<void>;
  requestPermission: () => Promise<boolean>;
}

export function useRecording(): UseRecordingReturn {
  const [state, setState] = React.useState<RecordingState>({
    isRecording: false,
    isPaused: false,
    duration: 0,
    inputLevel: 0,
  });

  const mediaRecorderRef = React.useRef<MediaRecorder | null>(null);
  const audioContextRef = React.useRef<AudioContext | null>(null);
  const analyserRef = React.useRef<AnalyserNode | null>(null);
  const streamRef = React.useRef<MediaStream | null>(null);
  const chunksRef = React.useRef<Blob[]>([]);
  const startTimeRef = React.useRef<number>(0);
  // Timestamp of the most recent pause; used to exclude paused time from duration
  const pausedAtRef = React.useRef<number>(0);
  const animationFrameRef = React.useRef<number>(0);
  const selectedDeviceIdRef = React.useRef<string>('');
  const isMonitoringRef = React.useRef<boolean>(false);

  // Request microphone permission
  const requestPermission = React.useCallback(async (): Promise<boolean> => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      stream.getTracks().forEach((track) => track.stop());
      return true;
    } catch (error) {
      console.error('Microphone permission denied:', error);
      return false;
    }
  }, []);

  // Get available input devices
  const getInputDevices = React.useCallback(async (): Promise<MediaDeviceInfo[]> => {
    try {
      const devices = await navigator.mediaDevices.enumerateDevices();
      return devices.filter((device) => device.kind === 'audioinput');
    } catch (error) {
      console.error('Failed to enumerate devices:', error);
      return [];
    }
  }, []);

  // Select input device
  const selectInputDevice = React.useCallback(async (deviceId: string): Promise<void> => {
    selectedDeviceIdRef.current = deviceId;
  }, []);

  // Convert linear amplitude to dB scale normalized to 0-1 range
  const linearToDbScale = React.useCallback((linear: number): number => {
    if (linear === 0) return 0;
    // Convert to dB (20 * log10(linear))
    const db = 20 * Math.log10(linear);
    // Normalize -60dB to 0dB range to 0-1
    // -60dB or lower = 0%, 0dB = 100%
    const minDb = -60;
    const maxDb = 0;
    const normalized = (db - minDb) / (maxDb - minDb);
    // Clamp to 0-1 range
    return Math.max(0, Math.min(1, normalized));
  }, []);

  // Monitor input level
  const monitorInputLevel = React.useCallback(() => {
    if (!analyserRef.current) return;
    const analyser = analyserRef.current;
    const dataArray = new Float32Array(analyser.fftSize);

    const updateLevel = () => {
      if (!isMonitoringRef.current) return;
      analyser.getFloatTimeDomainData(dataArray);
      // Calculate peak level using float data (-1 to +1 range)
      let peak = 0;
      for (let i = 0; i < dataArray.length; i++) {
        const abs = Math.abs(dataArray[i]);
        if (abs > peak) {
          peak = abs;
        }
      }
      // Convert linear peak to logarithmic dB scale
      const dbLevel = linearToDbScale(peak);
      setState((prev) => ({ ...prev, inputLevel: dbLevel }));
      animationFrameRef.current = requestAnimationFrame(updateLevel);
    };
    updateLevel();
  }, [linearToDbScale]);

  // Start recording
  const startRecording = React.useCallback(async (): Promise<void> => {
    try {
      // Get user media with selected device
      const constraints: MediaStreamConstraints = {
        audio: selectedDeviceIdRef.current
          ? { deviceId: { exact: selectedDeviceIdRef.current } }
          : true,
      };
      const stream = await navigator.mediaDevices.getUserMedia(constraints);
      streamRef.current = stream;

      // Create audio context and analyser for level monitoring
      const audioContext = new AudioContext();
      audioContextRef.current = audioContext;
      const source = audioContext.createMediaStreamSource(stream);
      const analyser = audioContext.createAnalyser();
      analyser.fftSize = 256;
      analyser.smoothingTimeConstant = 0.3;
      source.connect(analyser);
      analyserRef.current = analyser;

      // Create MediaRecorder
      const mediaRecorder = new MediaRecorder(stream);
      mediaRecorderRef.current = mediaRecorder;
      chunksRef.current = [];
      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data);
        }
      };

      // Start recording
      mediaRecorder.start();
      startTimeRef.current = Date.now();
      setState({
        isRecording: true,
        isPaused: false,
        duration: 0,
        inputLevel: 0,
      });

      // Start monitoring input level
      isMonitoringRef.current = true;
      monitorInputLevel();
    } catch (error) {
      console.error('Failed to start recording:', error);
      throw error;
    }
  }, [monitorInputLevel]);

  // Stop recording and return AudioBuffer
  const stopRecording = React.useCallback(async (): Promise<AudioBuffer | null> => {
    return new Promise((resolve) => {
      if (!mediaRecorderRef.current || !streamRef.current) {
        resolve(null);
        return;
      }
      const mediaRecorder = mediaRecorderRef.current;
      mediaRecorder.onstop = async () => {
        // Stop all tracks
        streamRef.current?.getTracks().forEach((track) => track.stop());

        // Stop level monitoring and release the monitoring context,
        // regardless of whether decoding succeeds below
        isMonitoringRef.current = false;
        if (animationFrameRef.current) {
          cancelAnimationFrame(animationFrameRef.current);
        }
        if (audioContextRef.current) {
          await audioContextRef.current.close();
          audioContextRef.current = null;
        }
        setState({
          isRecording: false,
          isPaused: false,
          duration: 0,
          inputLevel: 0,
        });

        // Create blob from recorded chunks
        const blob = new Blob(chunksRef.current, { type: 'audio/webm' });

        // Convert blob to AudioBuffer, closing the temporary decode context
        try {
          const arrayBuffer = await blob.arrayBuffer();
          const decodeContext = new AudioContext();
          const audioBuffer = await decodeContext.decodeAudioData(arrayBuffer);
          await decodeContext.close();
          resolve(audioBuffer);
        } catch (error) {
          console.error('Failed to decode recorded audio:', error);
          resolve(null);
        }
      };
      mediaRecorder.stop();
    });
  }, []);

  // Pause recording
  const pauseRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && !state.isPaused) {
      mediaRecorderRef.current.pause();
      pausedAtRef.current = Date.now();
      setState((prev) => ({ ...prev, isPaused: true }));
      isMonitoringRef.current = false;
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    }
  }, [state.isRecording, state.isPaused]);

  // Resume recording
  const resumeRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && state.isPaused) {
      mediaRecorderRef.current.resume();
      // Shift the start time forward so paused time is excluded from duration
      startTimeRef.current += Date.now() - pausedAtRef.current;
      setState((prev) => ({ ...prev, isPaused: false }));
      isMonitoringRef.current = true;
      monitorInputLevel();
    }
  }, [state.isRecording, state.isPaused, monitorInputLevel]);

  // Update duration
  React.useEffect(() => {
    if (!state.isRecording || state.isPaused) return;
    const interval = setInterval(() => {
      const elapsed = (Date.now() - startTimeRef.current) / 1000;
      setState((prev) => ({ ...prev, duration: elapsed }));
    }, 100);
    return () => clearInterval(interval);
  }, [state.isRecording, state.isPaused]);

  // Cleanup on unmount
  React.useEffect(() => {
    return () => {
      isMonitoringRef.current = false;
      if (streamRef.current) {
        streamRef.current.getTracks().forEach((track) => track.stop());
      }
      if (audioContextRef.current) {
        audioContextRef.current.close();
      }
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    };
  }, []);

  return {
    state,
    startRecording,
    stopRecording,
    pauseRecording,
    resumeRecording,
    getInputDevices,
    selectInputDevice,
    requestPermission,
  };
}
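
For context, a minimal usage sketch of the hook (not part of this file;
the component name, markup, and import alias are hypothetical):

'use client';
import * as React from 'react';
import { useRecording } from '@/lib/hooks/useRecording';

export function RecorderPanel() {
  const { state, startRecording, stopRecording } = useRecording();

  const handleStop = async () => {
    const buffer = await stopRecording();
    if (buffer) {
      console.log(`Recorded ${buffer.duration.toFixed(2)}s of audio`);
    }
  };

  return (
    <div>
      {/* inputLevel is already dB-scaled to 0-1, so it maps directly to a meter width */}
      <div style={{ width: `${state.inputLevel * 100}%`, height: 8, background: 'limegreen' }} />
      <span>{state.duration.toFixed(1)}s</span>
      {state.isRecording ? (
        <button onClick={() => void handleStop()}>Stop</button>
      ) : (
        <button onClick={() => void startRecording()}>Record</button>
      )}
    </div>
  );
}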