// audio-ui/lib/hooks/useRecording.ts
// React hook for microphone recording with dB-scaled level metering.
'use client';
import * as React from 'react';
/** Live status of the recorder, published on every meter/duration tick. */
export interface RecordingState {
  isRecording: boolean; // true from startRecording() until stopRecording() resolves
  isPaused: boolean; // true while paused (isRecording stays true)
  duration: number; // elapsed recording time in seconds
  inputLevel: number; // dB-scaled meter value, 0 (-60 dB or silence) to 1 (0 dBFS)
}
/** User-adjustable capture settings; sampleRate applies to the next session. */
export interface RecordingSettings {
  inputGain: number; // 0.0 to 2.0 (1.0 = unity)
  recordMono: boolean; // true = mono, false = stereo
  sampleRate: number; // target sample rate (44100, 48000, etc.)
}
/** Public surface returned by useRecording(). */
export interface UseRecordingReturn {
  state: RecordingState;
  settings: RecordingSettings;
  startRecording: () => Promise<void>; // opens the mic and starts capture; rejects on failure
  stopRecording: () => Promise<AudioBuffer | null>; // decodes the take; null if nothing recorded
  pauseRecording: () => void;
  resumeRecording: () => void;
  getInputDevices: () => Promise<MediaDeviceInfo[]>; // audio inputs only
  selectInputDevice: (deviceId: string) => Promise<void>; // takes effect on next startRecording()
  requestPermission: () => Promise<boolean>; // true if mic permission granted
  setInputGain: (gain: number) => void; // clamped to 0..2
  setRecordMono: (mono: boolean) => void;
  setSampleRate: (sampleRate: number) => void;
}
export function useRecording(): UseRecordingReturn {
const [state, setState] = React.useState<RecordingState>({
isRecording: false,
isPaused: false,
duration: 0,
inputLevel: 0,
});
const [settings, setSettings] = React.useState<RecordingSettings>({
inputGain: 1.0,
recordMono: false,
sampleRate: 48000,
});
const mediaRecorderRef = React.useRef<MediaRecorder | null>(null);
const audioContextRef = React.useRef<AudioContext | null>(null);
const analyserRef = React.useRef<AnalyserNode | null>(null);
const gainNodeRef = React.useRef<GainNode | null>(null);
const streamRef = React.useRef<MediaStream | null>(null);
const chunksRef = React.useRef<Blob[]>([]);
const startTimeRef = React.useRef<number>(0);
const animationFrameRef = React.useRef<number>(0);
const selectedDeviceIdRef = React.useRef<string>('');
const isMonitoringRef = React.useRef<boolean>(false);
// Request microphone permission
const requestPermission = React.useCallback(async (): Promise<boolean> => {
try {
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
stream.getTracks().forEach((track) => track.stop());
return true;
} catch (error) {
console.error('Microphone permission denied:', error);
return false;
}
}, []);
// Get available input devices
const getInputDevices = React.useCallback(async (): Promise<MediaDeviceInfo[]> => {
try {
const devices = await navigator.mediaDevices.enumerateDevices();
return devices.filter((device) => device.kind === 'audioinput');
} catch (error) {
console.error('Failed to enumerate devices:', error);
return [];
}
}, []);
// Select input device
const selectInputDevice = React.useCallback(async (deviceId: string): Promise<void> => {
selectedDeviceIdRef.current = deviceId;
}, []);
feat: implement professional logarithmic dB scale for level meters Converted level meters from linear to logarithmic (dB) scale to match professional audio software behavior and human hearing. The Problem: - Linear scale (0-100%) doesn't match perceived loudness - Doesn't match professional DAW meter behavior - Half-volume audio appears at 50% but sounds much quieter - No industry-standard dB reference The Solution: - Convert linear amplitude to dB: 20 * log10(linear) - Normalize -60dB to 0dB range to 0-100% display - Matches professional audio metering standards dB Scale Mapping: 0 dB (linear 1.0) = 100% (full scale, clipping) -6 dB (linear ~0.5) = 90% (loud) -12 dB (linear ~0.25) = 80% (normal) -20 dB (linear ~0.1) = 67% (moderate) -40 dB (linear ~0.01) = 33% (quiet) -60 dB (linear ~0.001) = 0% (silence threshold) Implementation: - Added linearToDbScale() function to both hooks - useMultiTrackPlayer: playback level monitoring - useRecording: input level monitoring - Formula: (dB - minDb) / (maxDb - minDb) - Range: -60dB (min) to 0dB (max) Benefits: ✅ Professional audio metering standards ✅ Matches human perception of loudness ✅ Consistent with DAWs (Pro Tools, Logic, Ableton) ✅ Better visual feedback for mixing/mastering ✅ More responsive in useful range (-20dB to 0dB) Now properly mastered tracks will show levels in the 90-100% range, matching what you'd see in professional software. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-18 15:23:33 +01:00
// Convert linear amplitude to dB scale normalized to 0-1 range
const linearToDbScale = React.useCallback((linear: number): number => {
if (linear === 0) return 0;
// Convert to dB (20 * log10(linear))
const db = 20 * Math.log10(linear);
// Normalize -60dB to 0dB range to 0-1
// -60dB or lower = 0%, 0dB = 100%
const minDb = -60;
const maxDb = 0;
const normalized = (db - minDb) / (maxDb - minDb);
// Clamp to 0-1 range
return Math.max(0, Math.min(1, normalized));
}, []);
// Monitor input level
const monitorInputLevel = React.useCallback(() => {
if (!analyserRef.current) return;
const analyser = analyserRef.current;
const dataArray = new Float32Array(analyser.fftSize);
const updateLevel = () => {
if (!isMonitoringRef.current) return;
analyser.getFloatTimeDomainData(dataArray);
// Calculate peak level using float data (-1 to +1 range)
let peak = 0;
for (let i = 0; i < dataArray.length; i++) {
const abs = Math.abs(dataArray[i]);
if (abs > peak) {
peak = abs;
}
}
feat: implement professional logarithmic dB scale for level meters Converted level meters from linear to logarithmic (dB) scale to match professional audio software behavior and human hearing. The Problem: - Linear scale (0-100%) doesn't match perceived loudness - Doesn't match professional DAW meter behavior - Half-volume audio appears at 50% but sounds much quieter - No industry-standard dB reference The Solution: - Convert linear amplitude to dB: 20 * log10(linear) - Normalize -60dB to 0dB range to 0-100% display - Matches professional audio metering standards dB Scale Mapping: 0 dB (linear 1.0) = 100% (full scale, clipping) -6 dB (linear ~0.5) = 90% (loud) -12 dB (linear ~0.25) = 80% (normal) -20 dB (linear ~0.1) = 67% (moderate) -40 dB (linear ~0.01) = 33% (quiet) -60 dB (linear ~0.001) = 0% (silence threshold) Implementation: - Added linearToDbScale() function to both hooks - useMultiTrackPlayer: playback level monitoring - useRecording: input level monitoring - Formula: (dB - minDb) / (maxDb - minDb) - Range: -60dB (min) to 0dB (max) Benefits: ✅ Professional audio metering standards ✅ Matches human perception of loudness ✅ Consistent with DAWs (Pro Tools, Logic, Ableton) ✅ Better visual feedback for mixing/mastering ✅ More responsive in useful range (-20dB to 0dB) Now properly mastered tracks will show levels in the 90-100% range, matching what you'd see in professional software. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-18 15:23:33 +01:00
// Convert linear peak to logarithmic dB scale
const dbLevel = linearToDbScale(peak);
setState((prev) => ({ ...prev, inputLevel: dbLevel }));
animationFrameRef.current = requestAnimationFrame(updateLevel);
};
updateLevel();
feat: implement professional logarithmic dB scale for level meters Converted level meters from linear to logarithmic (dB) scale to match professional audio software behavior and human hearing. The Problem: - Linear scale (0-100%) doesn't match perceived loudness - Doesn't match professional DAW meter behavior - Half-volume audio appears at 50% but sounds much quieter - No industry-standard dB reference The Solution: - Convert linear amplitude to dB: 20 * log10(linear) - Normalize -60dB to 0dB range to 0-100% display - Matches professional audio metering standards dB Scale Mapping: 0 dB (linear 1.0) = 100% (full scale, clipping) -6 dB (linear ~0.5) = 90% (loud) -12 dB (linear ~0.25) = 80% (normal) -20 dB (linear ~0.1) = 67% (moderate) -40 dB (linear ~0.01) = 33% (quiet) -60 dB (linear ~0.001) = 0% (silence threshold) Implementation: - Added linearToDbScale() function to both hooks - useMultiTrackPlayer: playback level monitoring - useRecording: input level monitoring - Formula: (dB - minDb) / (maxDb - minDb) - Range: -60dB (min) to 0dB (max) Benefits: ✅ Professional audio metering standards ✅ Matches human perception of loudness ✅ Consistent with DAWs (Pro Tools, Logic, Ableton) ✅ Better visual feedback for mixing/mastering ✅ More responsive in useful range (-20dB to 0dB) Now properly mastered tracks will show levels in the 90-100% range, matching what you'd see in professional software. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-18 15:23:33 +01:00
}, [linearToDbScale]);
// Start recording
const startRecording = React.useCallback(async (): Promise<void> => {
try {
// Get user media with selected device
const constraints: MediaStreamConstraints = {
audio: selectedDeviceIdRef.current
? { deviceId: { exact: selectedDeviceIdRef.current } }
: true,
};
const stream = await navigator.mediaDevices.getUserMedia(constraints);
streamRef.current = stream;
// Create audio context with target sample rate
const audioContext = new AudioContext({ sampleRate: settings.sampleRate });
audioContextRef.current = audioContext;
const source = audioContext.createMediaStreamSource(stream);
// Create gain node for input gain control
const gainNode = audioContext.createGain();
gainNode.gain.value = settings.inputGain;
gainNodeRef.current = gainNode;
// Create analyser for level monitoring
const analyser = audioContext.createAnalyser();
analyser.fftSize = 256;
analyser.smoothingTimeConstant = 0.3;
// Connect: source -> gain -> analyser
source.connect(gainNode);
gainNode.connect(analyser);
analyserRef.current = analyser;
// Create MediaRecorder
const mediaRecorder = new MediaRecorder(stream);
mediaRecorderRef.current = mediaRecorder;
chunksRef.current = [];
mediaRecorder.ondataavailable = (event) => {
if (event.data.size > 0) {
chunksRef.current.push(event.data);
}
};
// Start recording
mediaRecorder.start();
startTimeRef.current = Date.now();
setState({
isRecording: true,
isPaused: false,
duration: 0,
inputLevel: 0,
});
// Start monitoring input level
isMonitoringRef.current = true;
monitorInputLevel();
} catch (error) {
console.error('Failed to start recording:', error);
throw error;
}
}, [monitorInputLevel, settings.sampleRate, settings.inputGain]);
// Stop recording and return AudioBuffer
const stopRecording = React.useCallback(async (): Promise<AudioBuffer | null> => {
return new Promise((resolve) => {
if (!mediaRecorderRef.current || !streamRef.current) {
resolve(null);
return;
}
const mediaRecorder = mediaRecorderRef.current;
mediaRecorder.onstop = async () => {
// Stop all tracks
streamRef.current?.getTracks().forEach((track) => track.stop());
// Create blob from recorded chunks
const blob = new Blob(chunksRef.current, { type: 'audio/webm' });
// Convert blob to AudioBuffer
try {
const arrayBuffer = await blob.arrayBuffer();
const audioContext = new AudioContext();
let audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
// Convert to mono if requested
if (settings.recordMono && audioBuffer.numberOfChannels > 1) {
const monoBuffer = audioContext.createBuffer(
1,
audioBuffer.length,
audioBuffer.sampleRate
);
const monoData = monoBuffer.getChannelData(0);
// Mix all channels to mono by averaging
for (let i = 0; i < audioBuffer.length; i++) {
let sum = 0;
for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
sum += audioBuffer.getChannelData(channel)[i];
}
monoData[i] = sum / audioBuffer.numberOfChannels;
}
audioBuffer = monoBuffer;
}
// Clean up
isMonitoringRef.current = false;
if (audioContextRef.current) {
await audioContextRef.current.close();
}
if (animationFrameRef.current) {
cancelAnimationFrame(animationFrameRef.current);
}
setState({
isRecording: false,
isPaused: false,
duration: 0,
inputLevel: 0,
});
resolve(audioBuffer);
} catch (error) {
console.error('Failed to decode recorded audio:', error);
resolve(null);
}
};
mediaRecorder.stop();
});
}, [settings.recordMono]);
// Pause recording
const pauseRecording = React.useCallback(() => {
if (mediaRecorderRef.current && state.isRecording && !state.isPaused) {
mediaRecorderRef.current.pause();
setState((prev) => ({ ...prev, isPaused: true }));
isMonitoringRef.current = false;
if (animationFrameRef.current) {
cancelAnimationFrame(animationFrameRef.current);
}
}
}, [state.isRecording, state.isPaused]);
// Resume recording
const resumeRecording = React.useCallback(() => {
if (mediaRecorderRef.current && state.isRecording && state.isPaused) {
mediaRecorderRef.current.resume();
setState((prev) => ({ ...prev, isPaused: false }));
isMonitoringRef.current = true;
monitorInputLevel();
}
}, [state.isRecording, state.isPaused, monitorInputLevel]);
// Update duration
React.useEffect(() => {
if (!state.isRecording || state.isPaused) return;
const interval = setInterval(() => {
const elapsed = (Date.now() - startTimeRef.current) / 1000;
setState((prev) => ({ ...prev, duration: elapsed }));
}, 100);
return () => clearInterval(interval);
}, [state.isRecording, state.isPaused]);
// Cleanup on unmount
React.useEffect(() => {
return () => {
isMonitoringRef.current = false;
if (streamRef.current) {
streamRef.current.getTracks().forEach((track) => track.stop());
}
if (audioContextRef.current) {
audioContextRef.current.close();
}
if (animationFrameRef.current) {
cancelAnimationFrame(animationFrameRef.current);
}
};
}, []);
// Settings setters
const setInputGain = React.useCallback((gain: number) => {
setSettings((prev) => ({ ...prev, inputGain: Math.max(0, Math.min(2, gain)) }));
// Update gain node if recording
if (gainNodeRef.current) {
gainNodeRef.current.gain.value = Math.max(0, Math.min(2, gain));
}
}, []);
const setRecordMono = React.useCallback((mono: boolean) => {
setSettings((prev) => ({ ...prev, recordMono: mono }));
}, []);
const setSampleRate = React.useCallback((sampleRate: number) => {
setSettings((prev) => ({ ...prev, sampleRate }));
}, []);
return {
state,
settings,
startRecording,
stopRecording,
pauseRecording,
resumeRecording,
getInputDevices,
selectInputDevice,
requestPermission,
setInputGain,
setRecordMono,
setSampleRate,
};
}