Recording Settings (Phase 8.3):
- Added input gain control (0.0-2.0 with dB display)
- Real-time gain adjustment via GainNode during recording
- Mono/stereo recording mode selection
- Sample rate matching (44.1 kHz, 48 kHz, 96 kHz)
- Mono conversion averages all channels when enabled
- Recording settings panel shown when track is armed

Components Created:
- RecordingSettings.tsx: settings panel with gain slider, mono/stereo toggle, sample rate buttons (a sketch of such a panel follows below)

Components Modified:
- useRecording hook: added settings state and GainNode integration
- AudioEditor: pass recording settings to TrackList
- TrackList: forward settings to Track components
- Track: show RecordingSettings when armed for recording

Technical Details:
- GainNode inserted between source and analyser in the recording chain
- Real-time gain updates via gainNode.gain.value
- AudioContext created with the target sample rate
- Mono conversion done post-recording by averaging channels
- Settings persist during the recording session

Phase 8 Complete:
- ✅ Phase 8.1: Audio Input
- ✅ Phase 8.2: Recording Controls (punch/overdub)
- ✅ Phase 8.3: Recording Settings
- 📋 Phase 9: Automation (NEXT)

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
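
The commit references RecordingSettings.tsx, but that component is not shown on this page. Below is a minimal sketch of such a panel, assuming the prop names used here (the actual component may differ):

// RecordingSettings.tsx (sketch; prop names are assumptions, not the shipped component)
import * as React from 'react';

interface RecordingSettingsProps {
  inputGain: number; // 0.0-2.0, 1.0 = unity
  recordMono: boolean;
  sampleRate: number;
  onGainChange: (gain: number) => void;
  onMonoChange: (mono: boolean) => void;
  onSampleRateChange: (rate: number) => void;
}

const SAMPLE_RATES = [44100, 48000, 96000];

export function RecordingSettings(props: RecordingSettingsProps) {
  // Display gain in dB (unity = 0 dB); guard against log10(0)
  const db =
    props.inputGain > 0 ? (20 * Math.log10(props.inputGain)).toFixed(1) : '-inf';

  return (
    <div>
      <label>
        Gain ({db} dB)
        <input
          type="range"
          min={0}
          max={2}
          step={0.01}
          value={props.inputGain}
          onChange={(e) => props.onGainChange(Number(e.target.value))}
        />
      </label>
      <label>
        <input
          type="checkbox"
          checked={props.recordMono}
          onChange={(e) => props.onMonoChange(e.target.checked)}
        />
        Record in mono
      </label>
      {SAMPLE_RATES.map((rate) => (
        <button
          key={rate}
          disabled={rate === props.sampleRate}
          onClick={() => props.onSampleRateChange(rate)}
        >
          {rate / 1000} kHz
        </button>
      ))}
    </div>
  );
}
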
'use client';

import * as React from 'react';

export interface RecordingState {
  isRecording: boolean;
  isPaused: boolean;
  duration: number;
  inputLevel: number;
}

export interface RecordingSettings {
  inputGain: number; // 0.0 to 2.0 (1.0 = unity)
  recordMono: boolean; // true = mono, false = stereo
  sampleRate: number; // target sample rate (44100, 48000, etc.)
}
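
// Note: unity gain (1.0) is 0 dB; the dB readout in the UI can be derived as
// 20 * Math.log10(inputGain), e.g. 2.0 is about +6 dB and 0.5 about -6 dB.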

export interface UseRecordingReturn {
  state: RecordingState;
  settings: RecordingSettings;
  startRecording: () => Promise<void>;
  stopRecording: () => Promise<AudioBuffer | null>;
  pauseRecording: () => void;
  resumeRecording: () => void;
  getInputDevices: () => Promise<MediaDeviceInfo[]>;
  selectInputDevice: (deviceId: string) => Promise<void>;
  requestPermission: () => Promise<boolean>;
  setInputGain: (gain: number) => void;
  setRecordMono: (mono: boolean) => void;
  setSampleRate: (sampleRate: number) => void;
}

export function useRecording(): UseRecordingReturn {
  const [state, setState] = React.useState<RecordingState>({
    isRecording: false,
    isPaused: false,
    duration: 0,
    inputLevel: 0,
  });

  const [settings, setSettings] = React.useState<RecordingSettings>({
    inputGain: 1.0,
    recordMono: false,
    sampleRate: 48000,
  });

  const mediaRecorderRef = React.useRef<MediaRecorder | null>(null);
  const audioContextRef = React.useRef<AudioContext | null>(null);
  const analyserRef = React.useRef<AnalyserNode | null>(null);
  const gainNodeRef = React.useRef<GainNode | null>(null);
  const streamRef = React.useRef<MediaStream | null>(null);
  const chunksRef = React.useRef<Blob[]>([]);
  const startTimeRef = React.useRef<number>(0);
  const pauseStartRef = React.useRef<number>(0); // timestamp when the current pause began
  const animationFrameRef = React.useRef<number>(0);
  const selectedDeviceIdRef = React.useRef<string>('');
  const isMonitoringRef = React.useRef<boolean>(false);

  // Request microphone permission
  const requestPermission = React.useCallback(async (): Promise<boolean> => {
    try {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      stream.getTracks().forEach((track) => track.stop());
      return true;
    } catch (error) {
      console.error('Microphone permission denied:', error);
      return false;
    }
  }, []);

  // Get available input devices
  const getInputDevices = React.useCallback(async (): Promise<MediaDeviceInfo[]> => {
    try {
      const devices = await navigator.mediaDevices.enumerateDevices();
      return devices.filter((device) => device.kind === 'audioinput');
    } catch (error) {
      console.error('Failed to enumerate devices:', error);
      return [];
    }
  }, []);

  // Select input device
  const selectInputDevice = React.useCallback(async (deviceId: string): Promise<void> => {
    selectedDeviceIdRef.current = deviceId;
  }, []);

  // Convert linear amplitude to a dB scale normalized to the 0-1 range
  const linearToDbScale = React.useCallback((linear: number): number => {
    if (linear === 0) return 0;

    // Convert to dB (20 * log10(linear))
    const db = 20 * Math.log10(linear);

    // Normalize the -60dB to 0dB range to 0-1:
    // -60dB or lower = 0%, 0dB = 100%
    const minDb = -60;
    const maxDb = 0;
    const normalized = (db - minDb) / (maxDb - minDb);

    // Clamp to 0-1 range
    return Math.max(0, Math.min(1, normalized));
  }, []);
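
  // Worked example (illustrative numbers): a linear peak of 0.5 is
  // 20 * log10(0.5) ≈ -6.02 dB, which normalizes to (-6.02 + 60) / 60 ≈ 0.90,
  // i.e. ~90% on the meter; a peak at or below 0.001 (-60 dB) reads 0.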

  // Monitor input level
  const monitorInputLevel = React.useCallback(() => {
    if (!analyserRef.current) return;

    const analyser = analyserRef.current;
    const dataArray = new Float32Array(analyser.fftSize);

    const updateLevel = () => {
      if (!isMonitoringRef.current) return;

      analyser.getFloatTimeDomainData(dataArray);

      // Calculate peak level using float data (-1 to +1 range)
      let peak = 0;
      for (let i = 0; i < dataArray.length; i++) {
        const abs = Math.abs(dataArray[i]);
        if (abs > peak) {
          peak = abs;
        }
      }

      // Convert linear peak to logarithmic dB scale
      const dbLevel = linearToDbScale(peak);

      setState((prev) => ({ ...prev, inputLevel: dbLevel }));

      animationFrameRef.current = requestAnimationFrame(updateLevel);
    };

    updateLevel();
  }, [linearToDbScale]);

  // Start recording
  const startRecording = React.useCallback(async (): Promise<void> => {
    try {
      // Get user media with selected device
      const constraints: MediaStreamConstraints = {
        audio: selectedDeviceIdRef.current
          ? { deviceId: { exact: selectedDeviceIdRef.current } }
          : true,
      };

      const stream = await navigator.mediaDevices.getUserMedia(constraints);
      streamRef.current = stream;

      // Create audio context with target sample rate
      const audioContext = new AudioContext({ sampleRate: settings.sampleRate });
      audioContextRef.current = audioContext;

      const source = audioContext.createMediaStreamSource(stream);

      // Create gain node for input gain control
      const gainNode = audioContext.createGain();
      gainNode.gain.value = settings.inputGain;
      gainNodeRef.current = gainNode;

      // Create analyser for level monitoring
      const analyser = audioContext.createAnalyser();
      analyser.fftSize = 256;
      analyser.smoothingTimeConstant = 0.3;

      // Connect: source -> gain -> analyser
      source.connect(gainNode);
      gainNode.connect(analyser);
      analyserRef.current = analyser;

      // Route the gained signal into a MediaStream destination and record
      // that stream; recording the raw mic stream directly would bypass the
      // GainNode, so gain changes would show on the meter but be absent from
      // the captured audio.
      const destination = audioContext.createMediaStreamDestination();
      gainNode.connect(destination);

      // Create MediaRecorder on the processed stream
      const mediaRecorder = new MediaRecorder(destination.stream);
      mediaRecorderRef.current = mediaRecorder;
      chunksRef.current = [];

      mediaRecorder.ondataavailable = (event) => {
        if (event.data.size > 0) {
          chunksRef.current.push(event.data);
        }
      };

      // Start recording
      mediaRecorder.start();
      startTimeRef.current = Date.now();

      setState({
        isRecording: true,
        isPaused: false,
        duration: 0,
        inputLevel: 0,
      });

      // Start monitoring input level
      isMonitoringRef.current = true;
      monitorInputLevel();
    } catch (error) {
      console.error('Failed to start recording:', error);
      throw error;
    }
  }, [monitorInputLevel, settings.sampleRate, settings.inputGain]);

  // Stop recording and return an AudioBuffer
  const stopRecording = React.useCallback(async (): Promise<AudioBuffer | null> => {
    return new Promise((resolve) => {
      if (!mediaRecorderRef.current || !streamRef.current) {
        resolve(null);
        return;
      }

      const mediaRecorder = mediaRecorderRef.current;

      mediaRecorder.onstop = async () => {
        // Stop all tracks
        streamRef.current?.getTracks().forEach((track) => track.stop());

        // Create blob from recorded chunks
        const blob = new Blob(chunksRef.current, { type: 'audio/webm' });

        // Convert blob to AudioBuffer
        try {
          const arrayBuffer = await blob.arrayBuffer();
          // Decode at the target sample rate: decodeAudioData resamples to the
          // context's rate, so a default-rate context would silently undo the
          // sample rate setting
          const decodeContext = new AudioContext({ sampleRate: settings.sampleRate });
          let audioBuffer = await decodeContext.decodeAudioData(arrayBuffer);

          // Convert to mono if requested
          if (settings.recordMono && audioBuffer.numberOfChannels > 1) {
            const monoBuffer = decodeContext.createBuffer(
              1,
              audioBuffer.length,
              audioBuffer.sampleRate
            );
            const monoData = monoBuffer.getChannelData(0);

            // Mix all channels to mono by averaging
            for (let i = 0; i < audioBuffer.length; i++) {
              let sum = 0;
              for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
                sum += audioBuffer.getChannelData(channel)[i];
              }
              monoData[i] = sum / audioBuffer.numberOfChannels;
            }

            audioBuffer = monoBuffer;
          }

          // Clean up both contexts and clear the node refs
          isMonitoringRef.current = false;
          await decodeContext.close();
          if (audioContextRef.current) {
            await audioContextRef.current.close();
            audioContextRef.current = null;
          }
          gainNodeRef.current = null;
          analyserRef.current = null;
          if (animationFrameRef.current) {
            cancelAnimationFrame(animationFrameRef.current);
          }

          setState({
            isRecording: false,
            isPaused: false,
            duration: 0,
            inputLevel: 0,
          });

          resolve(audioBuffer);
        } catch (error) {
          console.error('Failed to decode recorded audio:', error);
          resolve(null);
        }
      };

      mediaRecorder.stop();
    });
  }, [settings.recordMono, settings.sampleRate]);

  // Pause recording
  const pauseRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && !state.isPaused) {
      mediaRecorderRef.current.pause();
      pauseStartRef.current = Date.now();
      setState((prev) => ({ ...prev, isPaused: true }));

      isMonitoringRef.current = false;
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    }
  }, [state.isRecording, state.isPaused]);

  // Resume recording
  const resumeRecording = React.useCallback(() => {
    if (mediaRecorderRef.current && state.isRecording && state.isPaused) {
      mediaRecorderRef.current.resume();
      // Shift the start time forward by the paused span so the duration
      // display does not count time spent paused
      startTimeRef.current += Date.now() - pauseStartRef.current;
      setState((prev) => ({ ...prev, isPaused: false }));
      isMonitoringRef.current = true;
      monitorInputLevel();
    }
  }, [state.isRecording, state.isPaused, monitorInputLevel]);

  // Update duration
  React.useEffect(() => {
    if (!state.isRecording || state.isPaused) return;

    const interval = setInterval(() => {
      const elapsed = (Date.now() - startTimeRef.current) / 1000;
      setState((prev) => ({ ...prev, duration: elapsed }));
    }, 100);

    return () => clearInterval(interval);
  }, [state.isRecording, state.isPaused]);

  // Cleanup on unmount
  React.useEffect(() => {
    return () => {
      isMonitoringRef.current = false;
      if (streamRef.current) {
        streamRef.current.getTracks().forEach((track) => track.stop());
      }
      if (audioContextRef.current) {
        audioContextRef.current.close();
      }
      if (animationFrameRef.current) {
        cancelAnimationFrame(animationFrameRef.current);
      }
    };
  }, []);

  // Settings setters
  const setInputGain = React.useCallback((gain: number) => {
    const clamped = Math.max(0, Math.min(2, gain));
    setSettings((prev) => ({ ...prev, inputGain: clamped }));
    // Update the live gain node so changes apply while recording
    if (gainNodeRef.current) {
      gainNodeRef.current.gain.value = clamped;
    }
  }, []);

  const setRecordMono = React.useCallback((mono: boolean) => {
    setSettings((prev) => ({ ...prev, recordMono: mono }));
  }, []);

  const setSampleRate = React.useCallback((sampleRate: number) => {
    setSettings((prev) => ({ ...prev, sampleRate }));
  }, []);

  return {
    state,
    settings,
    startRecording,
    stopRecording,
    pauseRecording,
    resumeRecording,
    getInputDevices,
    selectInputDevice,
    requestPermission,
    setInputGain,
    setRecordMono,
    setSampleRate,
  };
}
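
For reference, here is a minimal sketch of a component consuming this hook. The component and prop names (RecordButton, onClipRecorded) are illustrative assumptions, not part of the actual codebase:

// RecordButton.tsx (hypothetical consumer of useRecording)
import * as React from 'react';
import { useRecording } from './useRecording';

export function RecordButton({ onClipRecorded }: { onClipRecorded: (buffer: AudioBuffer) => void }) {
  const { state, startRecording, stopRecording, setInputGain } = useRecording();

  const toggle = async () => {
    if (state.isRecording) {
      // stopRecording resolves with the decoded (and optionally mono-mixed) clip
      const buffer = await stopRecording();
      if (buffer) onClipRecorded(buffer);
    } else {
      await startRecording();
    }
  };

  return (
    <div>
      <button onClick={toggle}>{state.isRecording ? 'Stop' : 'Record'}</button>
      {/* Live meter: state.inputLevel is already dB-scaled to 0-1 */}
      <progress value={state.inputLevel} max={1} />
      {/* Gain slider: 0.0-2.0, applied in real time via the GainNode */}
      <input
        type="range"
        min={0}
        max={2}
        step={0.01}
        defaultValue={1}
        onChange={(e) => setInputGain(Number(e.target.value))}
      />
    </div>
  );
}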