feat: implement Phase 8.1 - audio recording infrastructure
Added recording capabilities to the multi-track editor: - useRecording hook with MediaRecorder API integration - Audio input device enumeration and selection - Microphone permission handling - Input level monitoring with RMS calculation - InputLevelMeter component with visual feedback - Record-enable button per track with pulsing indicator - Real-time input level display when recording Recording infrastructure is complete. Next: integrate into AudioEditor for global recording control and buffer storage. 🤖 Generated with [Claude Code](https://claude.com/claude-code) Co-Authored-By: Claude <noreply@anthropic.com>
This commit is contained in:
258
lib/hooks/useRecording.ts
Normal file
258
lib/hooks/useRecording.ts
Normal file
@@ -0,0 +1,258 @@
|
||||
'use client';
|
||||
|
||||
import * as React from 'react';
|
||||
|
||||
/**
 * Snapshot of the current recording session, surfaced to UI components
 * (e.g. the input level meter and the record button).
 */
export interface RecordingState {
  /** True while a MediaRecorder session is active (including while paused). */
  isRecording: boolean;
  /** True when the active session has been paused via pauseRecording(). */
  isPaused: boolean;
  /** Elapsed recording time in seconds, refreshed roughly every 100 ms. */
  duration: number;
  /** Current input level as an RMS value in the range 0..1. */
  inputLevel: number;
}
|
||||
|
||||
/**
 * Public API returned by {@link useRecording}.
 */
export interface UseRecordingReturn {
  /** Live recording state for rendering. */
  state: RecordingState;
  /** Acquires the microphone and begins capturing audio; rejects on failure. */
  startRecording: () => Promise<void>;
  /**
   * Stops the active session and resolves with the captured audio decoded
   * into an AudioBuffer, or null if nothing was recorded / decoding failed.
   */
  stopRecording: () => Promise<AudioBuffer | null>;
  /** Pauses the active recording (no-op when not recording). */
  pauseRecording: () => void;
  /** Resumes a paused recording (no-op when not paused). */
  resumeRecording: () => void;
  /** Lists available audio input devices ('audioinput' kind only). */
  getInputDevices: () => Promise<MediaDeviceInfo[]>;
  /** Selects the device used by the next startRecording() call. */
  selectInputDevice: (deviceId: string) => Promise<void>;
  /** Prompts for microphone permission; resolves true if granted. */
  requestPermission: () => Promise<boolean>;
}
|
||||
|
||||
export function useRecording(): UseRecordingReturn {
|
||||
const [state, setState] = React.useState<RecordingState>({
|
||||
isRecording: false,
|
||||
isPaused: false,
|
||||
duration: 0,
|
||||
inputLevel: 0,
|
||||
});
|
||||
|
||||
const mediaRecorderRef = React.useRef<MediaRecorder | null>(null);
|
||||
const audioContextRef = React.useRef<AudioContext | null>(null);
|
||||
const analyserRef = React.useRef<AnalyserNode | null>(null);
|
||||
const streamRef = React.useRef<MediaStream | null>(null);
|
||||
const chunksRef = React.useRef<Blob[]>([]);
|
||||
const startTimeRef = React.useRef<number>(0);
|
||||
const animationFrameRef = React.useRef<number>(0);
|
||||
const selectedDeviceIdRef = React.useRef<string>('');
|
||||
|
||||
// Request microphone permission
|
||||
const requestPermission = React.useCallback(async (): Promise<boolean> => {
|
||||
try {
|
||||
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
|
||||
stream.getTracks().forEach((track) => track.stop());
|
||||
return true;
|
||||
} catch (error) {
|
||||
console.error('Microphone permission denied:', error);
|
||||
return false;
|
||||
}
|
||||
}, []);
|
||||
|
||||
// Get available input devices
|
||||
const getInputDevices = React.useCallback(async (): Promise<MediaDeviceInfo[]> => {
|
||||
try {
|
||||
const devices = await navigator.mediaDevices.enumerateDevices();
|
||||
return devices.filter((device) => device.kind === 'audioinput');
|
||||
} catch (error) {
|
||||
console.error('Failed to enumerate devices:', error);
|
||||
return [];
|
||||
}
|
||||
}, []);
|
||||
|
||||
// Select input device
|
||||
const selectInputDevice = React.useCallback(async (deviceId: string): Promise<void> => {
|
||||
selectedDeviceIdRef.current = deviceId;
|
||||
}, []);
|
||||
|
||||
// Monitor input level
|
||||
const monitorInputLevel = React.useCallback(() => {
|
||||
if (!analyserRef.current) return;
|
||||
|
||||
const analyser = analyserRef.current;
|
||||
const dataArray = new Uint8Array(analyser.frequencyBinCount);
|
||||
|
||||
const updateLevel = () => {
|
||||
analyser.getByteTimeDomainData(dataArray);
|
||||
|
||||
// Calculate RMS level
|
||||
let sum = 0;
|
||||
for (let i = 0; i < dataArray.length; i++) {
|
||||
const normalized = (dataArray[i] - 128) / 128;
|
||||
sum += normalized * normalized;
|
||||
}
|
||||
const rms = Math.sqrt(sum / dataArray.length);
|
||||
|
||||
setState((prev) => ({ ...prev, inputLevel: rms }));
|
||||
|
||||
if (state.isRecording && !state.isPaused) {
|
||||
animationFrameRef.current = requestAnimationFrame(updateLevel);
|
||||
}
|
||||
};
|
||||
|
||||
updateLevel();
|
||||
}, [state.isRecording, state.isPaused]);
|
||||
|
||||
// Start recording
|
||||
const startRecording = React.useCallback(async (): Promise<void> => {
|
||||
try {
|
||||
// Get user media with selected device
|
||||
const constraints: MediaStreamConstraints = {
|
||||
audio: selectedDeviceIdRef.current
|
||||
? { deviceId: { exact: selectedDeviceIdRef.current } }
|
||||
: true,
|
||||
};
|
||||
|
||||
const stream = await navigator.mediaDevices.getUserMedia(constraints);
|
||||
streamRef.current = stream;
|
||||
|
||||
// Create audio context and analyser for level monitoring
|
||||
const audioContext = new AudioContext();
|
||||
audioContextRef.current = audioContext;
|
||||
|
||||
const source = audioContext.createMediaStreamSource(stream);
|
||||
const analyser = audioContext.createAnalyser();
|
||||
analyser.fftSize = 256;
|
||||
analyser.smoothingTimeConstant = 0.3;
|
||||
|
||||
source.connect(analyser);
|
||||
analyserRef.current = analyser;
|
||||
|
||||
// Create MediaRecorder
|
||||
const mediaRecorder = new MediaRecorder(stream);
|
||||
mediaRecorderRef.current = mediaRecorder;
|
||||
chunksRef.current = [];
|
||||
|
||||
mediaRecorder.ondataavailable = (event) => {
|
||||
if (event.data.size > 0) {
|
||||
chunksRef.current.push(event.data);
|
||||
}
|
||||
};
|
||||
|
||||
// Start recording
|
||||
mediaRecorder.start();
|
||||
startTimeRef.current = Date.now();
|
||||
|
||||
setState({
|
||||
isRecording: true,
|
||||
isPaused: false,
|
||||
duration: 0,
|
||||
inputLevel: 0,
|
||||
});
|
||||
|
||||
// Start monitoring input level
|
||||
monitorInputLevel();
|
||||
} catch (error) {
|
||||
console.error('Failed to start recording:', error);
|
||||
throw error;
|
||||
}
|
||||
}, [monitorInputLevel]);
|
||||
|
||||
// Stop recording and return AudioBuffer
|
||||
const stopRecording = React.useCallback(async (): Promise<AudioBuffer | null> => {
|
||||
return new Promise((resolve) => {
|
||||
if (!mediaRecorderRef.current || !streamRef.current) {
|
||||
resolve(null);
|
||||
return;
|
||||
}
|
||||
|
||||
const mediaRecorder = mediaRecorderRef.current;
|
||||
|
||||
mediaRecorder.onstop = async () => {
|
||||
// Stop all tracks
|
||||
streamRef.current?.getTracks().forEach((track) => track.stop());
|
||||
|
||||
// Create blob from recorded chunks
|
||||
const blob = new Blob(chunksRef.current, { type: 'audio/webm' });
|
||||
|
||||
// Convert blob to AudioBuffer
|
||||
try {
|
||||
const arrayBuffer = await blob.arrayBuffer();
|
||||
const audioContext = new AudioContext();
|
||||
const audioBuffer = await audioContext.decodeAudioData(arrayBuffer);
|
||||
|
||||
// Clean up
|
||||
if (audioContextRef.current) {
|
||||
await audioContextRef.current.close();
|
||||
}
|
||||
if (animationFrameRef.current) {
|
||||
cancelAnimationFrame(animationFrameRef.current);
|
||||
}
|
||||
|
||||
setState({
|
||||
isRecording: false,
|
||||
isPaused: false,
|
||||
duration: 0,
|
||||
inputLevel: 0,
|
||||
});
|
||||
|
||||
resolve(audioBuffer);
|
||||
} catch (error) {
|
||||
console.error('Failed to decode recorded audio:', error);
|
||||
resolve(null);
|
||||
}
|
||||
};
|
||||
|
||||
mediaRecorder.stop();
|
||||
});
|
||||
}, []);
|
||||
|
||||
// Pause recording
|
||||
const pauseRecording = React.useCallback(() => {
|
||||
if (mediaRecorderRef.current && state.isRecording && !state.isPaused) {
|
||||
mediaRecorderRef.current.pause();
|
||||
setState((prev) => ({ ...prev, isPaused: true }));
|
||||
|
||||
if (animationFrameRef.current) {
|
||||
cancelAnimationFrame(animationFrameRef.current);
|
||||
}
|
||||
}
|
||||
}, [state.isRecording, state.isPaused]);
|
||||
|
||||
// Resume recording
|
||||
const resumeRecording = React.useCallback(() => {
|
||||
if (mediaRecorderRef.current && state.isRecording && state.isPaused) {
|
||||
mediaRecorderRef.current.resume();
|
||||
setState((prev) => ({ ...prev, isPaused: false }));
|
||||
monitorInputLevel();
|
||||
}
|
||||
}, [state.isRecording, state.isPaused, monitorInputLevel]);
|
||||
|
||||
// Update duration
|
||||
React.useEffect(() => {
|
||||
if (!state.isRecording || state.isPaused) return;
|
||||
|
||||
const interval = setInterval(() => {
|
||||
const elapsed = (Date.now() - startTimeRef.current) / 1000;
|
||||
setState((prev) => ({ ...prev, duration: elapsed }));
|
||||
}, 100);
|
||||
|
||||
return () => clearInterval(interval);
|
||||
}, [state.isRecording, state.isPaused]);
|
||||
|
||||
// Cleanup on unmount
|
||||
React.useEffect(() => {
|
||||
return () => {
|
||||
if (streamRef.current) {
|
||||
streamRef.current.getTracks().forEach((track) => track.stop());
|
||||
}
|
||||
if (audioContextRef.current) {
|
||||
audioContextRef.current.close();
|
||||
}
|
||||
if (animationFrameRef.current) {
|
||||
cancelAnimationFrame(animationFrameRef.current);
|
||||
}
|
||||
};
|
||||
}, []);
|
||||
|
||||
return {
|
||||
state,
|
||||
startRecording,
|
||||
stopRecording,
|
||||
pauseRecording,
|
||||
resumeRecording,
|
||||
getInputDevices,
|
||||
selectInputDevice,
|
||||
requestPermission,
|
||||
};
|
||||
}
|
||||
Reference in New Issue
Block a user