audio-ui/lib/audio/decoder.ts
Sebastian Krüger 908e6caaf8 feat: enhance mobile responsiveness with collapsible controls and automation/effects bars
Added comprehensive mobile support for Phase 15 (Polish & Optimization):

**Mobile Layout Enhancements:**
- Track controls now collapsible on mobile with two states (see the sketch after this list):
  - Collapsed: minimal controls with expand chevron, R/M/S buttons, horizontal level meter
  - Expanded: full height fader, pan control, all buttons
- Track collapse buttons added to mobile view (left chevron for track collapse, right chevron for control collapse)
- Master controls collapse button hidden on desktop (lg:hidden)
- Automation and effects bars now available on mobile layout
- Both bars collapsible with eye/eye-off icons, horizontally scrollable when zoomed
- Mobile vertical stacking: controls → waveform → automation → effects per track
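
A minimal sketch of the collapsed/expanded pattern — component, state, and Tailwind class names below are illustrative, not the actual implementation:

```tsx
import { useState } from 'react';

// Hypothetical component illustrating the two mobile states described above.
export function MobileTrackControls({ trackName }: { trackName: string }) {
  const [expanded, setExpanded] = useState(false);

  return (
    <div className="flex flex-col gap-1 p-1 lg:hidden">
      {/* Collapsed row: expand chevron, R/M/S buttons, horizontal level meter */}
      <div className="flex items-center gap-1">
        <button aria-label="Toggle controls" onClick={() => setExpanded((e) => !e)}>
          {expanded ? '▾' : '▸'}
        </button>
        <span className="truncate text-xs">{trackName}</span>
        <button className="text-xs">R</button>
        <button className="text-xs">M</button>
        <button className="text-xs">S</button>
        <div className="h-1 flex-1 rounded bg-green-500" /> {/* level meter */}
      </div>
      {/* Expanded state adds the full fader and pan control */}
      {expanded && (
        <div className="flex items-center gap-2">
          <input type="range" min={0} max={1} step={0.01} aria-label="Volume" />
          <input type="range" min={-1} max={1} step={0.01} aria-label="Pan" />
        </div>
      )}
    </div>
  );
}
```

The `lg:hidden` wrapper keeps the mobile-only controls out of the desktop layout, mirroring the master-controls collapse button mentioned above.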

**Bug Fixes:**
- Fixed height mismatch between track controls and the waveform container on desktop
- Fixed Modal component prop: isOpen → open in all dialog components
- Fixed TypeScript null check for audioBuffer.duration
- Fixed keyboard shortcut category: 'help' → 'view'

**Technical Improvements:**
- Consistent height calculation driven by a single trackHeight variable (see the sketch after this list)
- Proper responsive breakpoints with Tailwind (sm: 640px, lg: 1024px)
- Progressive disclosure pattern for mobile controls
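
A minimal sketch of the shared-height approach, assuming a per-track `trackHeight` value; names, pixel values, and class names are assumptions:

```tsx
// Hypothetical row layout: one trackHeight value sizes both columns, so the
// controls column and the waveform container cannot drift apart.
export function TrackRow({ collapsed }: { collapsed: boolean }) {
  const trackHeight = collapsed ? 48 : 160; // px; example values only

  return (
    <div className="flex w-full">
      <div style={{ height: trackHeight }} className="w-32 shrink-0 sm:w-40 lg:w-56">
        {/* track controls */}
      </div>
      <div style={{ height: trackHeight }} className="flex-1 overflow-x-auto">
        {/* waveform canvas */}
      </div>
    </div>
  );
}
```

Driving both columns from the same value is what keeps controls and waveform aligned, which is also the desktop height fix listed under Bug Fixes.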

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-11-19 20:50:44 +01:00

282 lines
7.3 KiB
TypeScript

/**
 * Audio file decoding utilities
 */
import { getAudioContext } from './context';
import { checkFileMemoryLimit, type MemoryCheckResult } from '../utils/memory-limits';

export interface ImportOptions {
  convertToMono?: boolean;
  targetSampleRate?: number; // If specified, resample to this rate
  normalizeOnImport?: boolean;
}

export interface AudioFileInfo {
  buffer: AudioBuffer;
  metadata: AudioMetadata;
}

export interface AudioMetadata {
  fileName: string;
  fileSize: number;
  fileType: string;
  duration: number;
  sampleRate: number;
  channels: number;
  bitDepth?: number;
  codec?: string;
}

/**
 * Decode an audio file to AudioBuffer with optional conversions
 */
export async function decodeAudioFile(
  file: File,
  options: ImportOptions = {}
): Promise<AudioBuffer> {
  const arrayBuffer = await file.arrayBuffer();
  const audioContext = getAudioContext();

  try {
    let audioBuffer = await audioContext.decodeAudioData(arrayBuffer);

    // Apply conversions if requested
    if (options.convertToMono && audioBuffer.numberOfChannels > 1) {
      audioBuffer = convertToMono(audioBuffer);
    }

    if (options.targetSampleRate && audioBuffer.sampleRate !== options.targetSampleRate) {
      audioBuffer = await resampleAudioBuffer(audioBuffer, options.targetSampleRate);
    }

    if (options.normalizeOnImport) {
      audioBuffer = normalizeAudioBuffer(audioBuffer);
    }

    return audioBuffer;
  } catch (error) {
    throw new Error(`Failed to decode audio file: ${error}`);
  }
}

/**
 * Decode audio file and return both buffer and metadata
 */
export async function importAudioFile(
  file: File,
  options: ImportOptions = {}
): Promise<AudioFileInfo> {
  const audioBuffer = await decodeAudioFile(file, options);
  const metadata = extractMetadata(file, audioBuffer);

  return {
    buffer: audioBuffer,
    metadata,
  };
}
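
// Example usage (illustrative only; assumes a File obtained from an
// <input type="file"> element or a drag-and-drop handler):
//
//   const { buffer, metadata } = await importAudioFile(file, {
//     convertToMono: true,
//     targetSampleRate: 48000,
//   });
//   console.log(`${metadata.fileName}: ${formatDuration(metadata.duration)}`);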

/**
 * Convert stereo (or multi-channel) audio to mono
 */
function convertToMono(audioBuffer: AudioBuffer): AudioBuffer {
  const audioContext = getAudioContext();
  const numberOfChannels = audioBuffer.numberOfChannels;

  if (numberOfChannels === 1) {
    return audioBuffer; // Already mono
  }

  // Create a new mono buffer
  const monoBuffer = audioContext.createBuffer(
    1,
    audioBuffer.length,
    audioBuffer.sampleRate
  );
  const monoData = monoBuffer.getChannelData(0);

  // Mix all channels equally
  for (let i = 0; i < audioBuffer.length; i++) {
    let sum = 0;
    for (let channel = 0; channel < numberOfChannels; channel++) {
      sum += audioBuffer.getChannelData(channel)[i];
    }
    monoData[i] = sum / numberOfChannels;
  }

  return monoBuffer;
}

/**
 * Resample audio buffer to a different sample rate
 */
async function resampleAudioBuffer(
  audioBuffer: AudioBuffer,
  targetSampleRate: number
): Promise<AudioBuffer> {
  const audioContext = getAudioContext();

  // Create an offline context at the target sample rate
  const offlineContext = new OfflineAudioContext(
    audioBuffer.numberOfChannels,
    Math.ceil(audioBuffer.duration * targetSampleRate),
    targetSampleRate
  );

  // Create a buffer source
  const source = offlineContext.createBufferSource();
  source.buffer = audioBuffer;
  source.connect(offlineContext.destination);
  source.start(0);

  // Render the audio at the new sample rate
  const resampledBuffer = await offlineContext.startRendering();
  return resampledBuffer;
}

/**
 * Normalize audio buffer to peak amplitude
 */
function normalizeAudioBuffer(audioBuffer: AudioBuffer): AudioBuffer {
  const audioContext = getAudioContext();

  // Find peak amplitude across all channels
  let peak = 0;
  for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
    const channelData = audioBuffer.getChannelData(channel);
    for (let i = 0; i < channelData.length; i++) {
      const abs = Math.abs(channelData[i]);
      if (abs > peak) peak = abs;
    }
  }

  if (peak === 0 || peak === 1.0) {
    return audioBuffer; // Already normalized or silent
  }

  // Create normalized buffer
  const normalizedBuffer = audioContext.createBuffer(
    audioBuffer.numberOfChannels,
    audioBuffer.length,
    audioBuffer.sampleRate
  );

  // Apply normalization with 1% headroom
  const scale = 0.99 / peak;
  for (let channel = 0; channel < audioBuffer.numberOfChannels; channel++) {
    const inputData = audioBuffer.getChannelData(channel);
    const outputData = normalizedBuffer.getChannelData(channel);
    for (let i = 0; i < inputData.length; i++) {
      outputData[i] = inputData[i] * scale;
    }
  }

  return normalizedBuffer;
}

/**
 * Extract metadata from file and audio buffer
 */
function extractMetadata(file: File, audioBuffer: AudioBuffer): AudioMetadata {
  // Detect codec from file extension or MIME type
  const codec = detectCodec(file);

  return {
    fileName: file.name,
    fileSize: file.size,
    fileType: file.type || 'unknown',
    duration: audioBuffer.duration,
    sampleRate: audioBuffer.sampleRate,
    channels: audioBuffer.numberOfChannels,
    codec,
  };
}

/**
 * Detect audio codec from file
 */
function detectCodec(file: File): string {
  const ext = file.name.split('.').pop()?.toLowerCase();
  const mimeType = file.type.toLowerCase();

  if (mimeType.includes('wav') || ext === 'wav') return 'WAV (PCM)';
  if (mimeType.includes('mpeg') || mimeType.includes('mp3') || ext === 'mp3') return 'MP3';
  if (mimeType.includes('ogg') || ext === 'ogg') return 'OGG Vorbis';
  if (mimeType.includes('flac') || ext === 'flac') return 'FLAC';
  if (mimeType.includes('m4a') || mimeType.includes('aac') || ext === 'm4a') return 'AAC (M4A)';
  if (ext === 'aiff' || ext === 'aif') return 'AIFF';
  if (mimeType.includes('webm') || ext === 'webm') return 'WebM Opus';
  return 'Unknown';
}

/**
 * Get audio file metadata without decoding the entire file
 */
export async function getAudioFileMetadata(file: File): Promise<{
  name: string;
  size: number;
  type: string;
}> {
  return {
    name: file.name,
    size: file.size,
    type: file.type,
  };
}

/**
 * Check if a file is a supported audio format
 */
export function isSupportedAudioFormat(file: File): boolean {
  const supportedFormats = [
    'audio/wav',
    'audio/wave',
    'audio/x-wav',
    'audio/mpeg',
    'audio/mp3',
    'audio/ogg',
    'audio/webm',
    'audio/flac',
    'audio/aac',
    'audio/m4a',
    'audio/x-m4a',
    'audio/aiff',
    'audio/x-aiff',
  ];

  return supportedFormats.includes(file.type) ||
    /\.(wav|mp3|ogg|webm|flac|aac|m4a|aiff|aif)$/i.test(file.name);
}

/**
 * Check memory requirements for an audio file before decoding
 * @param file File to check
 * @returns Memory check result with warning if file is large
 */
export function checkAudioFileMemory(file: File): MemoryCheckResult {
  return checkFileMemoryLimit(file.size);
}
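
// Illustrative pre-flight sequence before decoding (exact handling of the
// MemoryCheckResult is left to the caller; its shape is defined in
// ../utils/memory-limits):
//
//   if (!isSupportedAudioFormat(file)) throw new Error('Unsupported audio format');
//   const memoryCheck = checkAudioFileMemory(file); // inspect before decoding large files
//   const { buffer, metadata } = await importAudioFile(file);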

/**
 * Format duration in seconds to MM:SS format
 */
export function formatDuration(seconds: number): string {
  const mins = Math.floor(seconds / 60);
  const secs = Math.floor(seconds % 60);
  return `${mins.toString().padStart(2, '0')}:${secs.toString().padStart(2, '0')}`;
}

/**
 * Format file size to human-readable format
 */
export function formatFileSize(bytes: number): string {
  if (bytes === 0) return '0 B';
  const k = 1024;
  const sizes = ['B', 'KB', 'MB', 'GB'];
  const i = Math.floor(Math.log(bytes) / Math.log(k));
  return `${parseFloat((bytes / Math.pow(k, i)).toFixed(2))} ${sizes[i]}`;
}