fix(voice): cleanup audio context on processing init failure

This commit is contained in:
ServerBob 2026-03-05 06:41:13 +00:00
parent 7a85935eee
commit bdd5ff768e
1 changed file with 93 additions and 67 deletions

View File

@ -415,79 +415,105 @@ const VoiceProvider = memo(({ children }: TVoiceProviderProps) => {
let rnnoiseNode: AudioWorkletNode | undefined;
let gateNode: AudioWorkletNode | undefined;
if (useDeepFilterNet) {
const deepFilterSuppression = sensitivityToDeepFilterLevel(
devices.voiceSensitivity ?? 70
);
await withDeepFilterFetchProxy(async () => {
deepFilterCore = new DeepFilterNet3Core({
sampleRate: 48000,
noiseReductionLevel: deepFilterSuppression,
assetConfig: {
cdnUrl: DEEPFILTER_LOCAL_CDN_URL
}
});
await deepFilterCore.initialize();
deepFilterNode = await deepFilterCore.createAudioWorkletNode(ctx);
});
if (!deepFilterNode) {
throw new Error('DeepFilterNet worklet node was not created');
const cleanupLocalProcessing = async () => {
try {
gateNode?.disconnect();
rnnoiseNode?.disconnect();
deepFilterNode?.disconnect();
source.disconnect();
dest.disconnect();
deepFilterCore?.destroy?.();
} catch {
// ignore cleanup failures
}
// Apply again after node creation; pre-node setSuppressionLevel is a no-op.
deepFilterCore?.setSuppressionLevel(deepFilterSuppression);
current.connect(deepFilterNode);
current = deepFilterNode;
}
if (useRnnoise) {
await ctx.audioWorklet.addModule(NoiseSuppressorWorklet);
rnnoiseNode = new AudioWorkletNode(ctx, NoiseSuppressorWorklet_Name);
current.connect(rnnoiseNode);
current = rnnoiseNode;
}
if (useKeyboardSuppression) {
await ctx.audioWorklet.addModule(KeyboardNoiseGateWorklet);
gateNode = new AudioWorkletNode(ctx, 'keyboard-noise-gate-processor', {
parameterData: {
threshold: sensitivityToThreshold(devices.voiceSensitivity ?? 70),
floor: 0.04,
attack: 0.65,
release: 0.985
try {
if (ctx.state !== 'closed') {
await ctx.close();
}
});
current.connect(gateNode);
current = gateNode;
}
current.connect(dest);
audioProcessingRef.current = {
ctx,
source,
deepFilterNode,
deepFilterCore,
rnnoiseNode,
gateNode,
dest,
input
} catch {
// ignore context close failures
}
};
const chain = [
useDeepFilterNet ? 'DeepFilterNet' : '',
useRnnoise ? 'RNNoise' : '',
useKeyboardSuppression ? 'Keyboard Gate' : ''
]
.filter(Boolean)
.join(' + ');
try {
if (useDeepFilterNet) {
const deepFilterSuppression = sensitivityToDeepFilterLevel(
devices.voiceSensitivity ?? 70
);
await withDeepFilterFetchProxy(async () => {
deepFilterCore = new DeepFilterNet3Core({
sampleRate: 48000,
noiseReductionLevel: deepFilterSuppression,
assetConfig: {
cdnUrl: DEEPFILTER_LOCAL_CDN_URL
}
});
await deepFilterCore.initialize();
deepFilterNode = await deepFilterCore.createAudioWorkletNode(ctx);
});
if (!deepFilterNode) {
throw new Error('DeepFilterNet worklet node was not created');
}
// Apply again after node creation; pre-node setSuppressionLevel is a no-op.
deepFilterCore?.setSuppressionLevel(deepFilterSuppression);
current.connect(deepFilterNode);
current = deepFilterNode;
}
setMicProcessingStatus({
active: true,
chain: chain || 'none',
note: 'Client-side processing active'
});
if (useRnnoise) {
await ctx.audioWorklet.addModule(NoiseSuppressorWorklet);
rnnoiseNode = new AudioWorkletNode(ctx, NoiseSuppressorWorklet_Name);
current.connect(rnnoiseNode);
current = rnnoiseNode;
}
return dest.stream;
if (useKeyboardSuppression) {
await ctx.audioWorklet.addModule(KeyboardNoiseGateWorklet);
gateNode = new AudioWorkletNode(ctx, 'keyboard-noise-gate-processor', {
parameterData: {
threshold: sensitivityToThreshold(devices.voiceSensitivity ?? 70),
floor: 0.04,
attack: 0.65,
release: 0.985
}
});
current.connect(gateNode);
current = gateNode;
}
current.connect(dest);
audioProcessingRef.current = {
ctx,
source,
deepFilterNode,
deepFilterCore,
rnnoiseNode,
gateNode,
dest,
input
};
const chain = [
useDeepFilterNet ? 'DeepFilterNet' : '',
useRnnoise ? 'RNNoise' : '',
useKeyboardSuppression ? 'Keyboard Gate' : ''
]
.filter(Boolean)
.join(' + ');
setMicProcessingStatus({
active: true,
chain: chain || 'none',
note: 'Client-side processing active'
});
return dest.stream;
} catch (err) {
await cleanupLocalProcessing();
throw err;
}
}, [devices.noiseSuppressionDeepFilterNet, devices.noiseSuppressionRnnoise, devices.keyboardSuppression, devices.voiceSensitivity, cleanupAudioProcessing]);
const acquireMicStream = useCallback(async (): Promise<{ stream: MediaStream; track: MediaStreamTrack; raw: MediaStream }> => {
@ -537,7 +563,7 @@ const VoiceProvider = memo(({ children }: TVoiceProviderProps) => {
stopRawMic();
rawMicStreamRef.current = raw;
if (track) {
if (track) {
logVoice('Obtained audio track', { audioTrack: track });
localAudioProducer.current = await producerTransport.current?.produce({