emscripten audio: Added audio capture support.

Author: Ryan C. Gordon
Date:   2016-08-09 16:58:32 -04:00
parent 5de11a5fc5
commit 358a168c9d
1 changed file with 201 additions and 40 deletions


@@ -61,7 +61,6 @@ HandleAudioProcess(_THIS)
Uint8 *buf = NULL;
int byte_len = 0;
int bytes = SDL_AUDIO_BITSIZE(this->spec.format) / 8;
-    int bytes_in = SDL_AUDIO_BITSIZE(this->convert.src_format) / 8;
/* Only do something if audio is enabled */
if (!SDL_AtomicGet(&this->enabled) || SDL_AtomicGet(&this->paused)) {
@@ -69,6 +68,8 @@ HandleAudioProcess(_THIS)
}
if (this->convert.needed) {
+        const int bytes_in = SDL_AUDIO_BITSIZE(this->convert.src_format) / 8;
if (this->hidden->conv_in_len != 0) {
this->convert.len = this->hidden->conv_in_len * bytes_in * this->spec.channels;
}
@@ -133,9 +134,100 @@ HandleAudioProcess(_THIS)
}
}
static void
HandleCaptureProcess(_THIS)
{
Uint8 *buf;
int buflen;
/* Only do something if audio is enabled */
if (!SDL_AtomicGet(&this->enabled) || SDL_AtomicGet(&this->paused)) {
return;
}
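/* Fill from the format converter's scratch buffer when conversion is
   needed; otherwise capture directly into a lazily-allocated mixbuf. */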
if (this->convert.needed) {
buf = this->convert.buf;
buflen = this->convert.len_cvt;
} else {
if (!this->hidden->mixbuf) {
this->hidden->mixbuf = (Uint8 *) SDL_malloc(this->spec.size);
if (!this->hidden->mixbuf) {
return; /* oh well. */
}
}
buf = this->hidden->mixbuf;
buflen = this->spec.size;
}
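/* $0 = destination buffer, $1 = frames (samples per channel); the JS
   below interleaves float32 data from the current Web Audio capture
   buffer into the C heap at $0. */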
EM_ASM_ARGS({
var numChannels = SDL2.capture.currentCaptureBuffer.numberOfChannels;
if (numChannels == 1) { /* fastpath this a little for the common (mono) case. */
var channelData = SDL2.capture.currentCaptureBuffer.getChannelData(0);
if (channelData.length != $1) {
throw 'Web Audio capture buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
}
for (var j = 0; j < $1; ++j) {
setValue($0 + (j * 4), channelData[j], 'float');
}
} else {
for (var c = 0; c < numChannels; ++c) {
var channelData = SDL2.capture.currentCaptureBuffer.getChannelData(c);
if (channelData.length != $1) {
throw 'Web Audio capture buffer length mismatch! Destination size: ' + channelData.length + ' samples vs expected ' + $1 + ' samples!';
}
for (var j = 0; j < $1; ++j) {
setValue($0 + (((j * numChannels) + c) * 4), channelData[j], 'float');
}
}
}
}, buf, (this->spec.size / sizeof (float)) / this->spec.channels);
/* okay, we've got an interleaved float32 array in C now. */
if (this->convert.needed) {
SDL_ConvertAudio(&this->convert);
}
/* Send it to the app. */
(*this->spec.callback) (this->spec.userdata, buf, buflen);
}
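For context, a minimal consumer-side sketch (not part of this commit): the callback that HandleCaptureProcess fires is an ordinary SDL audio callback, registered by opening a device with iscapture set. The names CaptureCallback and record_something, and the SDL_RWops sink, are hypothetical illustration only.
/* Hypothetical app-side usage (not part of this commit): open the default
   capture device so that HandleCaptureProcess ends up feeding this callback. */
static void SDLCALL CaptureCallback(void *userdata, Uint8 *stream, int len)
{
    /* 'stream' holds 'len' bytes of captured audio (silence until the
       user grants microphone access). Here we just append it to a sink. */
    SDL_RWwrite((SDL_RWops *) userdata, stream, 1, len);
}

static int record_something(SDL_RWops *out)
{
    SDL_AudioSpec want, have;
    SDL_AudioDeviceID dev;

    SDL_zero(want);
    want.freq = 44100;
    want.format = AUDIO_F32;   /* Web Audio produces float32 natively */
    want.channels = 1;
    want.samples = 1024;
    want.callback = CaptureCallback;
    want.userdata = out;

    dev = SDL_OpenAudioDevice(NULL, 1 /* iscapture */, &want, &have, 0);
    if (dev == 0) {
        return -1;
    }
    SDL_PauseAudioDevice(dev, 0);  /* start capturing */
    /* ... later: SDL_CloseAudioDevice(dev); */
    return 0;
}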
static void
Emscripten_CloseDevice(_THIS)
{
EM_ASM_({
if ($0) {
if (SDL2.capture.silenceTimer !== undefined) {
clearTimeout(SDL2.capture.silenceTimer);
}
if (SDL2.capture.scriptProcessorNode !== undefined) {
SDL2.capture.scriptProcessorNode.disconnect();
SDL2.capture.scriptProcessorNode = undefined;
}
if (SDL2.capture.mediaStreamNode !== undefined) {
SDL2.capture.mediaStreamNode.disconnect();
SDL2.capture.mediaStreamNode = undefined;
}
if (SDL2.capture.silenceBuffer !== undefined) {
SDL2.capture.silenceBuffer = undefined;
}
SDL2.capture = undefined;
} else {
if (SDL2.audio.scriptProcessorNode !== undefined) {
SDL2.audio.scriptProcessorNode.disconnect();
SDL2.audio.scriptProcessorNode = undefined;
}
SDL2.audio = undefined;
}
if ((SDL2.audioContext !== undefined) && (SDL2.audio === undefined) && (SDL2.capture === undefined)) {
SDL2.audioContext.close();
SDL2.audioContext = undefined;
}
}, this->iscapture);
SDL_free(this->hidden->mixbuf);
SDL_free(this->hidden);
}
@@ -144,11 +236,38 @@ static int
Emscripten_OpenDevice(_THIS, void *handle, const char *devname, int iscapture)
{
SDL_bool valid_format = SDL_FALSE;
-    SDL_AudioFormat test_format = SDL_FirstAudioFormat(this->spec.format);
+    SDL_AudioFormat test_format;
int i;
float f;
int result;
/* based on parts of library_sdl.js */
/* create context (TODO: this puts stuff in the global namespace...)*/
result = EM_ASM_INT({
if(typeof(SDL2) === 'undefined') {
SDL2 = {};
}
if (!$0) {
SDL2.audio = {};
} else {
SDL2.capture = {};
}
if (!SDL2.audioContext) {
if (typeof(AudioContext) !== 'undefined') {
SDL2.audioContext = new AudioContext();
} else if (typeof(webkitAudioContext) !== 'undefined') {
SDL2.audioContext = new webkitAudioContext();
}
}
return SDL2.audioContext === undefined ? -1 : 0;
}, iscapture);
if (result < 0) {
return SDL_SetError("Web Audio API is not available!");
}
test_format = SDL_FirstAudioFormat(this->spec.format);
while ((!valid_format) && (test_format)) {
switch (test_format) {
case AUDIO_F32: /* web audio only supports floats */
@@ -173,34 +292,9 @@ Emscripten_OpenDevice(_THIS, void *handle, const char *devname, int iscapture)
}
SDL_zerop(this->hidden);
-    /* based on parts of library_sdl.js */
-    /* create context (TODO: this puts stuff in the global namespace...)*/
-    result = EM_ASM_INT_V({
-        if(typeof(SDL2) === 'undefined')
-            SDL2 = {};
-        if(typeof(SDL2.audio) === 'undefined')
-            SDL2.audio = {};
-        if (!SDL2.audioContext) {
-            if (typeof(AudioContext) !== 'undefined') {
-                SDL2.audioContext = new AudioContext();
-            } else if (typeof(webkitAudioContext) !== 'undefined') {
-                SDL2.audioContext = new webkitAudioContext();
-            } else {
-                return -1;
-            }
-        }
-        return 0;
-    });
-    if (result < 0) {
-        return SDL_SetError("Web Audio API is not available!");
-    }
     /* limit to native freq */
-    int sampleRate = EM_ASM_INT_V({
-        return SDL2.audioContext['sampleRate'];
+    const int sampleRate = EM_ASM_INT_V({
+        return SDL2.audioContext.sampleRate;
});
if(this->spec.freq != sampleRate) {
@@ -217,15 +311,71 @@ Emscripten_OpenDevice(_THIS, void *handle, const char *devname, int iscapture)
SDL_CalculateAudioSpec(&this->spec);
-    /* setup a ScriptProcessorNode */
-    EM_ASM_ARGS({
-        SDL2.audio.scriptProcessorNode = SDL2.audioContext['createScriptProcessor']($1, 0, $0);
-        SDL2.audio.scriptProcessorNode['onaudioprocess'] = function (e) {
-            SDL2.audio.currentOutputBuffer = e['outputBuffer'];
-            Runtime.dynCall('vi', $2, [$3]);
-        };
-        SDL2.audio.scriptProcessorNode['connect'](SDL2.audioContext['destination']);
-    }, this->spec.channels, this->spec.samples, HandleAudioProcess, this);
if (iscapture) {
/* The idea is to take the capture media stream, hook it up to an
audio graph where we can pass it through a ScriptProcessorNode
to access the raw PCM samples and push them to the SDL app's
callback. From there, we "process" the audio data into silence
and forget about it. */
/* This should, strictly speaking, use MediaRecorder for capture, but
this API is cleaner to use and better supported, and fires a
callback whenever there's enough data to fire down into the app.
The downside is that we are spending CPU time silencing a buffer
that the audiocontext uselessly mixes into any output. On the
upside, both of those things are not only run in native code in
the browser, they're probably SIMD code, too. MediaRecorder
feels like it's a pretty inefficient tapdance in similar ways,
to be honest. */
EM_ASM_({
var have_microphone = function(stream) {
clearTimeout(SDL2.capture.silenceTimer);
SDL2.capture.silenceTimer = undefined;
SDL2.capture.mediaStreamNode = SDL2.audioContext.createMediaStreamSource(stream);
SDL2.capture.scriptProcessorNode = SDL2.audioContext.createScriptProcessor($1, $0, 1);
SDL2.capture.scriptProcessorNode.onaudioprocess = function(audioProcessingEvent) {
audioProcessingEvent.outputBuffer.getChannelData(0).fill(0.0);
SDL2.capture.currentCaptureBuffer = audioProcessingEvent.inputBuffer;
Runtime.dynCall('vi', $2, [$3]);
};
SDL2.capture.mediaStreamNode.connect(SDL2.capture.scriptProcessorNode);
SDL2.capture.scriptProcessorNode.connect(SDL2.audioContext.destination);
};
var no_microphone = function(error) {
console.log('we DO NOT have a microphone! (' + error.name + ')...leaving silence callback running.');
};
/* we write silence to the audio callback until the microphone is available (user approves use, etc). */
SDL2.capture.silenceBuffer = SDL2.audioContext.createBuffer($0, $1, SDL2.audioContext.sampleRate);
SDL2.capture.silenceBuffer.getChannelData(0).fill(0.0);
var silence_callback = function() {
SDL2.capture.currentCaptureBuffer = SDL2.capture.silenceBuffer;
Runtime.dynCall('vi', $2, [$3]);
};
SDL2.capture.silenceTimer = setTimeout(silence_callback, ($1 / SDL2.audioContext.sampleRate) * 1000);
if ((navigator.mediaDevices !== undefined) && (navigator.mediaDevices.getUserMedia !== undefined)) {
navigator.mediaDevices.getUserMedia({ audio: true, video: false }).then(have_microphone).catch(no_microphone);
} else if (navigator.webkitGetUserMedia !== undefined) {
navigator.webkitGetUserMedia({ audio: true, video: false }, have_microphone, no_microphone);
}
}, this->spec.channels, this->spec.samples, HandleCaptureProcess, this);
} else {
/* setup a ScriptProcessorNode */
EM_ASM_ARGS({
SDL2.audio.scriptProcessorNode = SDL2.audioContext['createScriptProcessor']($1, 0, $0);
SDL2.audio.scriptProcessorNode['onaudioprocess'] = function (e) {
SDL2.audio.currentOutputBuffer = e['outputBuffer'];
Runtime.dynCall('vi', $2, [$3]);
};
SDL2.audio.scriptProcessorNode['connect'](SDL2.audioContext['destination']);
}, this->spec.channels, this->spec.samples, HandleAudioProcess, this);
}
return 0;
}
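One subtlety in the silence-timer setup above: setTimeout takes milliseconds, so the interval is the buffer duration (samples divided by sample rate) scaled to ms. A quick illustrative check, with hypothetical but typical values:
#include <stdio.h>

/* Illustration only: one ScriptProcessorNode buffer's duration is the
   cadence the silence timer approximates before mic access is granted. */
int main(void)
{
    const int samples = 1024;      /* this->spec.samples */
    const int sampleRate = 44100;  /* SDL2.audioContext.sampleRate */
    const double ms = (samples / (double) sampleRate) * 1000.0;
    printf("silence callback every ~%.1f ms\n", ms);  /* ~23.2 ms */
    return 0;
}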
@@ -236,7 +386,6 @@ Emscripten_Init(SDL_AudioDriverImpl * impl)
impl->OpenDevice = Emscripten_OpenDevice;
impl->CloseDevice = Emscripten_CloseDevice;
-    /* only one output */
impl->OnlyHasDefaultOutputDevice = 1;
/* no threads here */
@@ -244,7 +393,7 @@ Emscripten_Init(SDL_AudioDriverImpl * impl)
impl->ProvidesOwnCallbackThread = 1;
/* check availability */
-    int available = EM_ASM_INT_V({
+    const int available = EM_ASM_INT_V({
if (typeof(AudioContext) !== 'undefined') {
return 1;
} else if (typeof(webkitAudioContext) !== 'undefined') {
@@ -257,6 +406,18 @@ Emscripten_Init(SDL_AudioDriverImpl * impl)
SDL_SetError("No audio context available");
}
const int capture_available = available && EM_ASM_INT_V({
if ((typeof(navigator.mediaDevices) !== 'undefined') && (typeof(navigator.mediaDevices.getUserMedia) !== 'undefined')) {
return 1;
} else if (typeof(navigator.webkitGetUserMedia) !== 'undefined') {
return 1;
}
return 0;
});
impl->HasCaptureSupport = capture_available ? SDL_TRUE : SDL_FALSE;
impl->OnlyHasDefaultCaptureDevice = capture_available ? SDL_TRUE : SDL_FALSE;
return available;
}
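With HasCaptureSupport and OnlyHasDefaultCaptureDevice set, the app side sees exactly one default capture device. A hedged sketch of what enumeration looks like from the application, using the standard SDL2 API (not part of this diff):
#include "SDL.h"
#include <stdio.h>

/* Sketch: with OnlyHasDefaultCaptureDevice set, SDL should report
   exactly one (default) capture device here. */
static void list_capture_devices(void)
{
    int i;
    const int n = SDL_GetNumAudioDevices(1); /* 1 = capture devices */
    for (i = 0; i < n; i++) {
        printf("capture device %d: %s\n", i, SDL_GetAudioDeviceName(i, 1));
    }
}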