Modified Microphone implementation to handle only one device at a time (WIP)

Marcelo Fernandez 2018-07-03 22:08:43 -03:00 committed by Saracen
parent 76fd9d215c
commit 061358d838
22 changed files with 556 additions and 1018 deletions


@@ -339,28 +339,6 @@ void AudioDriverALSA::finish() {
finish_device();
}
bool AudioDriverALSA::capture_device_start(StringName p_name) {
return false;
}
bool AudioDriverALSA::capture_device_stop(StringName p_name) {
return false;
}
PoolStringArray AudioDriverALSA::capture_device_get_names() {
PoolStringArray names;
return names;
}
StringName AudioDriverALSA::capture_device_get_default_name() {
return "";
}
AudioDriverALSA::AudioDriverALSA() {
mutex = NULL;


@@ -83,11 +83,6 @@ public:
virtual void unlock();
virtual void finish();
virtual bool capture_device_start(StringName p_name);
virtual bool capture_device_stop(StringName p_name);
virtual PoolStringArray capture_device_get_names();
virtual StringName capture_device_get_default_name();
AudioDriverALSA();
~AudioDriverALSA();
};


@@ -35,6 +35,7 @@
#include "os/os.h"
#define kOutputBus 0
#define kInputBus 1
#ifdef OSX_ENABLED
OSStatus AudioDriverCoreAudio::output_device_address_cb(AudioObjectID inObjectID,
@@ -117,6 +118,11 @@ Error AudioDriverCoreAudio::init() {
result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &strdesc, sizeof(strdesc));
ERR_FAIL_COND_V(result != noErr, FAILED);
strdesc.mChannelsPerFrame = 2;
result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Output, kInputBus, &strdesc, sizeof(strdesc));
ERR_FAIL_COND_V(result != noErr, FAILED);
int latency = GLOBAL_DEF_RST("audio/output_latency", DEFAULT_OUTPUT_LATENCY);
// Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
buffer_frames = closest_power_of_2(latency * mix_rate / 1000);
@@ -126,8 +132,14 @@ Error AudioDriverCoreAudio::init() {
ERR_FAIL_COND_V(result != noErr, FAILED);
#endif
buffer_size = buffer_frames * channels;
unsigned int buffer_size = buffer_frames * channels;
samples_in.resize(buffer_size);
input_buf.resize(buffer_size);
audio_input_buffer.resize(buffer_size * 8);
for (int i = 0; i < audio_input_buffer.size(); i++) {
audio_input_buffer.write[i] = 0;
}
audio_input_position = 0;
if (OS::get_singleton()->is_stdout_verbose()) {
print_line("CoreAudio: detected " + itos(channels) + " channels");
@@ -141,6 +153,12 @@ Error AudioDriverCoreAudio::init() {
result = AudioUnitSetProperty(audio_unit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input, kOutputBus, &callback, sizeof(callback));
ERR_FAIL_COND_V(result != noErr, FAILED);
zeromem(&callback, sizeof(AURenderCallbackStruct));
callback.inputProc = &AudioDriverCoreAudio::input_callback;
callback.inputProcRefCon = this;
result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_SetInputCallback, kAudioUnitScope_Global, 0, &callback, sizeof(callback));
ERR_FAIL_COND_V(result != noErr, FAILED);
result = AudioUnitInitialize(audio_unit);
ERR_FAIL_COND_V(result != noErr, FAILED);
@@ -192,6 +210,42 @@ OSStatus AudioDriverCoreAudio::output_callback(void *inRefCon,
return 0;
};
OSStatus AudioDriverCoreAudio::input_callback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber, UInt32 inNumberFrames,
AudioBufferList *ioData) {
AudioDriverCoreAudio *ad = (AudioDriverCoreAudio *)inRefCon;
if (!ad->active) {
return 0;
}
ad->lock();
AudioBufferList bufferList;
bufferList.mNumberBuffers = 1;
bufferList.mBuffers[0].mData = ad->input_buf.ptrw();
bufferList.mBuffers[0].mNumberChannels = 2;
bufferList.mBuffers[0].mDataByteSize = ad->input_buf.size() * sizeof(int16_t);
OSStatus result = AudioUnitRender(ad->audio_unit, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
if (result == noErr) {
for (int i = 0; i < inNumberFrames * 2; i++) {
ad->audio_input_buffer.write[ad->audio_input_position++] = ad->input_buf[i] << 16;
if (ad->audio_input_position >= ad->audio_input_buffer.size()) {
ad->audio_input_position = 0;
}
}
} else {
ERR_PRINT(("AudioUnitRender failed, code: " + itos(result)).utf8().get_data());
}
ad->unlock();
return result;
}
void AudioDriverCoreAudio::start() {
if (!active) {
OSStatus result = AudioOutputUnitStart(audio_unit);
@@ -434,26 +488,22 @@ void AudioDriverCoreAudio::finish() {
}
};
bool AudioDriverCoreAudio::capture_device_start(StringName p_name) {
return false;
}
bool AudioDriverCoreAudio::capture_device_stop(StringName p_name) {
return false;
}
PoolStringArray AudioDriverCoreAudio::capture_device_get_names() {
PoolStringArray names;
return names;
}
StringName AudioDriverCoreAudio::capture_device_get_default_name() {
return "";
}
Error AudioDriverCoreAudio::capture_start() {
UInt32 flag = 1;
OSStatus result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));
ERR_FAIL_COND_V(result != noErr, FAILED);
return OK;
}
Error AudioDriverCoreAudio::capture_stop() {
UInt32 flag = 0;
OSStatus result = AudioUnitSetProperty(audio_unit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Input, kInputBus, &flag, sizeof(flag));
ERR_FAIL_COND_V(result != noErr, FAILED);
return OK;
}
AudioDriverCoreAudio::AudioDriverCoreAudio() {
@@ -463,7 +513,6 @@ AudioDriverCoreAudio::AudioDriverCoreAudio() {
mix_rate = 0;
channels = 2;
buffer_size = 0;
buffer_frames = 0;
samples_in.clear();

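Note: the added input_callback above renders the captured frames into input_buf and then copies them into audio_input_buffer as 32-bit samples (the 16-bit value shifted into the high word), wrapping audio_input_position back to zero when it reaches the end. A minimal standalone sketch of that circular-write pattern, with std::vector standing in for Godot's Vector (illustration only, not the engine code):

#include <cstdint>
#include <vector>

// Fixed-size circular capture buffer: writes wrap around and overwrite the
// oldest data instead of growing, mirroring how input_callback stores samples.
struct CaptureRing {
	std::vector<int32_t> buffer;
	size_t position = 0; // next write index

	explicit CaptureRing(size_t samples) : buffer(samples, 0) {}

	// Store a 16-bit sample in the high 16 bits of a 32-bit slot,
	// the same "sample << 16" conversion used in the callback.
	void push(int16_t sample) {
		buffer[position++] = int32_t(sample) << 16;
		if (position >= buffer.size()) {
			position = 0;
		}
	}
};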

@@ -52,9 +52,9 @@ class AudioDriverCoreAudio : public AudioDriver {
int mix_rate;
unsigned int channels;
unsigned int buffer_frames;
unsigned int buffer_size;
Vector<int32_t> samples_in;
Vector<int16_t> input_buf;
#ifdef OSX_ENABLED
static OSStatus output_device_address_cb(AudioObjectID inObjectID,
@@ -68,6 +68,12 @@ class AudioDriverCoreAudio : public AudioDriver {
UInt32 inBusNumber, UInt32 inNumberFrames,
AudioBufferList *ioData);
static OSStatus input_callback(void *inRefCon,
AudioUnitRenderActionFlags *ioActionFlags,
const AudioTimeStamp *inTimeStamp,
UInt32 inBusNumber, UInt32 inNumberFrames,
AudioBufferList *ioData);
public:
const char *get_name() const {
return "CoreAudio";
@@ -86,10 +92,8 @@ public:
virtual void unlock();
virtual void finish();
virtual bool capture_device_start(StringName p_name);
virtual bool capture_device_stop(StringName p_name);
virtual PoolStringArray capture_device_get_names();
virtual StringName capture_device_get_default_name();
virtual Error capture_start();
virtual Error capture_stop();
bool try_lock();
void stop();


@@ -287,74 +287,71 @@ float AudioDriverPulseAudio::get_latency() {
void AudioDriverPulseAudio::thread_func(void *p_udata) {
AudioDriverPulseAudio *ad = (AudioDriverPulseAudio *)p_udata;
unsigned int write_ofs = 0;
size_t avail_bytes = 0;
while (!ad->exit_thread) {
size_t read_bytes = 0;
size_t written_bytes = 0;
if (avail_bytes == 0) {
ad->lock();
ad->start_counting_ticks();
if (!ad->active) {
for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
ad->samples_out.write[i] = 0;
}
} else {
ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());
if (ad->channels == ad->pa_map.channels) {
for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
ad->samples_out.write[i] = ad->samples_in[i] >> 16;
}
} else {
// Uneven amount of channels
unsigned int in_idx = 0;
unsigned int out_idx = 0;
for (unsigned int i = 0; i < ad->buffer_frames; i++) {
for (unsigned int j = 0; j < ad->pa_map.channels - 1; j++) {
ad->samples_out.write[out_idx++] = ad->samples_in[in_idx++] >> 16;
}
uint32_t l = ad->samples_in[in_idx++];
uint32_t r = ad->samples_in[in_idx++];
ad->samples_out.write[out_idx++] = (l >> 1 + r >> 1) >> 16;
}
}
}
avail_bytes = ad->pa_buffer_size * sizeof(int16_t);
write_ofs = 0;
ad->stop_counting_ticks();
ad->unlock();
}
ad->lock();
ad->start_counting_ticks();
if (!ad->active) {
for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
ad->samples_out.write[i] = 0;
}
} else {
ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());
if (ad->channels == ad->pa_map.channels) {
for (unsigned int i = 0; i < ad->pa_buffer_size; i++) {
ad->samples_out.write[i] = ad->samples_in[i] >> 16;
}
} else {
// Uneven amount of channels
unsigned int in_idx = 0;
unsigned int out_idx = 0;
for (unsigned int i = 0; i < ad->buffer_frames; i++) {
for (unsigned int j = 0; j < ad->pa_map.channels - 1; j++) {
ad->samples_out.write[out_idx++] = ad->samples_in[in_idx++] >> 16;
}
uint32_t l = ad->samples_in[in_idx++];
uint32_t r = ad->samples_in[in_idx++];
ad->samples_out.write[out_idx++] = (l >> 1 + r >> 1) >> 16;
}
}
}
int error_code;
int byte_size = ad->pa_buffer_size * sizeof(int16_t);
int ret;
do {
ret = pa_mainloop_iterate(ad->pa_ml, 0, NULL);
} while (ret > 0);
if (pa_stream_get_state(ad->pa_str) == PA_STREAM_READY) {
const void *ptr = ad->samples_out.ptr();
while (byte_size > 0) {
size_t bytes = pa_stream_writable_size(ad->pa_str);
if (bytes > 0) {
if (bytes > byte_size) {
bytes = byte_size;
}
ret = pa_stream_write(ad->pa_str, ptr, bytes, NULL, 0LL, PA_SEEK_RELATIVE);
if (ret >= 0) {
byte_size -= bytes;
ptr = (const char *)ptr + bytes;
}
} else {
ret = pa_mainloop_iterate(ad->pa_ml, 0, NULL);
if (ret == 0) {
// If pa_mainloop_iterate returns 0 sleep for 1 msec to wait
// for the stream to be able to process more bytes
ad->stop_counting_ticks();
ad->unlock();
OS::get_singleton()->delay_usec(1000);
ad->lock();
ad->start_counting_ticks();
}
}
}
}
if (avail_bytes > 0 && pa_stream_get_state(ad->pa_str) == PA_STREAM_READY) {
size_t bytes = pa_stream_writable_size(ad->pa_str);
if (bytes > 0) {
size_t bytes_to_write = MIN(bytes, avail_bytes);
const void *ptr = ad->samples_out.ptr();
ret = pa_stream_write(ad->pa_str, ptr + write_ofs, bytes_to_write, NULL, 0LL, PA_SEEK_RELATIVE);
if (ret != 0) {
ERR_PRINT("pa_stream_write error");
} else {
avail_bytes -= bytes_to_write;
write_ofs += bytes_to_write;
written_bytes += bytes_to_write;
}
}
}
@@ -379,8 +376,41 @@ void AudioDriverPulseAudio::thread_func(void *p_udata) {
}
}
if (ad->pa_rec_str && pa_stream_get_state(ad->pa_rec_str) == PA_STREAM_READY) {
size_t bytes = pa_stream_readable_size(ad->pa_rec_str);
if (bytes > 0) {
const void *ptr = NULL;
size_t maxbytes = ad->audio_input_buffer.size() * sizeof(int16_t);
bytes = MIN(bytes, maxbytes);
ret = pa_stream_peek(ad->pa_rec_str, &ptr, &bytes);
if (ret != 0) {
ERR_PRINT("pa_stream_peek error");
} else {
int16_t *srcptr = (int16_t *)ptr;
for (size_t i = bytes >> 1; i > 0; i--) {
ad->audio_input_buffer.write[ad->audio_input_position++] = int32_t(*srcptr++) << 16;
if (ad->audio_input_position >= ad->audio_input_buffer.size()) {
ad->audio_input_position = 0;
}
}
read_bytes += bytes;
ret = pa_stream_drop(ad->pa_rec_str);
if (ret != 0) {
ERR_PRINT("pa_stream_drop error");
}
}
}
}
ad->stop_counting_ticks();
ad->unlock();
// Let the thread rest a while if we haven't read or write anything
if (written_bytes == 0 && read_bytes == 0) {
OS::get_singleton()->delay_usec(1000);
}
}
ad->thread_exited = true;
@@ -510,26 +540,60 @@ void AudioDriverPulseAudio::finish() {
thread = NULL;
}
bool AudioDriverPulseAudio::capture_device_start(StringName p_name) {
return false;
}
Error AudioDriverPulseAudio::capture_start() {
Error err = OK;
lock();
pa_sample_spec spec;
spec.format = PA_SAMPLE_S16LE;
spec.channels = 2;
spec.rate = mix_rate;
int latency = 30;
input_buffer_frames = closest_power_of_2(latency * mix_rate / 1000);
int buffer_size = input_buffer_frames * spec.channels;
pa_buffer_attr attr;
attr.fragsize = buffer_size * sizeof(int16_t);
pa_channel_map pa_rec_map;
pa_channel_map_init_stereo(&pa_rec_map);
pa_rec_str = pa_stream_new(pa_ctx, "Record", &spec, &pa_rec_map);
if (pa_rec_str == NULL) {
ERR_PRINTS("PulseAudio: pa_stream_new error: " + String(pa_strerror(pa_context_errno(pa_ctx))));
ERR_FAIL_V(ERR_CANT_OPEN);
}
pa_stream_flags flags = pa_stream_flags(PA_STREAM_INTERPOLATE_TIMING | PA_STREAM_ADJUST_LATENCY | PA_STREAM_AUTO_TIMING_UPDATE);
int error_code = pa_stream_connect_record(pa_rec_str, NULL, &attr, flags);
if (error_code < 0) {
ERR_PRINTS("PulseAudio: pa_stream_connect_record error: " + String(pa_strerror(error_code)));
err = ERR_CANT_OPEN;
}
audio_input_buffer.resize(input_buffer_frames * 8);
for (int i = 0; i < audio_input_buffer.size(); i++) {
audio_input_buffer.write[i] = 0;
}
audio_input_position = 0;
unlock();
return err;
}
bool AudioDriverPulseAudio::capture_device_stop(StringName p_name) {
return false;
}
PoolStringArray AudioDriverPulseAudio::capture_device_get_names() {
PoolStringArray names;
return names;
}
StringName AudioDriverPulseAudio::capture_device_get_default_name() {
return "";
}
Error AudioDriverPulseAudio::capture_stop() {
if (pa_rec_str) {
pa_stream_disconnect(pa_rec_str);
pa_stream_unref(pa_rec_str);
pa_rec_str = NULL;
}
return OK;
}
AudioDriverPulseAudio::AudioDriverPulseAudio() {
@@ -537,6 +601,7 @@ AudioDriverPulseAudio::AudioDriverPulseAudio() {
pa_ml = NULL;
pa_ctx = NULL;
pa_str = NULL;
pa_rec_str = NULL;
mutex = NULL;
thread = NULL;
@@ -550,6 +615,7 @@ AudioDriverPulseAudio::AudioDriverPulseAudio() {
mix_rate = 0;
buffer_frames = 0;
input_buffer_frames = 0;
pa_buffer_size = 0;
channels = 0;
pa_ready = 0;

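Note: capture_start() above sizes its buffers from a fixed 30 ms target: input_buffer_frames is a power of two close to latency * mix_rate / 1000, the requested fragment size is that frame count times the two channels times sizeof(int16_t), and audio_input_buffer holds eight times the frame count. A standalone sketch of the same arithmetic (illustration only; next_power_of_2 is a simple stand-in for the engine's closest_power_of_2 helper, and 44100 Hz is just an example rate):

#include <cstddef>
#include <cstdint>
#include <cstdio>

static unsigned int next_power_of_2(unsigned int x) {
	unsigned int p = 1;
	while (p < x) {
		p <<= 1;
	}
	return p;
}

int main() {
	const unsigned int mix_rate = 44100; // example; the driver uses its configured mix_rate
	const unsigned int latency_ms = 30;  // fixed capture latency target in capture_start()
	const unsigned int channels = 2;     // the record stream is opened as stereo S16LE

	unsigned int input_buffer_frames = next_power_of_2(latency_ms * mix_rate / 1000);
	unsigned int buffer_samples = input_buffer_frames * channels;
	size_t fragsize = buffer_samples * sizeof(int16_t); // requested pa_buffer_attr.fragsize

	printf("frames=%u samples=%u fragsize=%zu bytes\n", input_buffer_frames, buffer_samples, fragsize);
	return 0;
}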

@@ -47,6 +47,7 @@ class AudioDriverPulseAudio : public AudioDriver {
pa_mainloop *pa_ml;
pa_context *pa_ctx;
pa_stream *pa_str;
pa_stream *pa_rec_str;
pa_channel_map pa_map;
String device_name;
@@ -58,6 +59,7 @@ class AudioDriverPulseAudio : public AudioDriver {
unsigned int mix_rate;
unsigned int buffer_frames;
unsigned int input_buffer_frames;
unsigned int pa_buffer_size;
int channels;
int pa_ready;
@@ -98,13 +100,11 @@ public:
virtual void unlock();
virtual void finish();
virtual bool capture_device_start(StringName p_name);
virtual bool capture_device_stop(StringName p_name);
virtual PoolStringArray capture_device_get_names();
virtual StringName capture_device_get_default_name();
virtual float get_latency();
virtual Error capture_start();
virtual Error capture_stop();
AudioDriverPulseAudio();
~AudioDriverPulseAudio();
};


@@ -194,28 +194,6 @@ void AudioDriverRtAudio::finish() {
}
}
bool AudioDriverRtAudio::capture_device_start(StringName p_name) {
return false;
}
bool AudioDriverRtAudio::capture_device_stop(StringName p_name) {
return false;
}
PoolStringArray AudioDriverRtAudio::capture_device_get_names() {
PoolStringArray names;
return names;
}
StringName AudioDriverRtAudio::capture_device_get_default_name() {
return "";
}
AudioDriverRtAudio::AudioDriverRtAudio() {
active = false;


@@ -58,11 +58,6 @@ public:
virtual void unlock();
virtual void finish();
virtual bool capture_device_start(StringName p_name);
virtual bool capture_device_stop(StringName p_name);
virtual PoolStringArray capture_device_get_names();
virtual StringName capture_device_get_default_name();
AudioDriverRtAudio();
};


@@ -142,7 +142,7 @@ public:
static CMMNotificationClient notif_client;
Error AudioDriverWASAPI::init_render_device(bool reinit) {
Error AudioDriverWASAPI::audio_device_init(AudioDeviceWASAPI *p_device, bool p_capture, bool reinit) {
WAVEFORMATEX *pwfex;
IMMDeviceEnumerator *enumerator = NULL;
@@ -153,12 +153,12 @@ Error AudioDriverWASAPI::init_render_device(bool reinit) {
HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
if (device_name == "Default") {
if (p_device->device_name == "Default") {
hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
hr = enumerator->GetDefaultAudioEndpoint(p_capture ? eCapture : eRender, eConsole, &device);
} else {
IMMDeviceCollection *devices = NULL;
hr = enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &devices);
hr = enumerator->EnumAudioEndpoints(p_capture ? eCapture : eRender, DEVICE_STATE_ACTIVE, &devices);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
LPWSTR strId = NULL;
@@ -184,7 +184,7 @@ Error AudioDriverWASAPI::init_render_device(bool reinit) {
hr = props->GetValue(PKEY_Device_FriendlyName, &propvar);
ERR_BREAK(hr != S_OK);
if (device_name == String(propvar.pwszVal)) {
if (p_device->device_name == String(propvar.pwszVal)) {
hr = device->GetId(&strId);
ERR_BREAK(hr != S_OK);
@@ -205,9 +205,10 @@ Error AudioDriverWASAPI::init_render_device(bool reinit) {
}
if (device == NULL) {
hr = enumerator->GetDefaultAudioEndpoint(eRender, eConsole, &device);
hr = enumerator->GetDefaultAudioEndpoint(p_capture ? eCapture : eRender, eConsole, &device);
}
}
if (reinit) {
// In case we're trying to re-initialize the device prevent throwing this error on the console,
// otherwise if there is currently no device available this will spam the console.
@@ -225,7 +226,7 @@ Error AudioDriverWASAPI::init_render_device(bool reinit) {
ERR_PRINT("WASAPI: RegisterEndpointNotificationCallback error");
}
hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&audio_client);
hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&p_device->audio_client);
SAFE_RELEASE(device)
if (reinit) {
@@ -236,282 +237,141 @@ Error AudioDriverWASAPI::init_render_device(bool reinit) {
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
}
hr = audio_client->GetMixFormat(&pwfex);
hr = p_device->audio_client->GetMixFormat(&pwfex);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Since we're using WASAPI Shared Mode we can't control any of these, we just tag along
wasapi_channels = pwfex->nChannels;
p_device->channels = pwfex->nChannels;
format_tag = pwfex->wFormatTag;
p_device->format_tag = pwfex->wFormatTag;
bits_per_sample = pwfex->wBitsPerSample;
p_device->bits_per_sample = pwfex->wBitsPerSample;
p_device->frame_size = (p_device->bits_per_sample / 8) * p_device->channels;
switch (wasapi_channels) {
if (p_device->format_tag == WAVE_FORMAT_EXTENSIBLE) {
case 2: // Stereo
case 4: // Surround 3.1
case 6: // Surround 5.1
case 8: // Surround 7.1
channels = wasapi_channels;
break;
default:
WARN_PRINTS("WASAPI: Unsupported number of channels: " + itos(wasapi_channels));
channels = 2;
break;
}
if (format_tag == WAVE_FORMAT_EXTENSIBLE) {
WAVEFORMATEXTENSIBLE *wfex = (WAVEFORMATEXTENSIBLE *)pwfex;
if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) {
format_tag = WAVE_FORMAT_PCM;
p_device->format_tag = WAVE_FORMAT_PCM;
} else if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
format_tag = WAVE_FORMAT_IEEE_FLOAT;
p_device->format_tag = WAVE_FORMAT_IEEE_FLOAT;
} else {
ERR_PRINT("WASAPI: Format not supported");
ERR_FAIL_V(ERR_CANT_OPEN);
}
} else {
if (format_tag != WAVE_FORMAT_PCM && format_tag != WAVE_FORMAT_IEEE_FLOAT) {
if (p_device->format_tag != WAVE_FORMAT_PCM && p_device->format_tag != WAVE_FORMAT_IEEE_FLOAT) {
ERR_PRINT("WASAPI: Format not supported");
ERR_FAIL_V(ERR_CANT_OPEN);
}
}
DWORD streamflags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
DWORD streamflags = 0;
if (mix_rate != pwfex->nSamplesPerSec) {
streamflags |= AUDCLNT_STREAMFLAGS_RATEADJUST;
pwfex->nSamplesPerSec = mix_rate;
pwfex->nAvgBytesPerSec = pwfex->nSamplesPerSec * pwfex->nChannels * (pwfex->wBitsPerSample / 8);
}
hr = audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, streamflags, 0, 0, pwfex, NULL);
hr = p_device->audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, streamflags, 0, p_capture ? REFTIMES_PER_SEC : 0, pwfex, NULL);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
event = CreateEvent(NULL, FALSE, FALSE, NULL);
ERR_FAIL_COND_V(event == NULL, ERR_CANT_OPEN);
hr = audio_client->SetEventHandle(event);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
hr = audio_client->GetService(IID_IAudioRenderClient, (void **)&render_client);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
if (p_capture) {
hr = p_device->audio_client->GetService(IID_IAudioCaptureClient, (void **)&p_device->capture_client);
} else {
hr = p_device->audio_client->GetService(IID_IAudioRenderClient, (void **)&p_device->render_client);
}
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Free memory
CoTaskMemFree(pwfex);
SAFE_RELEASE(device)
return OK;
}
Error AudioDriverWASAPI::init_render_device(bool reinit) {
Error err = audio_device_init(&audio_output, false, reinit);
if (err != OK)
return err;
switch (audio_output.channels) {
case 2: // Stereo
case 4: // Surround 3.1
case 6: // Surround 5.1
case 8: // Surround 7.1
channels = audio_output.channels;
break;
default:
WARN_PRINTS("WASAPI: Unsupported number of channels: " + itos(audio_output.channels));
channels = 2;
break;
}
UINT32 max_frames;
hr = audio_client->GetBufferSize(&max_frames);
HRESULT hr = audio_output.audio_client->GetBufferSize(&max_frames);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Due to WASAPI Shared Mode we have no control of the buffer size
buffer_frames = max_frames;
// Sample rate is independent of channels (ref: https://stackoverflow.com/questions/11048825/audio-sample-frequency-rely-on-channels)
buffer_size = buffer_frames * channels;
samples_in.resize(buffer_size);
samples_in.resize(buffer_frames * channels);
if (OS::get_singleton()->is_stdout_verbose()) {
print_line("WASAPI: detected " + itos(channels) + " channels");
print_line("WASAPI: audio buffer frames: " + itos(buffer_frames) + " calculated latency: " + itos(buffer_frames * 1000 / mix_rate) + "ms");
}
// Free memory
CoTaskMemFree(pwfex);
return OK;
}
Error AudioDriverWASAPI::init_capture_device(bool reinit) {
Error err = audio_device_init(&audio_input, true, reinit);
if (err != OK)
return err;
// Get the max frames
UINT32 max_frames;
HRESULT hr = audio_input.audio_client->GetBufferSize(&max_frames);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Set the buffer size
audio_input_buffer.resize(max_frames * 8);
for (int i = 0; i < audio_input_buffer.size(); i++) {
audio_input_buffer.write[i] = 0;
}
audio_input_position = 0;
return OK;
}
StringName AudioDriverWASAPI::get_default_capture_device_name(IMMDeviceEnumerator *p_enumerator) {
// Setup default device
IMMDevice *default_device = NULL;
LPWSTR pwszID = NULL;
IPropertyStore *props = NULL;
HRESULT hr = p_enumerator->GetDefaultAudioEndpoint(
eCapture, eConsole, &default_device);
ERR_FAIL_COND_V(hr != S_OK, "");
// Get the device ID
hr = default_device->GetId(&pwszID);
ERR_FAIL_COND_V(hr != S_OK, "");
// Get the device properties
hr = default_device->OpenPropertyStore(
STGM_READ, &props);
ERR_FAIL_COND_V(hr != S_OK, "");
PROPVARIANT var_name;
PropVariantInit(&var_name);
// Get the name of the device
hr = props->GetValue(PKEY_Device_FriendlyName, &var_name);
ERR_FAIL_COND_V(hr != S_OK, "");
// Return the name of device
return String(var_name.pwszVal);
}
Error AudioDriverWASAPI::init_capture_devices(bool reinit) {
WAVEFORMATEX *pwfex;
IMMDeviceEnumerator *enumerator = NULL;
IMMDeviceCollection *device_collection = NULL;
IPropertyStore *props = NULL;
capture_device_id_map.clear();
HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
capture_device_default_name = get_default_capture_device_name(enumerator);
// Enumerate a collection of valid devices
hr = enumerator->EnumAudioEndpoints(eCapture, DEVICE_STATE_ACTIVE, &device_collection);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
SAFE_RELEASE(enumerator);
UINT count;
hr = device_collection->GetCount(&count);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Loop through the device count
for (unsigned int i = 0; i < count; i++) {
IMMDevice *device = NULL;
LPWSTR pwszID = NULL;
// Get the device
hr = device_collection->Item(i, &device);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Get the device ID
hr = device->GetId(&pwszID);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Get the device properties
hr = device->OpenPropertyStore(STGM_READ, &props);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
PROPVARIANT var_name;
PropVariantInit(&var_name);
// Get the name of the device
hr = props->GetValue(PKEY_Device_FriendlyName, &var_name);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Save the name of device
StringName name = String(var_name.pwszVal);
// DEBUG: print the device name and ID
printf("Endpoint %d: \"%S\" (%S)\n", i, var_name.pwszVal, pwszID);
capture_device_id_map[StringName(pwszID)] = name;
// Cleanup the ID and properties
CoTaskMemFree(pwszID);
pwszID = NULL;
PropVariantClear(&var_name);
SAFE_RELEASE(props)
// Create a new audio in block descriptor
MicrophoneDeviceOutputDirectWASAPI *microphone_device_output_wasapi = memnew(MicrophoneDeviceOutputDirectWASAPI);
microphone_device_output_wasapi->name = name;
microphone_device_output_wasapi->active = false;
// Push it into the list and assign it to the hash map for quick access
microphone_device_outputs.push_back(microphone_device_output_wasapi);
microphone_device_output_map[name] = microphone_device_output_wasapi;
// Activate the device
hr = device->Activate(IID_IAudioClient, CLSCTX_ALL, NULL, (void **)&microphone_device_output_wasapi->audio_client);
SAFE_RELEASE(device)
// Get the sample rate (hz)
hr = microphone_device_output_wasapi->audio_client->GetMixFormat(&pwfex);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
microphone_device_output_wasapi->channels = pwfex->nChannels;
microphone_device_output_wasapi->mix_rate = pwfex->nSamplesPerSec;
microphone_device_output_wasapi->bits_per_sample = pwfex->wBitsPerSample;
microphone_device_output_wasapi->frame_size = (microphone_device_output_wasapi->bits_per_sample / 8) * microphone_device_output_wasapi->channels;
microphone_device_output_wasapi->current_capture_index = 0;
microphone_device_output_wasapi->current_capture_size = 0;
WORD format_tag = pwfex->wFormatTag;
if (format_tag == WAVE_FORMAT_EXTENSIBLE) {
WAVEFORMATEXTENSIBLE *wfex = (WAVEFORMATEXTENSIBLE *)pwfex;
if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_PCM) {
format_tag = WAVE_FORMAT_PCM;
} else if (wfex->SubFormat == KSDATAFORMAT_SUBTYPE_IEEE_FLOAT) {
format_tag = WAVE_FORMAT_IEEE_FLOAT;
} else {
ERR_PRINT("WASAPI: Format not supported");
ERR_FAIL_V(ERR_CANT_OPEN);
}
} else {
if (format_tag != WAVE_FORMAT_PCM && format_tag != WAVE_FORMAT_IEEE_FLOAT) {
ERR_PRINT("WASAPI: Format not supported");
ERR_FAIL_V(ERR_CANT_OPEN);
}
} }
microphone_device_output_wasapi->capture_format_tag = format_tag;
hr = microphone_device_output_wasapi->audio_client->Initialize(AUDCLNT_SHAREMODE_SHARED, 0, REFTIMES_PER_SEC, 0, pwfex, NULL);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Get the max frames
UINT32 max_frames;
hr = microphone_device_output_wasapi->audio_client->GetBufferSize(&max_frames);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// Set the buffer size
microphone_device_output_wasapi->buffer.resize(max_frames);
memset(microphone_device_output_wasapi->buffer.ptrw(), 0x00, microphone_device_output_wasapi->buffer.size() * microphone_device_output_wasapi->frame_size);
// Get the capture client
hr = microphone_device_output_wasapi->audio_client->GetService(IID_IAudioCaptureClient, (void **)&microphone_device_output_wasapi->capture_client);
ERR_FAIL_COND_V(hr != S_OK, ERR_CANT_OPEN);
// TODO: set audio write stream to correct format
REFERENCE_TIME hns_actual_duration = (double)REFTIMES_PER_SEC * max_frames / pwfex->nSamplesPerSec;
// Free memory
CoTaskMemFree(pwfex);
SAFE_RELEASE(device)
}
SAFE_RELEASE(device_collection)
return OK;
}
Error AudioDriverWASAPI::audio_device_finish(AudioDeviceWASAPI *p_device) {
if (p_device->active) {
if (p_device->audio_client) {
p_device->audio_client->Stop();
}
p_device->active = false;
}
SAFE_RELEASE(p_device->audio_client)
SAFE_RELEASE(p_device->render_client)
SAFE_RELEASE(p_device->capture_client)
return OK;
}
Error AudioDriverWASAPI::finish_render_device() {
if (audio_client) {
if (active) {
audio_client->Stop();
active = false;
}
audio_client->Release();
audio_client = NULL;
}
SAFE_RELEASE(render_client)
SAFE_RELEASE(audio_client)
return OK;
}
Error AudioDriverWASAPI::finish_render_device() {
return audio_device_finish(&audio_output);
}
Error AudioDriverWASAPI::finish_capture_devices() {
microphone_device_output_map.clear();
while (microphone_device_outputs.size() > 0) {
MicrophoneDeviceOutputDirectWASAPI *microphone_device_output = static_cast<MicrophoneDeviceOutputDirectWASAPI *>(microphone_device_outputs.get(0));
SAFE_RELEASE(microphone_device_output->capture_client)
SAFE_RELEASE(microphone_device_output->audio_client)
microphone_device_outputs.erase(microphone_device_output);
memdelete(microphone_device_output);
}
return OK;
}
Error AudioDriverWASAPI::finish_capture_device() {
return audio_device_finish(&audio_input);
}
Error AudioDriverWASAPI::init() {
@@ -523,12 +383,6 @@ Error AudioDriverWASAPI::init() {
ERR_PRINT("WASAPI: init_render_device error");
}
err = init_capture_devices();
if (err != OK) {
ERR_PRINT("WASAPI: init_capture_device error");
}
active = false;
exit_thread = false;
thread_exited = false;
@@ -548,7 +402,7 @@ AudioDriver::SpeakerMode AudioDriverWASAPI::get_speaker_mode() const {
return get_speaker_mode_by_total_channels(channels);
}
Array AudioDriverWASAPI::get_device_list() {
Array AudioDriverWASAPI::audio_device_get_list(bool p_capture) {
Array list;
IMMDeviceCollection *devices = NULL;
@@ -561,7 +415,7 @@ Array AudioDriverWASAPI::get_device_list() {
HRESULT hr = CoCreateInstance(CLSID_MMDeviceEnumerator, NULL, CLSCTX_ALL, IID_IMMDeviceEnumerator, (void **)&enumerator);
ERR_FAIL_COND_V(hr != S_OK, Array());
hr = enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE, &devices);
hr = enumerator->EnumAudioEndpoints(p_capture ? eCapture : eRender, DEVICE_STATE_ACTIVE, &devices);
ERR_FAIL_COND_V(hr != S_OK, Array());
UINT count = 0;
@@ -596,19 +450,28 @@ Array AudioDriverWASAPI::get_device_list() {
return list;
}
Array AudioDriverWASAPI::get_device_list() {
return audio_device_get_list(false);
}
String AudioDriverWASAPI::get_device() {
return device_name;
lock();
String name = audio_output.device_name;
unlock();
return name;
}
void AudioDriverWASAPI::set_device(String device) {
lock();
new_device = device;
audio_output.new_device = device;
unlock();
}
float AudioDriverWASAPI::read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i) {
int32_t AudioDriverWASAPI::read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i) {
if (format_tag == WAVE_FORMAT_PCM) {
int32_t sample = 0;
switch (bits_per_sample) {
@@ -631,19 +494,19 @@ float AudioDriverWASAPI::read_sample(WORD format_tag, int bits_per_sample, BYTE
break;
}
return (sample >> 16) / 32768.f;
return sample;
} else if (format_tag == WAVE_FORMAT_IEEE_FLOAT) {
return ((float *)buffer)[i];
return int32_t(((float *)buffer)[i] * 32768.0) << 16;
} else {
ERR_PRINT("WASAPI: Unknown format tag");
}
return 0.f;
return 0;
}
void AudioDriverWASAPI::write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i, int32_t sample) {
void AudioDriverWASAPI::write_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i, int32_t sample) {
if (ad->format_tag == WAVE_FORMAT_PCM) {
if (format_tag == WAVE_FORMAT_PCM) {
switch (ad->bits_per_sample) {
switch (bits_per_sample) {
case 8:
((int8_t *)buffer)[i] = sample >> 24;
break;
@@ -662,157 +525,99 @@ void AudioDriverWASAPI::write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i,
((int32_t *)buffer)[i] = sample;
break;
}
} else if (ad->format_tag == WAVE_FORMAT_IEEE_FLOAT) {
} else if (format_tag == WAVE_FORMAT_IEEE_FLOAT) {
((float *)buffer)[i] = (sample >> 16) / 32768.f;
} else {
ERR_PRINT("WASAPI: Unknown format tag");
ad->exit_thread = true;
}
}
void AudioDriverWASAPI::thread_func(void *p_udata) {
AudioDriverWASAPI *ad = (AudioDriverWASAPI *)p_udata;
uint32_t avail_frames = 0;
uint32_t write_ofs = 0;
while (!ad->exit_thread) {
// Capture
if (default_capture_device_changed) {
if (ad->capture_device_id_map.has(capture_device_id)) {
Map<StringName, StringName>::Element *e = ad->capture_device_id_map.find(capture_device_id);
ad->lock();
ad->start_counting_ticks();
ad->capture_device_default_name = e->get();
ad->update_microphone_default(ad->capture_device_default_name);
default_capture_device_changed = false;
ad->stop_counting_ticks();
ad->unlock();
}
}
for (int i = 0; i < ad->microphone_device_outputs.size(); i++) {
MicrophoneDeviceOutputDirectWASAPI *microphone_device_output_wasapi = static_cast<MicrophoneDeviceOutputDirectWASAPI *>(ad->microphone_device_outputs[i]);
if (microphone_device_output_wasapi->active == false) {
continue;
}
UINT32 packet_length = 0;
BYTE *data;
UINT32 num_frames_available;
DWORD flags;
HRESULT hr = microphone_device_output_wasapi->capture_client->GetNextPacketSize(&packet_length);
ERR_BREAK(hr != S_OK);
while (packet_length != 0) {
hr = microphone_device_output_wasapi->capture_client->GetBuffer(&data, &num_frames_available, &flags, NULL, NULL);
ERR_BREAK(hr != S_OK);
unsigned int frames_to_copy = num_frames_available;
if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
memset((char *)(microphone_device_output_wasapi->buffer.ptrw()) + (microphone_device_output_wasapi->current_capture_index * microphone_device_output_wasapi->frame_size), 0, frames_to_copy * microphone_device_output_wasapi->frame_size);
} else {
// fixme: Only works for floating point atm
for (int j = 0; j < frames_to_copy; j++) {
float l, r;
if (microphone_device_output_wasapi->channels == 2) {
l = read_sample(microphone_device_output_wasapi->capture_format_tag, microphone_device_output_wasapi->bits_per_sample, data, j * 2);
r = read_sample(microphone_device_output_wasapi->capture_format_tag, microphone_device_output_wasapi->bits_per_sample, data, j * 2 + 1);
} else if (microphone_device_output_wasapi->channels == 1) {
l = r = read_sample(microphone_device_output_wasapi->capture_format_tag, microphone_device_output_wasapi->bits_per_sample, data, j);
} else {
l = r = 0.f;
ERR_PRINT("WASAPI: unsupported channel count in microphone!");
}
microphone_device_output_wasapi->buffer[microphone_device_output_wasapi->current_capture_index++] = AudioFrame(l, r);
if (microphone_device_output_wasapi->current_capture_index >= microphone_device_output_wasapi->buffer.size()) {
microphone_device_output_wasapi->current_capture_index = 0;
}
if (microphone_device_output_wasapi->current_capture_size < microphone_device_output_wasapi->buffer.size()) {
microphone_device_output_wasapi->current_capture_size++;
}
}
} }
hr = microphone_device_output_wasapi->capture_client->ReleaseBuffer(num_frames_available);
ERR_BREAK(hr != S_OK);
hr = microphone_device_output_wasapi->capture_client->GetNextPacketSize(&packet_length);
ERR_BREAK(hr != S_OK);
} }
}
uint32_t read_frames = 0;
uint32_t written_frames = 0;
if (avail_frames == 0) {
ad->lock();
ad->start_counting_ticks();
if (ad->audio_output.active) {
ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());
} else {
for (unsigned int i = 0; i < ad->samples_in.size(); i++) {
ad->samples_in.write[i] = 0;
}
}
avail_frames = ad->buffer_frames;
write_ofs = 0;
ad->stop_counting_ticks();
ad->unlock();
}
ad->lock();
ad->start_counting_ticks();
if (ad->active) {
if (avail_frames > 0 && ad->audio_output.audio_client) {
ad->audio_server_process(ad->buffer_frames, ad->samples_in.ptrw());
} else {
for (unsigned int i = 0; i < ad->buffer_size; i++) {
ad->samples_in.write[i] = 0;
}
}
ad->stop_counting_ticks();
ad->unlock();
unsigned int left_frames = ad->buffer_frames;
unsigned int buffer_idx = 0;
while (left_frames > 0 && ad->audio_client) {
WaitForSingleObject(ad->event, 1000);
ad->lock();
ad->start_counting_ticks();
UINT32 cur_frames;
bool invalidated = false;
HRESULT hr = ad->audio_client->GetCurrentPadding(&cur_frames);
HRESULT hr = ad->audio_output.audio_client->GetCurrentPadding(&cur_frames);
if (hr == S_OK) {
// Check how much frames are available on the WASAPI buffer
UINT32 avail_frames = ad->buffer_frames - cur_frames;
UINT32 write_frames = avail_frames > left_frames ? left_frames : avail_frames;
UINT32 write_frames = MIN(ad->buffer_frames - cur_frames, avail_frames);
if (write_frames > 0) {
BYTE *buffer = NULL;
hr = ad->audio_output.render_client->GetBuffer(write_frames, &buffer);
if (hr == S_OK) {
BYTE *buffer = NULL;
hr = ad->render_client->GetBuffer(write_frames, &buffer);
if (hr == S_OK) {
// We're using WASAPI Shared Mode so we must convert the buffer
if (ad->channels == ad->wasapi_channels) {
for (unsigned int i = 0; i < write_frames * ad->channels; i++) {
ad->write_sample(ad, buffer, i, ad->samples_in[buffer_idx++]);
}
} else {
for (unsigned int i = 0; i < write_frames; i++) {
for (unsigned int j = 0; j < MIN(ad->channels, ad->wasapi_channels); j++) {
ad->write_sample(ad, buffer, i * ad->wasapi_channels + j, ad->samples_in[buffer_idx++]);
}
if (ad->wasapi_channels > ad->channels) {
for (unsigned int j = ad->channels; j < ad->wasapi_channels; j++) {
ad->write_sample(ad, buffer, i * ad->wasapi_channels + j, 0);
}
}
}
}
// We're using WASAPI Shared Mode so we must convert the buffer
if (ad->channels == ad->audio_output.channels) {
for (unsigned int i = 0; i < write_frames * ad->channels; i++) {
ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i, ad->samples_in.write[write_ofs++]);
}
} else {
for (unsigned int i = 0; i < write_frames; i++) {
for (unsigned int j = 0; j < MIN(ad->channels, ad->audio_output.channels); j++) {
ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i * ad->audio_output.channels + j, ad->samples_in.write[write_ofs++]);
}
if (ad->audio_output.channels > ad->channels) {
for (unsigned int j = ad->channels; j < ad->audio_output.channels; j++) {
ad->write_sample(ad->audio_output.format_tag, ad->audio_output.bits_per_sample, buffer, i * ad->audio_output.channels + j, 0);
}
}
}
}
hr = ad->render_client->ReleaseBuffer(write_frames, 0);
hr = ad->audio_output.render_client->ReleaseBuffer(write_frames, 0);
if (hr != S_OK) {
ERR_PRINT("WASAPI: Release buffer error");
}
left_frames -= write_frames;
} else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
invalidated = true;
} else {
ERR_PRINT("WASAPI: Get buffer error");
ad->exit_thread = true;
avail_frames -= write_frames;
written_frames += write_frames;
} else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
// Device is not valid anymore, reopen it
Error err = ad->finish_render_device();
if (err != OK) {
ERR_PRINT("WASAPI: finish_render_device error");
} else {
// We reopened the device and samples_in may have resized, so invalidate the current avail_frames
avail_frames = 0;
}
} else {
ERR_PRINT("WASAPI: Get buffer error");
ad->exit_thread = true;
}
}
} else if (hr == AUDCLNT_E_DEVICE_INVALIDATED) {
invalidated = true;
@@ -829,16 +634,10 @@ void AudioDriverWASAPI::thread_func(void *p_udata) {
ERR_PRINT("WASAPI: finish_render_device error");
}
}
ad->stop_counting_ticks();
ad->unlock();
}
ad->lock();
ad->start_counting_ticks();
// If we're using the Default device and it changed finish it so we'll re-init the device
if (ad->device_name == "Default" && default_render_device_changed) {
if (ad->audio_output.device_name == "Default" && default_render_device_changed) {
Error err = ad->finish_render_device();
if (err != OK) {
ERR_PRINT("WASAPI: finish_render_device error");
@@ -848,23 +647,67 @@ void AudioDriverWASAPI::thread_func(void *p_udata) {
}
// User selected a new device, finish the current one so we'll init the new device
if (ad->device_name != ad->new_device) {
ad->device_name = ad->new_device;
if (ad->audio_output.device_name != ad->audio_output.new_device) {
ad->audio_output.device_name = ad->audio_output.new_device;
Error err = ad->finish_render_device();
if (err != OK) {
ERR_PRINT("WASAPI: finish_render_device error");
}
}
if (!ad->audio_client) {
if (!ad->audio_output.audio_client) {
Error err = ad->init_render_device(true);
if (err == OK) {
ad->start();
}
}
if (ad->audio_input.active) {
UINT32 packet_length = 0;
BYTE *data;
UINT32 num_frames_available;
DWORD flags;
HRESULT hr = ad->audio_input.capture_client->GetNextPacketSize(&packet_length);
if (hr == S_OK) {
while (packet_length != 0) {
hr = ad->audio_input.capture_client->GetBuffer(&data, &num_frames_available, &flags, NULL, NULL);
ERR_BREAK(hr != S_OK);
// fixme: Only works for floating point atm
for (int j = 0; j < num_frames_available * ad->audio_input.channels; j++) {
int32_t sample;
if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
sample = 0;
} else {
sample = read_sample(ad->audio_input.format_tag, ad->audio_input.bits_per_sample, data, j);
}
ad->audio_input_buffer.write[ad->audio_input_position++] = sample;
if (ad->audio_input_position >= ad->audio_input_buffer.size()) {
ad->audio_input_position = 0;
}
}
read_frames += num_frames_available;
hr = ad->audio_input.capture_client->ReleaseBuffer(num_frames_available);
ERR_BREAK(hr != S_OK);
hr = ad->audio_input.capture_client->GetNextPacketSize(&packet_length);
ERR_BREAK(hr != S_OK);
}
}
}
ad->stop_counting_ticks();
ad->unlock();
// Let the thread rest a while if we haven't read or write anything
if (written_frames == 0 && read_frames == 0) {
OS::get_singleton()->delay_usec(1000);
}
}
ad->thread_exited = true;
@@ -872,12 +715,12 @@ void AudioDriverWASAPI::thread_func(void *p_udata) {
void AudioDriverWASAPI::start() {
if (audio_client) {
HRESULT hr = audio_client->Start();
if (audio_output.audio_client) {
HRESULT hr = audio_output.audio_client->Start();
if (hr != S_OK) {
ERR_PRINT("WASAPI: Start failed");
} else {
active = true;
audio_output.active = true;
}
}
}
@@ -904,7 +747,7 @@ void AudioDriverWASAPI::finish() {
thread = NULL;
}
finish_capture_devices();
finish_capture_device();
finish_render_device();
if (mutex) {
@@ -913,83 +756,70 @@ void AudioDriverWASAPI::finish() {
}
}
bool AudioDriverWASAPI::capture_device_start(StringName p_name) {
if (microphone_device_output_map.has(p_name)) {
MicrophoneDeviceOutputDirectWASAPI *microphone_device_output_wasapi = static_cast<MicrophoneDeviceOutputDirectWASAPI *>(microphone_device_output_map[p_name]);
if (microphone_device_output_wasapi->active == false) {
microphone_device_output_wasapi->audio_client->Start();
microphone_device_output_wasapi->active = true;
microphone_device_output_wasapi->set_read_index(-2048);
}
return true;
}
return false;
}
bool AudioDriverWASAPI::capture_device_stop(StringName p_name) {
if (microphone_device_output_map.has(p_name)) {
MicrophoneDeviceOutputDirectWASAPI *microphone_device_output_wasapi = static_cast<MicrophoneDeviceOutputDirectWASAPI *>(microphone_device_output_map[p_name]);
if (microphone_device_output_wasapi->active == true) {
microphone_device_output_wasapi->audio_client->Stop();
microphone_device_output_wasapi->active = false;
}
return true;
}
return false;
}
PoolStringArray AudioDriverWASAPI::capture_device_get_names() {
PoolStringArray names;
for (int i = 0; i < microphone_device_outputs.size(); i++) {
MicrophoneDeviceOutputDirectWASAPI *microphone_device_output_wasapi = static_cast<MicrophoneDeviceOutputDirectWASAPI *>(microphone_device_outputs.get(i));
names.push_back(microphone_device_output_wasapi->name);
}
return names;
}
StringName AudioDriverWASAPI::capture_device_get_default_name() {
lock();
StringName capture_device_default_name_local = capture_device_default_name;
unlock();
return capture_device_default_name_local;
}
Error AudioDriverWASAPI::capture_start() {
Error err = init_capture_device();
if (err != OK) {
ERR_PRINT("WASAPI: init_capture_device error");
return err;
}
if (audio_input.active == false) {
audio_input.audio_client->Start();
audio_input.active = true;
return OK;
}
return FAILED;
}
Error AudioDriverWASAPI::capture_stop() {
if (audio_input.active == true) {
audio_input.audio_client->Stop();
audio_input.active = false;
return OK;
}
return FAILED;
}
void AudioDriverWASAPI::capture_set_device(StringName p_name) {
lock();
audio_input.new_device = p_name;
unlock();
}
Array AudioDriverWASAPI::capture_get_device_list() {
return audio_device_get_list(true);
}
StringName AudioDriverWASAPI::capture_get_device() {
lock();
StringName name = audio_input.device_name;
unlock();
return name;
}
AudioDriverWASAPI::AudioDriverWASAPI() {
audio_client = NULL;
render_client = NULL;
mutex = NULL;
thread = NULL;
format_tag = 0;
bits_per_sample = 0;
samples_in.clear();
buffer_size = 0;
channels = 0;
wasapi_channels = 0;
mix_rate = 0;
buffer_frames = 0;
thread_exited = false;
exit_thread = false;
active = false;
device_name = "Default";
new_device = "Default";
capture_device_default_name = "";
}
#endif

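Note: throughout the WASAPI changes above, samples move between the mixer and the device as 32-bit integers whose meaningful 16 bits sit in the high word: read_sample() shifts narrower PCM formats up and converts IEEE-float input with "* 32768.0" before the shift, while write_sample() reverses it with ">> 16" (and "/ 32768.f" for float output). A standalone sketch of that convention (illustration only, not the engine code):

#include <cstdint>

// 16-bit PCM capture sample -> internal 32-bit representation (value in high word).
static int32_t sample_from_s16(int16_t s) {
	return int32_t(s) << 16;
}

// 32-bit float capture sample (-1.0 .. 1.0) -> internal representation,
// mirroring int32_t(f * 32768.0) << 16 in read_sample().
static int32_t sample_from_float(float f) {
	return int32_t(f * 32768.0f) << 16;
}

// Internal representation -> float output sample, mirroring
// (sample >> 16) / 32768.f in write_sample().
static float sample_to_float(int32_t s) {
	return (s >> 16) / 32768.0f;
}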

@@ -43,53 +43,63 @@
class AudioDriverWASAPI : public AudioDriver {
HANDLE event;
// Audio out
IAudioClient *audio_client;
IAudioRenderClient *render_client;
// Microphone
class MicrophoneDeviceOutputDirectWASAPI : public MicrophoneDeviceOutputDirect {
public:
IAudioClient *audio_client;
IAudioCaptureClient *capture_client;
WORD capture_format_tag;
class AudioDeviceWASAPI {
public:
IAudioClient *audio_client;
IAudioRenderClient *render_client;
IAudioCaptureClient *capture_client;
bool active;
WORD format_tag;
WORD bits_per_sample;
unsigned int channels;
unsigned int frame_size;
String device_name;
String new_device;
AudioDeviceWASAPI() {
audio_client = NULL;
render_client = NULL;
capture_client = NULL;
active = false;
format_tag = 0;
bits_per_sample = 0;
channels = 0;
frame_size = 0;
device_name = "Default";
new_device = "Default";
}
};
//
AudioDeviceWASAPI audio_input;
AudioDeviceWASAPI audio_output;
Mutex *mutex;
Thread *thread;
String device_name;
String new_device;
String capture_device_default_name;
WORD format_tag;
WORD bits_per_sample;
Vector<int32_t> samples_in;
Map<StringName, StringName> capture_device_id_map;
unsigned int buffer_size;
unsigned int channels;
unsigned int wasapi_channels;
int mix_rate;
int buffer_frames;
bool thread_exited;
mutable bool exit_thread;
bool active;
_FORCE_INLINE_ void write_sample(AudioDriverWASAPI *ad, BYTE *buffer, int i, int32_t sample);
static _FORCE_INLINE_ float read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i);
static _FORCE_INLINE_ void write_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i, int32_t sample);
static _FORCE_INLINE_ int32_t read_sample(WORD format_tag, int bits_per_sample, BYTE *buffer, int i);
static void thread_func(void *p_udata);
StringName get_default_capture_device_name(IMMDeviceEnumerator *p_enumerator);
Error init_render_device(bool reinit = false);
Error init_capture_devices(bool reinit = false);
Error init_capture_device(bool reinit = false);
Error finish_render_device();
Error finish_capture_devices();
Error finish_capture_device();
Error audio_device_init(AudioDeviceWASAPI *p_device, bool p_capture, bool reinit);
Error audio_device_finish(AudioDeviceWASAPI *p_device);
Array audio_device_get_list(bool p_capture);
public:
virtual const char *get_name() const {
@@ -107,10 +117,11 @@ public:
virtual void unlock();
virtual void finish();
virtual bool capture_device_start(StringName p_name);
virtual bool capture_device_stop(StringName p_name);
virtual PoolStringArray capture_device_get_names();
virtual StringName capture_device_get_default_name();
virtual Error capture_start();
virtual Error capture_stop();
virtual Array capture_get_device_list();
virtual void capture_set_device(StringName p_name);
virtual StringName capture_get_device();
AudioDriverWASAPI();
};


@@ -210,28 +210,6 @@ void AudioDriverXAudio2::finish() {
thread = NULL;
};
bool AudioDriverXAudio2::capture_device_start(StringName p_name) {
return false;
}
bool AudioDriverXAudio2::capture_device_stop(StringName p_name) {
return false;
}
PoolStringArray AudioDriverXAudio2::capture_device_get_names() {
PoolStringArray names;
return names;
}
StringName AudioDriverXAudio2::capture_device_get_default_name() {
return "";
}
AudioDriverXAudio2::AudioDriverXAudio2() {
mutex = NULL;


@@ -103,11 +103,6 @@ public:
virtual void unlock();
virtual void finish();
virtual bool capture_device_start(StringName p_name);
virtual bool capture_device_stop(StringName p_name);
virtual PoolStringArray capture_device_get_names();
virtual StringName capture_device_get_default_name();
AudioDriverXAudio2();
~AudioDriverXAudio2();
};


@@ -195,28 +195,6 @@ void AudioDriverAndroid::finish() {
active = false;
}
bool AudioDriverAndroid::capture_device_start(StringName p_name) {
return false;
}
bool AudioDriverAndroid::capture_device_stop(StringName p_name) {
return false;
}
PoolStringArray AudioDriverAndroid::capture_device_get_names() {
PoolStringArray names;
return names;
}
StringName AudioDriverAndroid::capture_device_get_default_name() {
return "";
}
void AudioDriverAndroid::set_pause(bool p_pause) {
JNIEnv *env = ThreadAndroid::get_env();


@@ -70,11 +70,6 @@ public:
virtual void unlock();
virtual void finish();
virtual bool capture_device_start(StringName p_name);
virtual bool capture_device_stop(StringName p_name);
virtual PoolStringArray capture_device_get_names();
virtual StringName capture_device_get_default_name();
virtual void set_pause(bool p_pause);
static void setup(jobject p_io);
@@ -249,28 +249,6 @@ void AudioDriverOpenSL::finish() {
(*sl)->Destroy(sl); (*sl)->Destroy(sl);
} }
bool AudioDriverOpenSL::capture_device_start(StringName p_name) {
return false;
}
bool AudioDriverOpenSL::capture_device_stop(StringName p_name) {
return false;
}
PoolStringArray AudioDriverOpenSL::capture_device_get_names() {
PoolStringArray names;
return names;
}
StringName AudioDriverOpenSL::capture_device_get_default_name() {
return "";
}
void AudioDriverOpenSL::set_pause(bool p_pause) { void AudioDriverOpenSL::set_pause(bool p_pause) {
pause = p_pause; pause = p_pause;
@@ -98,11 +98,6 @@ public:
virtual void unlock(); virtual void unlock();
virtual void finish(); virtual void finish();
virtual bool capture_device_start(StringName p_name);
virtual bool capture_device_stop(StringName p_name);
virtual PoolStringArray capture_device_get_names();
virtual StringName capture_device_get_default_name();
virtual void set_pause(bool p_pause); virtual void set_pause(bool p_pause);
AudioDriverOpenSL(); AudioDriverOpenSL();
@@ -125,28 +125,6 @@ void AudioDriverDummy::finish() {
thread = NULL; thread = NULL;
}; };
bool AudioDriverDummy::capture_device_start(StringName p_name) {
return false;
}
bool AudioDriverDummy::capture_device_stop(StringName p_name) {
return false;
}
PoolStringArray AudioDriverDummy::capture_device_get_names() {
PoolStringArray names;
return names;
}
StringName AudioDriverDummy::capture_device_get_default_name() {
return "";
}
AudioDriverDummy::AudioDriverDummy() { AudioDriverDummy::AudioDriverDummy() {
mutex = NULL; mutex = NULL;
@@ -68,11 +68,6 @@ public:
virtual void unlock(); virtual void unlock();
virtual void finish(); virtual void finish();
virtual bool capture_device_start(StringName p_name);
virtual bool capture_device_stop(StringName p_name);
virtual PoolStringArray capture_device_get_names();
virtual StringName capture_device_get_default_name();
AudioDriverDummy(); AudioDriverDummy();
~AudioDriverDummy(); ~AudioDriverDummy();
}; };
@@ -29,6 +29,7 @@
/*************************************************************************/ /*************************************************************************/
#include "audio_stream.h" #include "audio_stream.h"
#include "os/os.h"
////////////////////////////// //////////////////////////////
@@ -119,33 +120,11 @@ String AudioStreamMicrophone::get_stream_name() const {
return "Microphone"; return "Microphone";
} }
void AudioStreamMicrophone::set_microphone_name(const String &p_name) {
if (microphone_name != p_name) {
microphone_name = p_name;
for (Set<AudioStreamPlaybackMicrophone *>::Element *E = playbacks.front(); E; E = E->next()) {
if (E->get()->active) {
// Is this the right thing to do?
E->get()->stop();
E->get()->start();
}
}
}
}
StringName AudioStreamMicrophone::get_microphone_name() const {
return microphone_name;
}
float AudioStreamMicrophone::get_length() const { float AudioStreamMicrophone::get_length() const {
return 0; return 0;
} }
void AudioStreamMicrophone::_bind_methods() { void AudioStreamMicrophone::_bind_methods() {
ClassDB::bind_method(D_METHOD("set_microphone_name", "name"), &AudioStreamMicrophone::set_microphone_name);
ClassDB::bind_method(D_METHOD("get_microphone_name"), &AudioStreamMicrophone::get_microphone_name);
ADD_PROPERTY(PropertyInfo(Variant::STRING, "microphone_name"), "set_microphone_name", "get_microphone_name");
} }
AudioStreamMicrophone::AudioStreamMicrophone() { AudioStreamMicrophone::AudioStreamMicrophone() {
@@ -153,20 +132,25 @@ AudioStreamMicrophone::AudioStreamMicrophone() {
void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_frames) { void AudioStreamPlaybackMicrophone::_mix_internal(AudioFrame *p_buffer, int p_frames) {
AudioDriver::MicrophoneDeviceOutput *microphone_device_output = reciever->owner; AudioDriver::get_singleton()->lock();
const Vector<AudioFrame> &source_buffer = microphone_device_output->get_buffer();
int current_buffer_size = microphone_device_output->get_current_buffer_size(); Vector<int32_t> buf = AudioDriver::get_singleton()->get_audio_input_buffer();
for (int i = 0; i < p_frames; i++) { for (int i = 0; i < p_frames; i++) {
if (current_buffer_size >= internal_mic_offset) {
if (internal_mic_offset >= source_buffer.size()) { float l = (buf[input_ofs++] >> 16) / 32768.f;
internal_mic_offset = 0; if (input_ofs >= buf.size()) {
} input_ofs = 0;
p_buffer[i] = source_buffer[internal_mic_offset++];
} else {
p_buffer[i] = AudioFrame(0.f, 0.f);
} }
float r = (buf[input_ofs++] >> 16) / 32768.f;
if (input_ofs >= buf.size()) {
input_ofs = 0;
}
p_buffer[i] = AudioFrame(l, r);
} }
AudioDriver::get_singleton()->unlock();
} }
void AudioStreamPlaybackMicrophone::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) { void AudioStreamPlaybackMicrophone::mix(AudioFrame *p_buffer, float p_rate_scale, int p_frames) {
@@ -174,28 +158,21 @@ void AudioStreamPlaybackMicrophone::mix(AudioFrame *p_buffer, float p_rate_scale
} }
float AudioStreamPlaybackMicrophone::get_stream_sampling_rate() { float AudioStreamPlaybackMicrophone::get_stream_sampling_rate() {
return reciever->owner->get_mix_rate(); return AudioDriver::get_singleton()->get_mix_rate();
} }
void AudioStreamPlaybackMicrophone::start(float p_from_pos) { void AudioStreamPlaybackMicrophone::start(float p_from_pos) {
internal_mic_offset = 0; input_ofs = 0;
AudioDriver::get_singleton()->capture_start();
active = true; active = true;
// note: can this be called twice?
reciever = AudioServer::get_singleton()->create_microphone_reciever(microphone->get_microphone_name());
if (reciever == NULL) {
active = false;
}
_begin_resample(); _begin_resample();
} }
void AudioStreamPlaybackMicrophone::stop() { void AudioStreamPlaybackMicrophone::stop() {
AudioDriver::get_singleton()->capture_stop();
active = false; active = false;
if (reciever != NULL) {
AudioServer::get_singleton()->destroy_microphone_reciever(reciever);
reciever = NULL;
}
} }
bool AudioStreamPlaybackMicrophone::is_playing() const { bool AudioStreamPlaybackMicrophone::is_playing() const {
@@ -220,8 +197,6 @@ AudioStreamPlaybackMicrophone::~AudioStreamPlaybackMicrophone() {
} }
AudioStreamPlaybackMicrophone::AudioStreamPlaybackMicrophone() { AudioStreamPlaybackMicrophone::AudioStreamPlaybackMicrophone() {
internal_mic_offset = 0;
reciever = NULL;
} }
//////////////////////////////// ////////////////////////////////
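With the receiver plumbing removed, wiring the microphone up amounts to handing an AudioStreamMicrophone to a player; starting the playback is what triggers the driver's capture_start(). A hedged usage fragment (the 'player' pointer to an AudioStreamPlayer is assumed to exist; project code would normally do the equivalent from script):

    // Illustrative fragment only.
    Ref<AudioStreamMicrophone> mic;
    mic.instance();            // create the stream resource
    player->set_stream(mic);   // 'player' is an existing AudioStreamPlayer *
    player->play();            // playback start() calls AudioDriver::capture_start()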
@@ -104,7 +104,6 @@ class AudioStreamMicrophone : public AudioStream {
friend class AudioStreamPlaybackMicrophone; friend class AudioStreamPlaybackMicrophone;
Set<AudioStreamPlaybackMicrophone *> playbacks; Set<AudioStreamPlaybackMicrophone *> playbacks;
StringName microphone_name;
protected: protected:
static void _bind_methods(); static void _bind_methods();
@@ -113,9 +112,6 @@ public:
virtual Ref<AudioStreamPlayback> instance_playback(); virtual Ref<AudioStreamPlayback> instance_playback();
virtual String get_stream_name() const; virtual String get_stream_name() const;
void set_microphone_name(const String &p_name);
StringName get_microphone_name() const;
virtual float get_length() const; //if supported, otherwise return 0 virtual float get_length() const; //if supported, otherwise return 0
AudioStreamMicrophone(); AudioStreamMicrophone();
@@ -127,10 +123,9 @@ class AudioStreamPlaybackMicrophone : public AudioStreamPlaybackResampled {
friend class AudioStreamMicrophone; friend class AudioStreamMicrophone;
bool active; bool active;
uint32_t internal_mic_offset; unsigned int input_ofs;
Ref<AudioStreamMicrophone> microphone; Ref<AudioStreamMicrophone> microphone;
AudioDriver::MicrophoneReciever *reciever;
protected: protected:
virtual void _mix_internal(AudioFrame *p_buffer, int p_frames); virtual void _mix_internal(AudioFrame *p_buffer, int p_frames);
@@ -73,42 +73,6 @@ void AudioDriver::update_mix_time(int p_frames) {
_last_mix_time = OS::get_singleton()->get_ticks_usec(); _last_mix_time = OS::get_singleton()->get_ticks_usec();
} }
void AudioDriver::update_microphone_default(StringName p_device_name) {
if (default_microphone_device_output != NULL) {
MicrophoneDeviceOutput *output = default_microphone_device_output->owner;
output->remove_reciever(default_microphone_device_output);
while (output != NULL) {
MicrophoneDeviceOutput *owner = output->owner;
if (output->recievers.size() == 0) {
if (owner == NULL) {
if (output->active == true) {
capture_device_stop(output->name);
output->active == false;
}
} else {
owner->remove_reciever(output);
memdelete(output);
}
output = owner;
}
}
if (microphone_device_output_map.has(p_device_name)) {
Map<StringName, MicrophoneDeviceOutput *>::Element *e = microphone_device_output_map.find(p_device_name);
MicrophoneDeviceOutput *new_output = e->get();
new_output->add_reciever(default_microphone_device_output);
if (new_output->active == false) {
capture_device_start(p_device_name);
new_output->active = true;
}
}
output = default_microphone_device_output->owner;
}
}
double AudioDriver::get_mix_time() const { double AudioDriver::get_mix_time() const {
double total = (OS::get_singleton()->get_ticks_usec() - _last_mix_time) / 1000000.0; double total = (OS::get_singleton()->get_ticks_usec() - _last_mix_time) / 1000000.0;
@@ -138,74 +102,6 @@ int AudioDriver::get_total_channels_by_speaker_mode(AudioDriver::SpeakerMode p_m
ERR_FAIL_V(2); ERR_FAIL_V(2);
} }
AudioDriver::MicrophoneReciever *AudioDriver::create_microphone_reciever(const StringName &p_device_name) {
MicrophoneReciever *microphone_reciever = NULL;
MicrophoneDeviceOutput *reciever_output = NULL;
MicrophoneDeviceOutput *device_output = NULL;
StringName device_name = capture_device_get_default_name();
if (microphone_device_output_map.has(device_name)) {
Map<StringName, MicrophoneDeviceOutput *>::Element *e = microphone_device_output_map.find(device_name);
device_output = e->get();
}
if (device_output) {
if (p_device_name == "") {
if (default_microphone_device_output != NULL) {
reciever_output = default_microphone_device_output;
} else {
// Default reciever does not exist, create it and connect it
default_microphone_device_output = memnew(MicrophoneDeviceOutputIndirect);
reciever_output = default_microphone_device_output;
device_output->add_reciever(reciever_output);
}
} else {
if (microphone_device_output_map.has(p_device_name)) {
reciever_output = device_output;
}
}
if (reciever_output) {
microphone_reciever = memnew(MicrophoneReciever);
reciever_output->add_reciever(microphone_reciever);
if (device_output->active == false) {
capture_device_start(device_name);
device_output->active = true;
}
}
}
return microphone_reciever;
}
void AudioDriver::destroy_microphone_reciever(AudioDriver::MicrophoneReciever *p_microphone_reciever) {
if (p_microphone_reciever != NULL) {
MicrophoneDeviceOutput *output = p_microphone_reciever->owner;
output->remove_reciever(p_microphone_reciever);
while (output != NULL) {
MicrophoneDeviceOutput *owner = output->owner;
if (output->recievers.size() == 0) {
if (owner == NULL) {
if (output->active == true) {
capture_device_stop(output->name);
output->active == false;
}
} else {
owner->remove_reciever(output);
memdelete(output);
}
output = owner;
}
}
memdelete(p_microphone_reciever);
}
}
Array AudioDriver::get_device_list() { Array AudioDriver::get_device_list() {
Array list; Array list;
@@ -218,13 +114,19 @@ String AudioDriver::get_device() {
return "Default"; return "Default";
} }
Array AudioDriver::capture_get_device_list() {
Array list;
list.push_back("Default");
return list;
}
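This base implementation is the fallback for every driver that dropped its capture_device_get_names() stub above: it advertises a single "Default" device. A driver that can actually enumerate capture endpoints would be expected to override it, roughly along these lines (a sketch; ExampleAudioDriver is hypothetical and the platform enumeration is omitted):

    // Hypothetical override on an enumerating driver.
    Array ExampleAudioDriver::capture_get_device_list() {
        Array list;
        list.push_back("Default");
        // ...push_back() one entry per capture endpoint reported by the platform API...
        return list;
    }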
AudioDriver::AudioDriver() { AudioDriver::AudioDriver() {
_last_mix_time = 0; _last_mix_time = 0;
_mix_amount = 0; _mix_amount = 0;
default_microphone_device_output = NULL;
#ifdef DEBUG_ENABLED #ifdef DEBUG_ENABLED
prof_time = 0; prof_time = 0;
#endif #endif
@@ -1308,32 +1210,19 @@ void AudioServer::set_device(String device) {
AudioDriver::get_singleton()->set_device(device); AudioDriver::get_singleton()->set_device(device);
} }
PoolStringArray AudioServer::audio_in_get_device_names() { Array AudioServer::capture_get_device_list() {
lock(); return AudioDriver::get_singleton()->capture_get_device_list();
PoolStringArray device_names = AudioDriver::get_singleton()->capture_device_get_names();
unlock();
return device_names;
} }
AudioDriver::MicrophoneReciever *AudioServer::create_microphone_reciever(const StringName &p_device_name) { StringName AudioServer::capture_get_device() {
AudioDriver::MicrophoneReciever *microphone_reciever = NULL;
lock(); return AudioDriver::get_singleton()->capture_get_device();
microphone_reciever = AudioDriver::get_singleton()->create_microphone_reciever(p_device_name);
unlock();
return microphone_reciever;
} }
void AudioServer::destroy_microphone_reciever(AudioDriver::MicrophoneReciever *p_microphone_reciever) { void AudioServer::capture_set_device(StringName device) {
lock();
AudioDriver::get_singleton()->destroy_microphone_reciever(p_microphone_reciever);
unlock();
}
void AudioServer::_change_default_device(StringName p_recording_device_default_name) { AudioDriver::get_singleton()->capture_set_device(device);
} }
void AudioServer::_bind_methods() { void AudioServer::_bind_methods() {
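The hunk stops at _bind_methods(), so the script-facing bindings for the new wrappers are not visible here. Assuming they simply mirror the C++ API, they would look roughly like this (an assumption, not the commit's actual contents):

    // Assumed bindings, not shown in the diff above.
    ClassDB::bind_method(D_METHOD("capture_get_device_list"), &AudioServer::capture_get_device_list);
    ClassDB::bind_method(D_METHOD("capture_get_device"), &AudioServer::capture_get_device);
    ClassDB::bind_method(D_METHOD("capture_set_device", "name"), &AudioServer::capture_set_device);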
@@ -53,9 +53,11 @@ class AudioDriver {
#endif #endif
protected: protected:
Vector<int32_t> audio_input_buffer;
unsigned int audio_input_position;
void audio_server_process(int p_frames, int32_t *p_buffer, bool p_update_mix_time = true); void audio_server_process(int p_frames, int32_t *p_buffer, bool p_update_mix_time = true);
void update_mix_time(int p_frames); void update_mix_time(int p_frames);
void update_microphone_default(StringName p_device_name);
#ifdef DEBUG_ENABLED #ifdef DEBUG_ENABLED
_FORCE_INLINE_ void start_counting_ticks() { prof_ticks = OS::get_singleton()->get_ticks_usec(); } _FORCE_INLINE_ void start_counting_ticks() { prof_ticks = OS::get_singleton()->get_ticks_usec(); }
@@ -68,126 +70,6 @@ protected:
public: public:
double get_mix_time() const; //useful for video -> audio sync double get_mix_time() const; //useful for video -> audio sync
class MicrophoneDeviceOutput;
class MicrophoneReciever {
public:
MicrophoneDeviceOutput *owner;
MicrophoneReciever() {
owner = NULL;
}
~MicrophoneReciever() {
}
};
class MicrophoneDeviceOutput : public MicrophoneReciever {
public:
StringName name;
bool active;
Vector<MicrophoneReciever *> recievers;
virtual unsigned int get_mix_rate() = 0;
virtual Vector<AudioFrame> &get_buffer() = 0;
virtual int get_current_buffer_size() = 0;
virtual int get_read_index() = 0;
virtual void set_read_index(int p_temp_index) = 0;
void add_reciever(MicrophoneReciever *p_reciever) {
if (p_reciever == NULL) {
ERR_PRINT("Attempted to add NULL reciever")
return;
}
if (recievers.find(p_reciever) == -1) {
recievers.push_back(p_reciever);
p_reciever->owner = this;
} else {
ERR_PRINT("Duplicate reciever added")
}
}
void remove_reciever(MicrophoneReciever *p_reciever) {
if (p_reciever == NULL) {
ERR_PRINT("Attempted to remove NULL reciever")
return;
}
int index = recievers.find(p_reciever);
if (index != -1) {
recievers.remove(index);
p_reciever->owner = NULL;
} else {
ERR_PRINT("Attempted to remove invalid reciever")
}
}
};
class MicrophoneDeviceOutputDirect : public MicrophoneDeviceOutput {
public:
enum MicrophoneFormat {
FORMAT_FLOAT,
FORMAT_PCM
};
MicrophoneFormat microphone_format;
unsigned short bits_per_sample;
unsigned int channels;
unsigned int mix_rate;
unsigned short frame_size;
int read_index = -2048;
unsigned int current_capture_index;
unsigned int current_capture_size;
Vector<AudioFrame> buffer;
unsigned int get_mix_rate() {
return mix_rate;
};
Vector<AudioFrame> &get_buffer() {
return buffer;
};
int get_current_buffer_size() { return current_capture_size; }
int get_read_index() {
return read_index;
}
void set_read_index(int p_read_index) {
read_index = p_read_index;
}
};
class MicrophoneDeviceOutputIndirect : public MicrophoneDeviceOutput {
public:
unsigned int get_mix_rate() {
return owner->get_mix_rate();
};
Vector<AudioFrame> &get_buffer() {
return owner->get_buffer();
};
int get_read_index() {
return owner->get_read_index();
}
void set_read_index(int p_read_index) {
owner->set_read_index(p_read_index);
}
int get_current_buffer_size() { return owner->get_current_buffer_size(); }
};
MicrophoneDeviceOutputIndirect *default_microphone_device_output;
Vector<MicrophoneDeviceOutput *> microphone_device_outputs;
Map<StringName, MicrophoneDeviceOutput *> microphone_device_output_map;
Vector<MicrophoneReciever *> direct_recievers;
enum SpeakerMode { enum SpeakerMode {
SPEAKER_MODE_STEREO, SPEAKER_MODE_STEREO,
SPEAKER_SURROUND_31, SPEAKER_SURROUND_31,
@@ -214,18 +96,19 @@ public:
virtual void unlock() = 0; virtual void unlock() = 0;
virtual void finish() = 0; virtual void finish() = 0;
virtual bool capture_device_start(StringName p_name) = 0; virtual Error capture_start() { return FAILED; }
virtual bool capture_device_stop(StringName p_name) = 0; virtual Error capture_stop() { return FAILED; }
virtual PoolStringArray capture_device_get_names() = 0; virtual void capture_set_device(StringName p_name) {}
virtual StringName capture_device_get_default_name() = 0; virtual StringName capture_get_device() { return "Default"; }
virtual Array capture_get_device_list(); // TODO: convert this and get_device_list to PoolStringArray
virtual float get_latency() { return 0; } virtual float get_latency() { return 0; }
SpeakerMode get_speaker_mode_by_total_channels(int p_channels) const; SpeakerMode get_speaker_mode_by_total_channels(int p_channels) const;
int get_total_channels_by_speaker_mode(SpeakerMode) const; int get_total_channels_by_speaker_mode(SpeakerMode) const;
AudioDriver::MicrophoneReciever *create_microphone_reciever(const StringName &p_device_name); Vector<int32_t> get_audio_input_buffer() { return audio_input_buffer; }
void destroy_microphone_reciever(AudioDriver::MicrophoneReciever *p_microphone_reciever); unsigned int get_audio_input_position() { return audio_input_position; }
#ifdef DEBUG_ENABLED #ifdef DEBUG_ENABLED
uint64_t get_profiling_time() const { return prof_time; } uint64_t get_profiling_time() const { return prof_time; }
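audio_input_buffer and audio_input_position, exposed through get_audio_input_buffer()/get_audio_input_position() above, form the shared ring buffer between the platform capture callback and AudioStreamPlaybackMicrophone. The write side is not part of the hunks shown here; a sketch of what it amounts to (ExampleAudioDriver and the helper name are hypothetical):

    // Hypothetical write-side helper a platform capture callback might call once
    // per captured int32 sample (left and right pushed as separate entries).
    void ExampleAudioDriver::input_buffer_write(int32_t p_sample) {
        audio_input_buffer.write[audio_input_position++] = p_sample;
        if (audio_input_position >= (unsigned int)audio_input_buffer.size()) {
            audio_input_position = 0; // wrap around, overwriting the oldest data
        }
    }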
@@ -379,7 +262,6 @@ private:
friend class AudioDriver; friend class AudioDriver;
void _driver_process(int p_frames, int32_t *p_buffer); void _driver_process(int p_frames, int32_t *p_buffer);
void _change_default_device(StringName p_recording_device_default_name);
protected: protected:
static void _bind_methods(); static void _bind_methods();
@@ -479,10 +361,9 @@ public:
String get_device(); String get_device();
void set_device(String device); void set_device(String device);
AudioDriver::MicrophoneReciever *create_microphone_reciever(const StringName &p_device_name); Array capture_get_device_list();
void destroy_microphone_reciever(AudioDriver::MicrophoneReciever *p_microphone_reciever); StringName capture_get_device();
void capture_set_device(StringName device);
PoolStringArray audio_in_get_device_names();
float get_output_latency() { return output_latency; } float get_output_latency() { return output_latency; }
AudioServer(); AudioServer();