Clean up latency related functions

Juan Linietsky 2019-04-27 12:22:47 -03:00
parent e5ed112d69
commit af9bb0ea15
6 changed files with 29 additions and 27 deletions


@@ -365,7 +365,7 @@ void VideoStreamPlaybackTheora::set_file(const String &p_file) {
 float VideoStreamPlaybackTheora::get_time() const {

-	return time - AudioServer::get_singleton()->get_output_delay() - delay_compensation; //-((get_total())/(float)vi.rate);
+	return time - AudioServer::get_singleton()->get_output_latency() - delay_compensation; //-((get_total())/(float)vi.rate);
 };

 Ref<Texture> VideoStreamPlaybackTheora::get_texture() {
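
Note: the Theora path (and the WebM hunks below) now reads the renamed AudioServer::get_output_latency() when relating video timestamps to the audio clock; what the user hears trails the mixed time by the device latency plus the stream's delay compensation. A minimal sketch of that clock, where `mixed_time` is an illustrative stand-in for the backend's internal `time` member:

#include "servers/audio_server.h"

// Sketch: the effective video clock trails the mixed audio time by the
// device output latency plus the stream's delay compensation.
// `mixed_time` and `delay_compensation` are illustrative parameters.
double effective_video_clock(double mixed_time, double delay_compensation) {
	return mixed_time - AudioServer::get_singleton()->get_output_latency() - delay_compensation;
}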


@@ -375,7 +375,7 @@ int VideoStreamPlaybackWebm::get_mix_rate() const {
 inline bool VideoStreamPlaybackWebm::has_enough_video_frames() const {
 	if (video_frames_pos > 0) {

-		const double audio_delay = AudioServer::get_singleton()->get_output_delay();
+		const double audio_delay = AudioServer::get_singleton()->get_output_latency();
 		const double video_time = video_frames[video_frames_pos - 1]->time;
 		return video_time >= time + audio_delay + delay_compensation;
 	}
@@ -383,7 +383,7 @@ inline bool VideoStreamPlaybackWebm::has_enough_video_frames() const {
 }

 bool VideoStreamPlaybackWebm::should_process(WebMFrame &video_frame) {

-	const double audio_delay = AudioServer::get_singleton()->get_output_delay();
+	const double audio_delay = AudioServer::get_singleton()->get_output_latency();
 	return video_frame.time >= time + audio_delay + delay_compensation;
 }


@@ -1735,8 +1735,8 @@ public:
 				new_file += l + "\n";
 			} else {
 				String base = l.substr(0, last_tag_pos + last_tag.length());
-				if (manifest_sections.has("application_tags")) {
-					for (List<String>::Element *E = manifest_sections["application_tags"].front(); E; E = E->next()) {
+				if (manifest_sections.has("application_attribs")) {
+					for (List<String>::Element *E = manifest_sections["application_attribs"].front(); E; E = E->next()) {
 						String to_add = E->get().strip_edges();
 						base += " " + to_add + " ";
 					}


@@ -130,7 +130,7 @@ Vector2 AudioEffectSpectrumAnalyzerInstance::get_magnitude_for_frequency_range(f
 	}
 	uint64_t time = OS::get_singleton()->get_ticks_usec();
 	float diff = double(time - last_fft_time) / 1000000.0 + base->get_tap_back_pos();
-	diff -= AudioServer::get_singleton()->get_output_delay();
+	diff -= AudioServer::get_singleton()->get_output_latency();
 	float fft_time_size = float(fft_size) / mix_rate;

 	int fft_index = fft_pos;
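
Note: the spectrum analyzer uses the same renamed accessor when deciding how far back in its FFT history to read, so the reported magnitudes line up with what is currently audible rather than with the newest mixed block. A small sketch of that lookback, with illustrative parameter names:

#include <cstdint>
#include "servers/audio_server.h"

// Sketch: how far back (in seconds) the analyzer should read in its FFT
// history so the result matches what is audible right now.
// `now_usec`, `last_fft_usec` and `tap_back_pos` are illustrative inputs.
double analyzer_lookback(uint64_t now_usec, uint64_t last_fft_usec, double tap_back_pos) {
	double diff = double(now_usec - last_fft_usec) / 1000000.0 + tap_back_pos;
	// The newest mixed blocks have not reached the speakers yet,
	// so step back by the reported output latency.
	return diff - AudioServer::get_singleton()->get_output_latency();
}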


@@ -68,16 +68,16 @@ void AudioDriver::audio_server_process(int p_frames, int32_t *p_buffer, bool p_u
 void AudioDriver::update_mix_time(int p_frames) {

-	_mix_amount += p_frames;
+	_last_mix_frames = p_frames;
 	if (OS::get_singleton())
 		_last_mix_time = OS::get_singleton()->get_ticks_usec();
 }

-double AudioDriver::get_mix_time() const {
+double AudioDriver::get_time_to_next_mix() const {

 	double total = (OS::get_singleton()->get_ticks_usec() - _last_mix_time) / 1000000.0;
-	total += _mix_amount / (double)get_mix_rate();
-	return total;
+	double mix_buffer = _last_mix_frames / (double)get_mix_rate();
+	return mix_buffer - total;
 }

 void AudioDriver::input_buffer_init(int driver_buffer_frames) {
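
Note: with the accumulator gone, the driver only tracks the size and timestamp of the most recent mix block, so get_time_to_next_mix() is simply that block's duration minus the time already elapsed since it was queued. A standalone sketch of the arithmetic, using illustrative names rather than the driver's members:

#include <cstdint>

// Sketch of the reworked driver timing: only the most recent mix block is
// tracked. `now_usec`, `last_mix_usec`, `last_mix_frames` and `mix_rate`
// are illustrative inputs.
double time_to_next_mix(uint64_t now_usec, uint64_t last_mix_usec,
		uint64_t last_mix_frames, double mix_rate) {
	double elapsed = double(now_usec - last_mix_usec) / 1000000.0; // seconds since the block was queued
	double block_length = last_mix_frames / mix_rate;              // seconds of audio in that block
	return block_length - elapsed; // time left before the driver needs another block
}

// Example: a 512-frame block at 44100 Hz queued 5 ms ago leaves
// 512 / 44100 - 0.005, roughly 6.6 ms, until the next mix is needed.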
@@ -148,7 +148,7 @@ Array AudioDriver::capture_get_device_list() {
 AudioDriver::AudioDriver() {

 	_last_mix_time = 0;
-	_mix_amount = 0;
+	_last_mix_frames = 0;
 	input_position = 0;
 	input_size = 0;
@@ -281,13 +281,6 @@ void AudioServer::_driver_process(int p_frames, int32_t *p_buffer) {
 		to_mix -= to_copy;
 	}

-	// Calculate latency for Performance.AUDIO_OUTPUT_LATENCY
-	if (OS::get_singleton()) {
-		uint64_t ticks = OS::get_singleton()->get_ticks_usec();
-		output_latency = (ticks - output_latency_ticks) / 1000000.f;
-		output_latency_ticks = ticks;
-	}
-
 #ifdef DEBUG_ENABLED
 	prof_time += OS::get_singleton()->get_ticks_usec() - prof_ticks;
 #endif
@@ -1107,13 +1100,14 @@ AudioServer *AudioServer::get_singleton() {
 	return singleton;
 }

-double AudioServer::get_mix_time() const {
-	return 0;
+double AudioServer::get_output_latency() const {
+	return AudioDriver::get_singleton()->get_latency();
 }
-double AudioServer::get_output_delay() const {
-	return 0;
+
+double AudioServer::get_time_to_next_mix() const {
+	return AudioDriver::get_singleton()->get_time_to_next_mix();
 }

 AudioServer *AudioServer::singleton = NULL;
@@ -1357,6 +1351,9 @@ void AudioServer::_bind_methods() {
 	ClassDB::bind_method(D_METHOD("get_device"), &AudioServer::get_device);
 	ClassDB::bind_method(D_METHOD("set_device", "device"), &AudioServer::set_device);

+	ClassDB::bind_method(D_METHOD("get_time_to_next_mix"), &AudioServer::get_time_to_next_mix);
+	ClassDB::bind_method(D_METHOD("get_output_latency"), &AudioServer::get_output_latency);
+
 	ClassDB::bind_method(D_METHOD("capture_get_device_list"), &AudioServer::capture_get_device_list);
 	ClassDB::bind_method(D_METHOD("capture_get_device"), &AudioServer::capture_get_device);
 	ClassDB::bind_method(D_METHOD("capture_set_device", "name"), &AudioServer::capture_set_device);
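
Note: with get_time_to_next_mix() and get_output_latency() bound, code outside the server can estimate how far the audible output lags the mixed position. One plausible (not part of this commit) way to combine them, where `mixed_time` is an illustrative parameter for how much audio has been handed to the mixer:

#include "servers/audio_server.h"

// Rough sketch: estimate what the listener is hearing right now.
// `mixed_time` is an illustrative input, not an AudioServer method.
double estimate_audible_time(double mixed_time) {
	AudioServer *as = AudioServer::get_singleton();
	// Audio already mixed still has to wait out the rest of the current
	// block and then pass through the device's output buffer.
	return mixed_time - as->get_time_to_next_mix() - as->get_output_latency();
}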
@@ -1386,6 +1383,8 @@ AudioServer::AudioServer() {
 #ifdef DEBUG_ENABLED
 	prof_time = 0;
 #endif
+	mix_time = 0;
+	mix_size = 0;
 }

 AudioServer::~AudioServer() {


@@ -45,7 +45,7 @@ class AudioDriver {
 	static AudioDriver *singleton;

 	uint64_t _last_mix_time;
-	uint64_t _mix_amount;
+	uint64_t _last_mix_frames;

 #ifdef DEBUG_ENABLED
 	uint64_t prof_ticks;
@@ -71,7 +71,8 @@ protected:
 #endif

 public:
-	double get_mix_time() const; //useful for video -> audio sync
+	double get_time_since_last_mix() const; //useful for video -> audio sync
+	double get_time_to_next_mix() const;

 	enum SpeakerMode {
 		SPEAKER_MODE_STEREO,
@@ -163,6 +164,9 @@ public:
 	typedef void (*AudioCallback)(void *p_userdata);

 private:
+	uint64_t mix_time;
+	int mix_size;
+
 	uint32_t buffer_size;
 	uint64_t mix_count;
 	uint64_t mix_frames;
@@ -351,8 +355,8 @@ public:
 	static AudioServer *get_singleton();

-	virtual double get_mix_time() const; //useful for video -> audio sync
-	virtual double get_output_delay() const;
+	virtual double get_output_latency() const;
+	virtual double get_time_to_next_mix() const;

 	void *audio_data_alloc(uint32_t p_data_len, const uint8_t *p_from_data = NULL);
 	void audio_data_free(void *p_data);
@@ -377,7 +381,6 @@ public:
 	String capture_get_device();
 	void capture_set_device(const String &p_name);

-	float get_output_latency() { return output_latency; }

 	AudioServer();
 	virtual ~AudioServer();
 };