settings: add link.min-buffers option

Add a link.min-buffers option to set the minimum number of buffers to
create for links. Default this to 2 because we now really need two
buffers in case the sink xruns and does async mixing.

Make this an option because in low-memory situations, and when xruns
are not expected, it can be set to 1.
Wim Taymans 2024-04-03 15:02:30 +02:00
parent 5829276dbb
commit dbedd09d42
7 changed files with 19 additions and 8 deletions
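
As a quick illustration of the low-memory case described above, an
override of the new option would sit in the context.properties section
of a PipeWire config file, next to link.max-buffers in the diffs below.
This is an illustrative sketch, not part of the commit:

context.properties = {
    link.min-buffers = 1   # less memory, but async links may glitch on xruns
}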

@@ -217,6 +217,10 @@ Default video rate denominator
 @PAR@ pipewire.conf library.name.system = support/libspa-support
 The name of the shared library to use for the system functions for the main thread.
+@PAR@ pipewire.conf link.min-buffers = 2
+The minimum number of buffers to negotiate between nodes. Using 1 buffer will consume
+less memory but might cause glitches when using async nodes.
 @PAR@ pipewire.conf link.max-buffers = 64
 The maximum number of buffers to negotiate between nodes. Note that version < 3 clients
 can only support 16 buffers. More buffers is almost always worse than less, latency

@@ -13,6 +13,7 @@ context.properties = {
 #library.name.system = support/libspa-support
 #context.data-loop.library.name.system = support/libspa-support
 #support.dbus = true
+#link.min-buffers = 2
 #link.max-buffers = 64
 link.max-buffers = 16 # version < 3 clients can't handle more
 #mem.warn-mlock = false

@@ -9,8 +9,6 @@ context.properties = {
 #library.name.system = support/libspa-support
 #context.data-loop.library.name.system = support/libspa-support
 #support.dbus = true
-#link.max-buffers = 64
-#link.max-buffers = 16 # version < 3 clients can't handle more
 #mem.warn-mlock = false
 #mem.allow-mlock = true
 #mem.mlock-all = false

@@ -13,6 +13,7 @@ context.properties = {
 #library.name.system = support/libspa-support
 #context.data-loop.library.name.system = support/libspa-support
 #support.dbus = true
+#link.min-buffers = 2
 #link.max-buffers = 64
 link.max-buffers = 16 # version < 3 clients can't handle more
 #mem.warn-mlock = false

@@ -98,8 +98,8 @@ static int alloc_buffers(struct pw_mempool *pool,
                 data = NULL;
         }
-        pw_log_debug("%p: layout buffers skel:%p data:%p buffers:%p",
-                        allocation, skel, data, buffers);
+        pw_log_debug("%p: layout buffers skel:%p data:%p n_buffers:%d buffers:%p",
+                        allocation, skel, data, n_buffers, buffers);
         spa_buffer_alloc_layout_array(&info, n_buffers, buffers, skel, data);
         allocation->mem = m;
@@ -295,9 +295,6 @@ int pw_buffers_negotiate(struct pw_context *context, uint32_t flags,
                 align = SPA_MAX(align, qalign);
                 types = qtypes;
-                if (SPA_FLAG_IS_SET(flags, PW_BUFFERS_FLAG_ASYNC))
-                        max_buffers = SPA_MAX(2u, max_buffers);
                 pw_log_debug("%p: %d %d %d %d %d %d -> %d %zd %zd %d %zd %d", result,
                                 qblocks, qminsize, qstride, qmax_buffers, qalign, qtypes,
                                 blocks, minsize, stride, max_buffers, align, types);
@@ -306,9 +303,13 @@ int pw_buffers_negotiate(struct pw_context *context, uint32_t flags,
         if (i == n_params) {
                 pw_log_warn("%p: no buffers param", result);
                 minsize = context->settings.clock_quantum_limit;
-                max_buffers = 2;
+                max_buffers = 2u;
         }
+        if (SPA_FLAG_IS_SET(flags, PW_BUFFERS_FLAG_ASYNC))
+                max_buffers = SPA_MAX(2u, max_buffers);
+        max_buffers = SPA_MAX(context->settings.link_min_buffers, max_buffers);
         if (SPA_FLAG_IS_SET(flags, PW_BUFFERS_FLAG_SHARED_MEM)) {
                 if (types != SPA_ID_INVALID)
                         SPA_FLAG_CLEAR(types, 1<<SPA_DATA_MemPtr);
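
The moved logic is easier to see outside the diff: max_buffers is first
seeded by the negotiated buffers param (or the fallback of 2), then raised
to at least 2 for async links, then raised to at least link.min-buffers.
Below is a standalone C sketch of just that ordering, with simplified
types and a local stand-in for SPA_MAX; it is not the real
pw_buffers_negotiate():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_U32(a, b) ((a) > (b) ? (a) : (b)) /* stand-in for SPA_MAX */

static uint32_t clamp_buffers(bool async, bool have_buffers_param,
                              uint32_t negotiated_max, uint32_t link_min_buffers)
{
        /* fallback when no buffers param was negotiated, as in the hunk above */
        uint32_t max_buffers = have_buffers_param ? negotiated_max : 2u;

        /* async links need at least two buffers: one can be mixed while
         * the other is still in flight after a sink xrun */
        if (async)
                max_buffers = MAX_U32(2u, max_buffers);

        /* never drop below the configured link.min-buffers */
        max_buffers = MAX_U32(link_min_buffers, max_buffers);
        return max_buffers;
}

int main(void)
{
        /* async link offering 1 buffer, default link.min-buffers = 2 -> 2 */
        printf("%u\n", (unsigned)clamp_buffers(true, true, 1u, 2u));
        /* sync link, low-memory config with link.min-buffers = 1 -> 1 */
        printf("%u\n", (unsigned)clamp_buffers(false, true, 1u, 1u));
        return 0;
}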

@@ -46,6 +46,7 @@ struct settings {
         uint32_t clock_quantum_floor; /* quantum floor (lower bound) */
         struct spa_rectangle video_size;
         struct spa_fraction video_rate;
+        uint32_t link_min_buffers;
         uint32_t link_max_buffers;
         unsigned int mem_warn_mlock:1;
         unsigned int mem_allow_mlock:1;

@@ -32,6 +32,7 @@
 #define DEFAULT_VIDEO_HEIGHT 480
 #define DEFAULT_VIDEO_RATE_NUM 25u
 #define DEFAULT_VIDEO_RATE_DENOM 1u
+#define DEFAULT_LINK_MIN_BUFFERS 2u
 #define DEFAULT_LINK_MAX_BUFFERS 64u
 #define DEFAULT_MEM_WARN_MLOCK false
 #define DEFAULT_MEM_ALLOW_MLOCK true
@@ -222,6 +223,7 @@ void pw_settings_init(struct pw_context *this)
         d->log_level = get_default_int(p, "log.level", pw_log_level);
         d->clock_power_of_two_quantum = get_default_bool(p, "clock.power-of-two-quantum",
                         DEFAULT_CLOCK_POWER_OF_TWO_QUANTUM);
+        d->link_min_buffers = get_default_int(p, "link.min-buffers", DEFAULT_LINK_MIN_BUFFERS);
         d->link_max_buffers = get_default_int(p, "link.max-buffers", DEFAULT_LINK_MAX_BUFFERS);
         d->mem_warn_mlock = get_default_bool(p, "mem.warn-mlock", DEFAULT_MEM_WARN_MLOCK);
         d->mem_allow_mlock = get_default_bool(p, "mem.allow-mlock", DEFAULT_MEM_ALLOW_MLOCK);
@@ -229,6 +231,9 @@ void pw_settings_init(struct pw_context *this)
         d->check_quantum = get_default_bool(p, "settings.check-quantum", DEFAULT_CHECK_QUANTUM);
         d->check_rate = get_default_bool(p, "settings.check-rate", DEFAULT_CHECK_RATE);
+        d->link_min_buffers = SPA_MAX(d->link_min_buffers, 1u);
+        d->link_max_buffers = SPA_MAX(d->link_max_buffers, d->link_min_buffers);
         d->clock_quantum_limit = SPA_CLAMP(d->clock_quantum_limit,
                         CLOCK_QUANTUM_FLOOR, CLOCK_QUANTUM_LIMIT);
         d->clock_quantum_floor = SPA_CLAMP(d->clock_quantum_floor,
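
Taken together, the two SPA_MAX clamps above enforce the invariant
1 <= link.min-buffers <= link.max-buffers no matter what a config file
supplies. A small self-contained C check of that behavior, where SPA_MAX
is re-declared as a plain macro standing in for the spa/utils/defs.h
version:

#include <stdint.h>
#include <stdio.h>

#define SPA_MAX(a, b) ((a) > (b) ? (a) : (b)) /* local stand-in */

int main(void)
{
        /* hypothetical bad config: link.min-buffers = 0, link.max-buffers = 1 */
        uint32_t link_min_buffers = 0, link_max_buffers = 1;

        link_min_buffers = SPA_MAX(link_min_buffers, 1u);               /* at least one buffer */
        link_max_buffers = SPA_MAX(link_max_buffers, link_min_buffers); /* max never below min */

        /* prints "min:1 max:1" */
        printf("min:%u max:%u\n", (unsigned)link_min_buffers, (unsigned)link_max_buffers);
        return 0;
}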