rtld: mask signals for default read locks

Rtld locks from libthr defer signal delivery, which means that binding
is not possible while a signal handler is executing.

Binding might upgrade read-locked rtld_bind_lock to write-lock, if
symbol resolution requires loading filters.  If a signal is delivered
while rtld is in a read-locked section, and the signal handler needs binding
which upgrades the lock, for non-threaded image that uses default rtld
locks, we get the rtld data structures modified under the top-level
active rtld frame.

To correct the problem, mask signals for read-locking of default locks
in addition to the write-locking.  It is very cheap now with
sigfastblock(2).

Note that the global state is used to track pre-locked state of either
sigfastblock(2) or signal mask (if sigfastblock(2) is administratively
disabled).  It is fine for non-threaded images since there are no other
threads.  But I believe that it is fine for threaded images using libc_r
as well, since masking signals disables preemption (I have not tested
it).

NetBSD PR:	https://gnats.netbsd.org/56979
Reported by:	tmunro
Reviewed by:	markj
Sponsored by:	The FreeBSD Foundation
MFC after:	2 weeks
Differential revision:	https://reviews.freebsd.org/D36396
This commit is contained in:
Konstantin Belousov 2022-08-30 15:46:30 +03:00
parent a486fbbd78
commit a687683b99

View file

@ -124,16 +124,6 @@ def_lock_destroy(void *lock)
free(l->base);
}
/*
 * Pre-change shared (read) acquisition of a default rtld lock.
 * Publishes a reader reference, then spins until no writer holds
 * the lock.  Note: unlike the write path, this version does NOT
 * mask signals — that is exactly the defect this commit fixes.
 */
static void
def_rlock_acquire(void *lock)
{
Lock *l = (Lock *)lock;
/* Advertise this reader with acquire semantics. */
atomic_add_acq_int(&l->lock, RC_INCR);
/* Wait for any active writer to clear the write flag. */
while (l->lock & WAFLAG)
; /* Spin */
}
static void
sig_fastunblock(void)
{
@ -145,24 +135,37 @@ sig_fastunblock(void)
__sys_sigfastblock(SIGFASTBLOCK_UNBLOCK, NULL);
}
static void
def_wlock_acquire(void *lock)
/*
 * Single attempt to take lock l, exclusively when wlock is true,
 * shared otherwise.  Returns true on success, false if the caller
 * must back off and retry.
 */
static bool
def_lock_acquire_set(Lock *l, bool wlock)
{
if (wlock) {
/* Writer: succeed only if the lock word is completely idle. */
if (atomic_cmpset_acq_int(&l->lock, 0, WAFLAG))
return (true);
} else {
/* Reader: optimistically add a reference ... */
atomic_add_acq_int(&l->lock, RC_INCR);
/* ... keep it if no writer holds the lock ... */
if ((l->lock & WAFLAG) == 0)
return (true);
/* ... otherwise undo the reference before backing off. */
atomic_add_int(&l->lock, -RC_INCR);
}
return (false);
}
/*
 * Acquire the default rtld lock l, exclusively when wlock is true,
 * shared otherwise.  Signals are masked for the whole locked
 * section — via sigfastblock(2) when available, or sigprocmask(2)
 * otherwise — so a signal handler cannot re-enter rtld binding
 * while the lock is held.
 *
 * NOTE(review): the diff rendering interleaved stale pre-change
 * lines here (a shadowing 'Lock *l;' declaration, a reference to
 * the removed 'lock' parameter, and duplicated loop conditions);
 * this body is the reconstructed post-change function.
 */
static void
def_lock_acquire(Lock *l, bool wlock)
{
	sigset_t tmp_oldsigmask;

	if (ld_fast_sigblock) {
		for (;;) {
			/* Defer signal delivery before taking the lock. */
			atomic_add_32(&fsigblock, SIGFASTBLOCK_INC);
			if (def_lock_acquire_set(l, wlock))
				break;
			/* Failed to lock; drop the block and retry. */
			sig_fastunblock();
		}
	} else {
		for (;;) {
			sigprocmask(SIG_BLOCK, &fullsigmask, &tmp_oldsigmask);
			if (def_lock_acquire_set(l, wlock))
				break;
			sigprocmask(SIG_SETMASK, &tmp_oldsigmask, NULL);
		}
		/*
		 * Remember the pre-locked mask for the outermost
		 * acquisition only; def_lock_release() restores it
		 * when wnested drops back to zero.
		 */
		if (atomic_fetchadd_int(&wnested, 1) == 0)
			oldsigmask = tmp_oldsigmask;
	}
}
static void
def_rlock_acquire(void *lock)
{
def_lock_acquire(lock, false);
}
static void
def_wlock_acquire(void *lock)
{
def_lock_acquire(lock, true);
}
/*
 * Release a default rtld lock, whether it was taken shared or
 * exclusively, then undo the signal masking established by
 * def_lock_acquire().
 *
 * NOTE(review): the diff rendering left both the pre-change and
 * post-change bodies in place, which would decrement the lock word
 * twice; this body is the reconstructed post-change function.
 */
static void
def_lock_release(void *lock)
{
	Lock *l;

	l = (Lock *)lock;
	/*
	 * Drop a reader reference when no writer holds the lock,
	 * otherwise clear the write flag.
	 */
	atomic_add_rel_int(&l->lock, -((l->lock & WAFLAG) == 0 ?
	    RC_INCR : WAFLAG));
	/* Re-enable signal delivery now that the lock is dropped. */
	if (ld_fast_sigblock)
		sig_fastunblock();
	else if (atomic_fetchadd_int(&wnested, -1) == 1)
		sigprocmask(SIG_SETMASK, &oldsigmask, NULL);
}
static int