Fix the signal wrapper function so that a thread which receives a signal
while waiting on a mutex or condition variable queue is removed from
those queues before the real signal handler is invoked.
This commit is contained in:
Mike Makonnen 2003-12-09 11:12:11 +00:00
parent 6fedbb4e37
commit 8955220107
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=123350
5 changed files with 57 additions and 58 deletions

View file

@ -419,24 +419,15 @@ _cond_wait_backout(pthread_t pthread)
if (cond == NULL)
return;
COND_LOCK(cond);
/* Process according to condition variable type: */
switch (cond->c_type) {
/* Fast condition variable: */
case COND_TYPE_FAST:
_thread_critical_enter(curthread);
cond_queue_remove(cond, pthread);
_thread_critical_exit(curthread);
break;
default:
break;
}
COND_UNLOCK(cond);
}
/*

View file

@ -1284,20 +1284,7 @@ _mutex_lock_backout(pthread_t pthread)
{
struct pthread_mutex *mutex;
/*
* Defer signals to protect the scheduling queues from
* access by the signal handler:
*/
/* _thread_kern_sig_defer();*/
/* XXX - Necessary to obey lock order */
UMTX_LOCK(&pthread->lock);
mutex = pthread->data.mutex;
UMTX_UNLOCK(&pthread->lock);
_SPINLOCK(&mutex->lock);
_thread_critical_enter(pthread);
if ((pthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0) {
mutex_queue_remove(mutex, pthread);
@ -1306,14 +1293,6 @@ _mutex_lock_backout(pthread_t pthread)
pthread->data.mutex = NULL;
}
/*
* Undefer and handle pending signals, yielding if
* necessary:
*/
/* _thread_kern_sig_undefer(); */
_thread_critical_exit(pthread);
_SPINUNLOCK(&mutex->lock);
}
/*
@ -1426,14 +1405,13 @@ get_mcontested(pthread_mutex_t mutexp)
{
int error;
_thread_critical_enter(curthread);
/*
* Put this thread on the mutex's list of waiting threads.
* The lock on the thread ensures atomic (as far as other
* threads are concerned) setting of the thread state with
* it's status on the mutex queue.
*/
_thread_critical_enter(curthread);
mutex_queue_enq(mutexp, curthread);
do {
PTHREAD_SET_STATE(curthread, PS_MUTEX_WAIT);
@ -1443,10 +1421,8 @@ get_mcontested(pthread_mutex_t mutexp)
error = _thread_suspend(curthread, NULL);
if (error != 0 && error != EAGAIN && error != EINTR)
PANIC("Cannot suspend on mutex.");
_SPINLOCK(&mutexp->lock);
_thread_critical_enter(curthread);
} while ((curthread->flags & PTHREAD_FLAGS_IN_MUTEXQ) != 0);
_thread_critical_exit(curthread);
}

View file

@ -782,6 +782,7 @@ int _pthread_mutexattr_settype(pthread_mutexattr_t *, int);
int _pthread_once(pthread_once_t *, void (*) (void));
pthread_t _pthread_self(void);
int _pthread_setspecific(pthread_key_t, const void *);
int _spintrylock(spinlock_t *);
void _thread_exit(char *, int, char *);
void _thread_exit_cleanup(void);
void *_thread_cleanup(pthread_t);

View file

@ -36,6 +36,8 @@
#include <setjmp.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include "thr_private.h"
/* #define DEBUG_SIGNAL */
@ -116,13 +118,10 @@ void
_thread_sig_wrapper(int sig, siginfo_t *info, void *context)
{
struct pthread_state_data psd;
struct sigaction *actp;
__siginfohandler_t *handler;
GIANT_LOCK(curthread);
/* Save the thread's previous state. */
psd.psd_wait_data = curthread->data;
psd.psd_state = curthread->state;
psd.psd_flags = curthread->flags;
struct umtx *up;
spinlock_t *sp;
/*
* Do a little cleanup handling for those threads in
@ -130,33 +129,54 @@ _thread_sig_wrapper(int sig, siginfo_t *info, void *context)
* for these threads are temporarily blocked until
* after cleanup handling.
*/
switch (psd.psd_state) {
switch (curthread->state) {
case PS_COND_WAIT:
/*
* Cache the address, since it will not be available
* after it has been backed out.
*/
up = &curthread->data.cond->c_lock;
UMTX_LOCK(up);
_thread_critical_enter(curthread);
_cond_wait_backout(curthread);
psd.psd_state = PS_RUNNING;
UMTX_UNLOCK(up);
break;
case PS_MUTEX_WAIT:
/*
* Cache the address, since it will not be available
* after it has been backed out.
*/
sp = &curthread->data.mutex->lock;
_SPINLOCK(sp);
_thread_critical_enter(curthread);
_mutex_lock_backout(curthread);
psd.psd_state = PS_RUNNING;
_SPINUNLOCK(sp);
break;
default:
/*
* We need to lock the thread to read it's flags.
*/
_thread_critical_enter(curthread);
break;
}
if (_thread_sigact[sig -1].sa_handler != NULL) {
GIANT_UNLOCK(curthread);
handler = (__siginfohandler_t *)
_thread_sigact[sig - 1].sa_handler;
handler(sig, info, (ucontext_t *)context);
GIANT_LOCK(curthread);
}
/*
* We save the flags now so that any modifications done as part
* of the backout are reflected when the flags are restored.
*/
psd.psd_flags = curthread->flags;
/* Restore the signal frame. */
curthread->data = psd.psd_wait_data;
curthread->state = psd.psd_state;
curthread->flags = psd.psd_flags &
(PTHREAD_FLAGS_PRIVATE | PTHREAD_FLAGS_TRACE);
GIANT_UNLOCK(curthread);
PTHREAD_SET_STATE(curthread, PS_RUNNING);
_thread_critical_exit(curthread);
actp = proc_sigact_sigaction(sig);
handler = (__siginfohandler_t *)actp->sa_handler;
handler(sig, info, (ucontext_t *)context);
/* Restore the thread's flags, and make it runnable */
_thread_critical_enter(curthread);
curthread->flags = psd.psd_flags;
PTHREAD_NEW_STATE(curthread, PS_RUNNING);
_thread_critical_exit(curthread);
}

View file

@ -64,6 +64,17 @@ _spinlock(spinlock_t *lck)
abort();
}
/*
 * Try to acquire the spinlock without blocking.
 *
 * Returns 0 if the lock was acquired, or EBUSY if it is currently
 * held by another thread.  Any other error from umtx_lock() is
 * treated as fatal and aborts the process.
 *
 * NOTE(review): the cast assumes spinlock_t overlays struct umtx so
 * the kernel umtx primitive can operate on it directly -- confirm
 * against the spinlock_t definition in the library headers.
 */
int
_spintrylock(spinlock_t *lck)
{
	int error;
	error = umtx_lock((struct umtx *)lck, curthread->thr_id);
	/* EBUSY simply means "already locked"; anything else is fatal. */
	if (error != 0 && error != EBUSY)
		abort();
	return (error);
}
/*
* Lock a location for the running thread. Yield to allow other
* threads to run if this thread is blocked because the lock is