Add implementation of robust mutexes, hopefully close enough to the
intention of the POSIX IEEE Std 1003.1-2008/Cor 1-2013.

A robust mutex is guaranteed to be cleared by the system if the owning
thread or process terminates while holding the mutex.  The next locker
of the mutex is then notified about the inconsistent state and can
execute (or abandon) corrective actions.
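
For illustration, the sketch below shows a minimal consumer of this
interface using only the standard POSIX calls added or exposed by this
change; recover_shared_state() is a placeholder for application-specific
repair code, and error handling is trimmed.

#include <pthread.h>
#include <errno.h>

static void
recover_shared_state(void)
{
        /* Placeholder: repair the data protected by the mutex. */
}

static int
init_robust(pthread_mutex_t *mtx)
{
        pthread_mutexattr_t attr;
        int error;

        error = pthread_mutexattr_init(&attr);
        if (error != 0)
                return (error);
        error = pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
        if (error == 0)
                error = pthread_mutex_init(mtx, &attr);
        pthread_mutexattr_destroy(&attr);
        return (error);
}

static int
lock_robust(pthread_mutex_t *mtx)
{
        int error;

        /*
         * EOWNERDEAD means the previous owner died holding the lock;
         * the lock is granted anyway, and the caller must repair the
         * protected state and mark the mutex consistent, otherwise a
         * later unlock makes it permanently ENOTRECOVERABLE.
         */
        error = pthread_mutex_lock(mtx);
        if (error == EOWNERDEAD) {
                recover_shared_state();
                error = pthread_mutex_consistent(mtx);
        }
        return (error);
}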

The patch mostly consists of small changes here and there, adding the
necessary checks for the inconsistent and abandoned conditions to the
existing paths.  Additionally, the thread exit handler was extended to
iterate over the userspace-maintained list of owned robust mutexes,
unlocking each of them and marking its owner as terminated.
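
Conceptually, that cleanup amounts to the loop sketched below.  This is
an illustration of the described behaviour rather than the kernel code
from this commit; only m_rb_lnk, m_owner and UMUTEX_RB_OWNERDEAD are
names actually used by the patch, and the real walk, including waking
blocked lockers, is done by the kernel.

#include <sys/types.h>
#include <sys/umtx.h>
#include <machine/atomic.h>

/*
 * Illustration only: follow the m_rb_lnk forward pointers from a list
 * head and replace each owner word with UMUTEX_RB_OWNERDEAD, so the
 * next locker of each mutex observes EOWNERDEAD.
 */
static void
robust_list_terminate(uintptr_t head)
{
        struct umutex *m;
        uintptr_t next;

        while (head != 0) {
                m = (struct umutex *)head;
                next = m->m_rb_lnk;
                atomic_store_rel_32(&m->m_owner, UMUTEX_RB_OWNERDEAD);
                /* The kernel wakes one waiter sleeping on m here. */
                head = next;
        }
}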

The list of owned robust mutexes cannot be maintained atomically with
the mutex lock state (it would be possible in the kernel, but is too
expensive).  Instead, for the duration of a lock or unlock operation,
the mutex being operated on is remembered in a special per-thread slot
that is also checked by the kernel at thread termination.

The kernel must know the per-thread locations of the heads of the
robust mutex lists and of the currently active mutex slot.  When a
thread touches a robust mutex for the first time, a new umtx op
(UMTX_OP_ROBUST_LISTS) is issued to inform the kernel of those
locations.
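
Condensed from the mutex_init_robust() change further down in the
diff, the registration is a single UMTX_OP_ROBUST_LISTS call that
passes the addresses of three per-thread fields: the process-shared
list head, the private list head, and the in-progress slot described
above.  A stand-alone sketch, with the new struct pthread members
modelled as thread-local variables:

#include <sys/types.h>
#include <sys/umtx.h>
#include <stdint.h>

/* Modelled after the new struct pthread members added by this patch. */
static __thread uintptr_t robust_list;          /* shared robust mutexes */
static __thread uintptr_t priv_robust_list;     /* private robust mutexes */
static __thread uintptr_t inact_mtx;            /* lock/unlock in progress */

static void
register_robust_lists(void)
{
        struct umtx_robust_lists_params rb;

        rb.robust_list_offset = (uintptr_t)&robust_list;
        rb.robust_priv_list_offset = (uintptr_t)&priv_robust_list;
        rb.robust_inact_offset = (uintptr_t)&inact_mtx;
        _umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL);
}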

The umtx sleep queues for PP and PI mutexes are split between
non-robust and robust.

Somewhat unrelated changes in the patch:
1. Style fixes.
2. A fix for the tdfind() call in umtxq_sleep_pi() for shared
   PI mutexes.
3. Removal of the userspace struct pthread_mutex m_owner field.
4. A new sysctl, kern.ipc.umtx_vnode_persistent, which controls the
   lifetime of the shared mutex associated with a vnode's page (a
   small usage sketch follows).
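
As a trivial illustration (not part of the patch), the knob can be
read and flipped from C with the standard sysctlbyname(3) interface;
setting it requires the usual privileges:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
        int enable, old;
        size_t oldlen;

        enable = 1;
        oldlen = sizeof(old);
        if (sysctlbyname("kern.ipc.umtx_vnode_persistent", &old, &oldlen,
            &enable, sizeof(enable)) == -1) {
                perror("sysctlbyname");
                return (1);
        }
        printf("kern.ipc.umtx_vnode_persistent: %d -> %d\n", old, enable);
        return (0);
}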

Reviewed by:	jilles (previous version, supposedly the objection was fixed)
Discussed with:	brooks, Martin Simmons <martin@lispworks.com> (some aspects)
Tested by:	pho
Sponsored by:	The FreeBSD Foundation
Konstantin Belousov 2016-05-17 09:56:22 +00:00
parent 5105a92c49
commit 2a339d9e3d
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=300043
32 changed files with 1365 additions and 476 deletions


@ -135,6 +135,9 @@ enum pthread_mutextype {
#define PTHREAD_MUTEX_DEFAULT PTHREAD_MUTEX_ERRORCHECK
#define PTHREAD_MUTEX_STALLED 0
#define PTHREAD_MUTEX_ROBUST 1
struct _pthread_cleanup_info {
__uintptr_t pthread_cleanup_pad[8];
};
@ -229,6 +232,8 @@ int pthread_mutexattr_settype(pthread_mutexattr_t *, int)
__nonnull(1);
int pthread_mutexattr_setpshared(pthread_mutexattr_t *, int)
__nonnull(1);
int pthread_mutex_consistent(pthread_mutex_t *__mutex)
__nonnull(1) __requires_exclusive(*__mutex);
int pthread_mutex_destroy(pthread_mutex_t *__mutex)
__nonnull(1) __requires_unlocked(*__mutex);
int pthread_mutex_init(pthread_mutex_t *__mutex,
@ -310,6 +315,11 @@ int pthread_mutex_setprioceiling(pthread_mutex_t *, int, int *);
int pthread_mutexattr_getprotocol(pthread_mutexattr_t *, int *);
int pthread_mutexattr_setprotocol(pthread_mutexattr_t *, int);
int pthread_mutexattr_getrobust(pthread_mutexattr_t *__restrict,
int *__restrict) __nonnull_all;
int pthread_mutexattr_setrobust(pthread_mutexattr_t *, int)
__nonnull(1);
int pthread_attr_getinheritsched(const pthread_attr_t *, int *);
int pthread_attr_getschedparam(const pthread_attr_t *,
struct sched_param *) __nonnull_all;


@ -410,6 +410,9 @@ FBSD_1.3 {
};
FBSD_1.4 {
pthread_mutex_consistent;
pthread_mutexattr_getrobust;
pthread_mutexattr_setrobust;
scandir_b;
};


@ -125,6 +125,9 @@ pthread_func_entry_t __thr_jtable[PJT_MAX] = {
{PJT_DUAL_ENTRY(stub_zero)}, /* PJT_CLEANUP_PUSH_IMP */
{PJT_DUAL_ENTRY(stub_zero)}, /* PJT_CANCEL_ENTER */
{PJT_DUAL_ENTRY(stub_zero)}, /* PJT_CANCEL_LEAVE */
{PJT_DUAL_ENTRY(stub_zero)}, /* PJT_MUTEX_CONSISTENT */
{PJT_DUAL_ENTRY(stub_zero)}, /* PJT_MUTEXATTR_GETROBUST */
{PJT_DUAL_ENTRY(stub_zero)}, /* PJT_MUTEXATTR_SETROBUST */
};
/*
@ -226,9 +229,14 @@ STUB_FUNC2(pthread_mutex_init, PJT_MUTEX_INIT, int, void *, void *)
STUB_FUNC1(pthread_mutex_lock, PJT_MUTEX_LOCK, int, void *)
STUB_FUNC1(pthread_mutex_trylock, PJT_MUTEX_TRYLOCK, int, void *)
STUB_FUNC1(pthread_mutex_unlock, PJT_MUTEX_UNLOCK, int, void *)
STUB_FUNC1(pthread_mutex_consistent, PJT_MUTEX_CONSISTENT, int, void *)
STUB_FUNC1(pthread_mutexattr_destroy, PJT_MUTEXATTR_DESTROY, int, void *)
STUB_FUNC1(pthread_mutexattr_init, PJT_MUTEXATTR_INIT, int, void *)
STUB_FUNC2(pthread_mutexattr_settype, PJT_MUTEXATTR_SETTYPE, int, void *, int)
STUB_FUNC2(pthread_mutexattr_getrobust, PJT_MUTEXATTR_GETROBUST, int, void *,
int *)
STUB_FUNC2(pthread_mutexattr_setrobust, PJT_MUTEXATTR_SETROBUST, int, void *,
int)
STUB_FUNC2(pthread_once, PJT_ONCE, int, void *, void *)
STUB_FUNC1(pthread_rwlock_destroy, PJT_RWLOCK_DESTROY, int, void *)
STUB_FUNC2(pthread_rwlock_init, PJT_RWLOCK_INIT, int, void *, void *)


@ -168,6 +168,9 @@ typedef enum {
PJT_CLEANUP_PUSH_IMP,
PJT_CANCEL_ENTER,
PJT_CANCEL_LEAVE,
PJT_MUTEX_CONSISTENT,
PJT_MUTEXATTR_GETROBUST,
PJT_MUTEXATTR_SETROBUST,
PJT_MAX
} pjt_index_t;


@ -315,3 +315,9 @@ FBSD_1.1 {
FBSD_1.2 {
pthread_getthreadid_np;
};
FBSD_1.4 {
pthread_mutex_consistent;
pthread_mutexattr_getrobust;
pthread_mutexattr_setrobust;
};


@ -188,46 +188,57 @@ _pthread_cond_destroy(pthread_cond_t *cond)
*/
static int
cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
const struct timespec *abstime, int cancel)
const struct timespec *abstime, int cancel)
{
struct pthread *curthread = _get_curthread();
int recurse;
int error, error2 = 0;
struct pthread *curthread;
int error, error2, recurse, robust;
curthread = _get_curthread();
robust = _mutex_enter_robust(curthread, mp);
error = _mutex_cv_detach(mp, &recurse);
if (error != 0)
if (error != 0) {
if (robust)
_mutex_leave_robust(curthread, mp);
return (error);
if (cancel) {
_thr_cancel_enter2(curthread, 0);
error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
(struct umutex *)&mp->m_lock, abstime,
CVWAIT_ABSTIME|CVWAIT_CLOCKID);
_thr_cancel_leave(curthread, 0);
} else {
error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
(struct umutex *)&mp->m_lock, abstime,
CVWAIT_ABSTIME|CVWAIT_CLOCKID);
}
if (cancel)
_thr_cancel_enter2(curthread, 0);
error = _thr_ucond_wait((struct ucond *)&cvp->__has_kern_waiters,
(struct umutex *)&mp->m_lock, abstime, CVWAIT_ABSTIME |
CVWAIT_CLOCKID);
if (cancel)
_thr_cancel_leave(curthread, 0);
/*
* Note that PP mutex and ROBUST mutex may return
* interesting error codes.
*/
if (error == 0) {
error2 = _mutex_cv_lock(mp, recurse);
error2 = _mutex_cv_lock(mp, recurse, true);
} else if (error == EINTR || error == ETIMEDOUT) {
error2 = _mutex_cv_lock(mp, recurse);
error2 = _mutex_cv_lock(mp, recurse, true);
/*
* Do not do cancellation on EOWNERDEAD there. The
* cancellation cleanup handler will use the protected
* state and unlock the mutex without making the state
* consistent and the state will be unrecoverable.
*/
if (error2 == 0 && cancel)
_thr_testcancel(curthread);
if (error == EINTR)
error = 0;
} else {
/* We know that it didn't unlock the mutex. */
error2 = _mutex_cv_attach(mp, recurse);
if (error2 == 0 && cancel)
_mutex_cv_attach(mp, recurse);
if (cancel)
_thr_testcancel(curthread);
error2 = 0;
}
if (robust)
_mutex_leave_robust(curthread, mp);
return (error2 != 0 ? error2 : error);
}
@ -240,14 +251,13 @@ cond_wait_kernel(struct pthread_cond *cvp, struct pthread_mutex *mp,
static int
cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
const struct timespec *abstime, int cancel)
const struct timespec *abstime, int cancel)
{
struct pthread *curthread = _get_curthread();
struct pthread *curthread;
struct sleepqueue *sq;
int recurse;
int error;
int defered;
int deferred, error, error2, recurse;
curthread = _get_curthread();
if (curthread->wchan != NULL)
PANIC("thread was already on queue.");
@ -260,32 +270,31 @@ cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
* us to check it without locking in pthread_cond_signal().
*/
cvp->__has_user_waiters = 1;
defered = 0;
(void)_mutex_cv_unlock(mp, &recurse, &defered);
deferred = 0;
(void)_mutex_cv_unlock(mp, &recurse, &deferred);
curthread->mutex_obj = mp;
_sleepq_add(cvp, curthread);
for(;;) {
_thr_clear_wake(curthread);
_sleepq_unlock(cvp);
if (defered) {
defered = 0;
if (deferred) {
deferred = 0;
if ((mp->m_lock.m_owner & UMUTEX_CONTESTED) == 0)
(void)_umtx_op_err(&mp->m_lock, UMTX_OP_MUTEX_WAKE2,
mp->m_lock.m_flags, 0, 0);
(void)_umtx_op_err(&mp->m_lock,
UMTX_OP_MUTEX_WAKE2, mp->m_lock.m_flags,
0, 0);
}
if (curthread->nwaiter_defer > 0) {
_thr_wake_all(curthread->defer_waiters,
curthread->nwaiter_defer);
curthread->nwaiter_defer);
curthread->nwaiter_defer = 0;
}
if (cancel) {
if (cancel)
_thr_cancel_enter2(curthread, 0);
error = _thr_sleep(curthread, cvp->__clock_id, abstime);
error = _thr_sleep(curthread, cvp->__clock_id, abstime);
if (cancel)
_thr_cancel_leave(curthread, 0);
} else {
error = _thr_sleep(curthread, cvp->__clock_id, abstime);
}
_sleepq_lock(cvp);
if (curthread->wchan == NULL) {
@ -293,25 +302,26 @@ cond_wait_user(struct pthread_cond *cvp, struct pthread_mutex *mp,
break;
} else if (cancel && SHOULD_CANCEL(curthread)) {
sq = _sleepq_lookup(cvp);
cvp->__has_user_waiters =
_sleepq_remove(sq, curthread);
cvp->__has_user_waiters = _sleepq_remove(sq, curthread);
_sleepq_unlock(cvp);
curthread->mutex_obj = NULL;
_mutex_cv_lock(mp, recurse);
error2 = _mutex_cv_lock(mp, recurse, false);
if (!THR_IN_CRITICAL(curthread))
_pthread_exit(PTHREAD_CANCELED);
else /* this should not happen */
return (0);
return (error2);
} else if (error == ETIMEDOUT) {
sq = _sleepq_lookup(cvp);
cvp->__has_user_waiters =
_sleepq_remove(sq, curthread);
_sleepq_remove(sq, curthread);
break;
}
}
_sleepq_unlock(cvp);
curthread->mutex_obj = NULL;
_mutex_cv_lock(mp, recurse);
error2 = _mutex_cv_lock(mp, recurse, false);
if (error == 0)
error = error2;
return (error);
}
@ -338,12 +348,12 @@ cond_wait_common(pthread_cond_t *cond, pthread_mutex_t *mutex,
return (error);
if (curthread->attr.sched_policy != SCHED_OTHER ||
(mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT|UMUTEX_PRIO_INHERIT|
USYNC_PROCESS_SHARED)) != 0 ||
(mp->m_lock.m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT |
USYNC_PROCESS_SHARED)) != 0 ||
(cvp->__flags & USYNC_PROCESS_SHARED) != 0)
return cond_wait_kernel(cvp, mp, abstime, cancel);
return (cond_wait_kernel(cvp, mp, abstime, cancel));
else
return cond_wait_user(cvp, mp, abstime, cancel);
return (cond_wait_user(cvp, mp, abstime, cancel));
}
int
@ -420,15 +430,15 @@ cond_signal_common(pthread_cond_t *cond)
td = _sleepq_first(sq);
mp = td->mutex_obj;
cvp->__has_user_waiters = _sleepq_remove(sq, td);
if (mp->m_owner == TID(curthread)) {
if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
_thr_wake_all(curthread->defer_waiters,
curthread->nwaiter_defer);
curthread->nwaiter_defer);
curthread->nwaiter_defer = 0;
}
curthread->defer_waiters[curthread->nwaiter_defer++] =
&td->wake_addr->value;
mp->m_flags |= PMUTEX_FLAG_DEFERED;
&td->wake_addr->value;
mp->m_flags |= PMUTEX_FLAG_DEFERRED;
} else {
waddr = &td->wake_addr->value;
}
@ -452,15 +462,15 @@ drop_cb(struct pthread *td, void *arg)
struct pthread *curthread = ba->curthread;
mp = td->mutex_obj;
if (mp->m_owner == TID(curthread)) {
if (PMUTEX_OWNER_ID(mp) == TID(curthread)) {
if (curthread->nwaiter_defer >= MAX_DEFER_WAITERS) {
_thr_wake_all(curthread->defer_waiters,
curthread->nwaiter_defer);
curthread->nwaiter_defer);
curthread->nwaiter_defer = 0;
}
curthread->defer_waiters[curthread->nwaiter_defer++] =
&td->wake_addr->value;
mp->m_flags |= PMUTEX_FLAG_DEFERED;
&td->wake_addr->value;
mp->m_flags |= PMUTEX_FLAG_DEFERRED;
} else {
if (ba->count >= MAX_DEFER_WAITERS) {
_thr_wake_all(ba->waddrs, ba->count);


@ -94,6 +94,7 @@ struct pthread_mutex_attr _pthread_mutexattr_default = {
.m_protocol = PTHREAD_PRIO_NONE,
.m_ceiling = 0,
.m_pshared = PTHREAD_PROCESS_PRIVATE,
.m_robust = PTHREAD_MUTEX_STALLED,
};
struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = {
@ -101,6 +102,7 @@ struct pthread_mutex_attr _pthread_mutexattr_adaptive_default = {
.m_protocol = PTHREAD_PRIO_NONE,
.m_ceiling = 0,
.m_pshared = PTHREAD_PROCESS_PRIVATE,
.m_robust = PTHREAD_MUTEX_STALLED,
};
/* Default condition variable attributes: */
@ -265,7 +267,10 @@ static pthread_func_t jmp_table[][2] = {
{DUAL_ENTRY(__pthread_cleanup_pop_imp)},/* PJT_CLEANUP_POP_IMP */
{DUAL_ENTRY(__pthread_cleanup_push_imp)},/* PJT_CLEANUP_PUSH_IMP */
{DUAL_ENTRY(_pthread_cancel_enter)}, /* PJT_CANCEL_ENTER */
{DUAL_ENTRY(_pthread_cancel_leave)} /* PJT_CANCEL_LEAVE */
{DUAL_ENTRY(_pthread_cancel_leave)}, /* PJT_CANCEL_LEAVE */
{DUAL_ENTRY(_pthread_mutex_consistent)},/* PJT_MUTEX_CONSISTENT */
{DUAL_ENTRY(_pthread_mutexattr_getrobust)},/* PJT_MUTEXATTR_GETROBUST */
{DUAL_ENTRY(_pthread_mutexattr_setrobust)},/* PJT_MUTEXATTR_SETROBUST */
};
static int init_once = 0;
@ -308,7 +313,7 @@ _libpthread_init(struct pthread *curthread)
int first, dlopened;
/* Check if this function has already been called: */
if ((_thr_initial != NULL) && (curthread == NULL))
if (_thr_initial != NULL && curthread == NULL)
/* Only initialize the threaded application once. */
return;
@ -316,7 +321,7 @@ _libpthread_init(struct pthread *curthread)
* Check the size of the jump table to make sure it is preset
* with the correct number of entries.
*/
if (sizeof(jmp_table) != (sizeof(pthread_func_t) * PJT_MAX * 2))
if (sizeof(jmp_table) != sizeof(pthread_func_t) * PJT_MAX * 2)
PANIC("Thread jump table not properly initialized");
memcpy(__thr_jtable, jmp_table, sizeof(jmp_table));
__thr_interpose_libc();


@ -1,7 +1,7 @@
/*
* Copyright (c) 1995 John Birrell <jb@cimlogic.com.au>.
* Copyright (c) 2006 David Xu <davidxu@freebsd.org>.
* Copyright (c) 2015 The FreeBSD Foundation
* Copyright (c) 2015, 2016 The FreeBSD Foundation
*
* All rights reserved.
*
@ -39,7 +39,6 @@
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <stdbool.h>
#include "namespace.h"
#include <stdlib.h>
#include <errno.h>
@ -64,6 +63,7 @@ _Static_assert(sizeof(struct pthread_mutex) <= PAGE_SIZE,
/*
* Prototypes
*/
int __pthread_mutex_consistent(pthread_mutex_t *mutex);
int __pthread_mutex_init(pthread_mutex_t *mutex,
const pthread_mutexattr_t *mutex_attr);
int __pthread_mutex_trylock(pthread_mutex_t *mutex);
@ -82,9 +82,13 @@ int __pthread_mutex_setyieldloops_np(pthread_mutex_t *mutex, int count);
static int mutex_self_trylock(pthread_mutex_t);
static int mutex_self_lock(pthread_mutex_t,
const struct timespec *abstime);
static int mutex_unlock_common(struct pthread_mutex *, int, int *);
static int mutex_unlock_common(struct pthread_mutex *, bool, int *);
static int mutex_lock_sleep(struct pthread *, pthread_mutex_t,
const struct timespec *);
static void mutex_init_robust(struct pthread *curthread);
static int mutex_qidx(struct pthread_mutex *m);
static bool is_robust_mutex(struct pthread_mutex *m);
static bool is_pshared_mutex(struct pthread_mutex *m);
__weak_reference(__pthread_mutex_init, pthread_mutex_init);
__strong_reference(__pthread_mutex_init, _pthread_mutex_init);
@ -94,6 +98,8 @@ __weak_reference(__pthread_mutex_timedlock, pthread_mutex_timedlock);
__strong_reference(__pthread_mutex_timedlock, _pthread_mutex_timedlock);
__weak_reference(__pthread_mutex_trylock, pthread_mutex_trylock);
__strong_reference(__pthread_mutex_trylock, _pthread_mutex_trylock);
__weak_reference(_pthread_mutex_consistent, pthread_mutex_consistent);
__strong_reference(_pthread_mutex_consistent, __pthread_mutex_consistent);
/* Single underscore versions provided for libc internal usage: */
/* No difference between libc and application usage of these: */
@ -125,23 +131,23 @@ mutex_init_link(struct pthread_mutex *m)
}
static void
mutex_assert_is_owned(struct pthread_mutex *m)
mutex_assert_is_owned(struct pthread_mutex *m __unused)
{
#if defined(_PTHREADS_INVARIANTS)
if (__predict_false(m->m_qe.tqe_prev == NULL)) {
char msg[128];
snprintf(msg, sizeof(msg),
"mutex %p own %#x %#x is not on list %p %p",
m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
m->m_qe.tqe_next);
"mutex %p own %#x is not on list %p %p",
m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
PANIC(msg);
}
#endif
}
static void
mutex_assert_not_owned(struct pthread_mutex *m)
mutex_assert_not_owned(struct pthread *curthread __unused,
struct pthread_mutex *m __unused)
{
#if defined(_PTHREADS_INVARIANTS)
@ -149,21 +155,68 @@ mutex_assert_not_owned(struct pthread_mutex *m)
m->m_qe.tqe_next != NULL)) {
char msg[128];
snprintf(msg, sizeof(msg),
"mutex %p own %#x %#x is on list %p %p",
m, m->m_lock.m_owner, m->m_owner, m->m_qe.tqe_prev,
m->m_qe.tqe_next);
"mutex %p own %#x is on list %p %p",
m, m->m_lock.m_owner, m->m_qe.tqe_prev, m->m_qe.tqe_next);
PANIC(msg);
}
if (__predict_false(is_robust_mutex(m) &&
(m->m_lock.m_rb_lnk != 0 || m->m_rb_prev != NULL ||
(is_pshared_mutex(m) && curthread->robust_list ==
(uintptr_t)&m->m_lock) ||
(!is_pshared_mutex(m) && curthread->priv_robust_list ==
(uintptr_t)&m->m_lock)))) {
char msg[128];
snprintf(msg, sizeof(msg),
"mutex %p own %#x is on robust linkage %p %p head %p phead %p",
m, m->m_lock.m_owner, (void *)m->m_lock.m_rb_lnk,
m->m_rb_prev, (void *)curthread->robust_list,
(void *)curthread->priv_robust_list);
PANIC(msg);
}
#endif
}
static int
static bool
is_pshared_mutex(struct pthread_mutex *m)
{
return ((m->m_lock.m_flags & USYNC_PROCESS_SHARED) != 0);
}
static bool
is_robust_mutex(struct pthread_mutex *m)
{
return ((m->m_lock.m_flags & UMUTEX_ROBUST) != 0);
}
int
_mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m)
{
#if defined(_PTHREADS_INVARIANTS)
if (__predict_false(curthread->inact_mtx != 0))
PANIC("inact_mtx enter");
#endif
if (!is_robust_mutex(m))
return (0);
mutex_init_robust(curthread);
curthread->inact_mtx = (uintptr_t)&m->m_lock;
return (1);
}
void
_mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m __unused)
{
#if defined(_PTHREADS_INVARIANTS)
if (__predict_false(curthread->inact_mtx != (uintptr_t)&m->m_lock))
PANIC("inact_mtx leave");
#endif
curthread->inact_mtx = 0;
}
static int
mutex_check_attr(const struct pthread_mutex_attr *attr)
{
@ -177,13 +230,28 @@ mutex_check_attr(const struct pthread_mutex_attr *attr)
return (0);
}
static void
mutex_init_robust(struct pthread *curthread)
{
struct umtx_robust_lists_params rb;
if (curthread == NULL)
curthread = _get_curthread();
if (curthread->robust_inited)
return;
rb.robust_list_offset = (uintptr_t)&curthread->robust_list;
rb.robust_priv_list_offset = (uintptr_t)&curthread->priv_robust_list;
rb.robust_inact_offset = (uintptr_t)&curthread->inact_mtx;
_umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL);
curthread->robust_inited = 1;
}
static void
mutex_init_body(struct pthread_mutex *pmutex,
const struct pthread_mutex_attr *attr)
{
pmutex->m_flags = attr->m_type;
pmutex->m_owner = 0;
pmutex->m_count = 0;
pmutex->m_spinloops = 0;
pmutex->m_yieldloops = 0;
@ -205,7 +273,10 @@ mutex_init_body(struct pthread_mutex *pmutex,
}
if (attr->m_pshared == PTHREAD_PROCESS_SHARED)
pmutex->m_lock.m_flags |= USYNC_PROCESS_SHARED;
if (attr->m_robust == PTHREAD_MUTEX_ROBUST) {
mutex_init_robust(NULL);
pmutex->m_lock.m_flags |= UMUTEX_ROBUST;
}
if (PMUTEX_TYPE(pmutex->m_flags) == PTHREAD_MUTEX_ADAPTIVE_NP) {
pmutex->m_spinloops =
_thr_spinloops ? _thr_spinloops: MUTEX_ADAPTIVE_SPINS;
@ -262,7 +333,7 @@ set_inherited_priority(struct pthread *curthread, struct pthread_mutex *m)
{
struct pthread_mutex *m2;
m2 = TAILQ_LAST(&curthread->mq[TMQ_NORM_PP], mutex_queue);
m2 = TAILQ_LAST(&curthread->mq[mutex_qidx(m)], mutex_queue);
if (m2 != NULL)
m->m_lock.m_ceilings[1] = m2->m_lock.m_ceilings[0];
else
@ -277,7 +348,8 @@ shared_mutex_init(struct pthread_mutex *pmtx, const struct
.m_type = PTHREAD_MUTEX_DEFAULT,
.m_protocol = PTHREAD_PRIO_NONE,
.m_ceiling = 0,
.m_pshared = PTHREAD_PROCESS_SHARED
.m_pshared = PTHREAD_PROCESS_SHARED,
.m_robust = PTHREAD_MUTEX_STALLED,
};
bool done;
@ -329,7 +401,7 @@ __pthread_mutex_init(pthread_mutex_t *mutex,
if (mutex_attr == NULL ||
(*mutex_attr)->m_pshared == PTHREAD_PROCESS_PRIVATE) {
return (mutex_init(mutex, mutex_attr ? *mutex_attr : NULL,
calloc));
calloc));
}
pmtx = __thr_pshared_offpage(mutex, 1);
if (pmtx == NULL)
@ -349,6 +421,7 @@ _pthread_mutex_init_calloc_cb(pthread_mutex_t *mutex,
.m_protocol = PTHREAD_PRIO_NONE,
.m_ceiling = 0,
.m_pshared = PTHREAD_PROCESS_PRIVATE,
.m_robust = PTHREAD_MUTEX_STALLED,
};
int ret;
@ -378,7 +451,6 @@ queue_fork(struct pthread *curthread, struct mutex_queue *q,
TAILQ_FOREACH(m, qp, m_pqe) {
TAILQ_INSERT_TAIL(q, m, m_qe);
m->m_lock.m_owner = TID(curthread) | bit;
m->m_owner = TID(curthread);
}
}
@ -390,6 +462,9 @@ _mutex_fork(struct pthread *curthread)
&curthread->mq[TMQ_NORM_PRIV], 0);
queue_fork(curthread, &curthread->mq[TMQ_NORM_PP],
&curthread->mq[TMQ_NORM_PP_PRIV], UMUTEX_CONTESTED);
queue_fork(curthread, &curthread->mq[TMQ_ROBUST_PP],
&curthread->mq[TMQ_ROBUST_PP_PRIV], UMUTEX_CONTESTED);
curthread->robust_list = 0;
}
int
@ -407,17 +482,18 @@ _pthread_mutex_destroy(pthread_mutex_t *mutex)
if (m == THR_PSHARED_PTR) {
m1 = __thr_pshared_offpage(mutex, 0);
if (m1 != NULL) {
mutex_assert_not_owned(m1);
mutex_assert_not_owned(_get_curthread(), m1);
__thr_pshared_destroy(mutex);
}
*mutex = THR_MUTEX_DESTROYED;
return (0);
}
if (m->m_owner != 0) {
if (PMUTEX_OWNER_ID(m) != 0 &&
(uint32_t)m->m_lock.m_owner != UMUTEX_RB_NOTRECOV) {
ret = EBUSY;
} else {
*mutex = THR_MUTEX_DESTROYED;
mutex_assert_not_owned(m);
mutex_assert_not_owned(_get_curthread(), m);
free(m);
ret = 0;
}
@ -432,31 +508,81 @@ mutex_qidx(struct pthread_mutex *m)
if ((m->m_lock.m_flags & UMUTEX_PRIO_PROTECT) == 0)
return (TMQ_NORM);
return (TMQ_NORM_PP);
return (is_robust_mutex(m) ? TMQ_ROBUST_PP : TMQ_NORM_PP);
}
/*
* Both enqueue_mutex() and dequeue_mutex() operate on the
* thread-private linkage of the locked mutexes and on the robust
* linkage.
*
* Robust list, as seen by kernel, must be consistent even in the case
* of thread termination at arbitrary moment. Since either enqueue or
* dequeue for list walked by kernel consists of rewriting a single
* forward pointer, it is safe. On the other hand, rewrite of the
* back pointer is not atomic WRT the forward one, but kernel does not
* care.
*/
static void
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m)
enqueue_mutex(struct pthread *curthread, struct pthread_mutex *m,
int error)
{
struct pthread_mutex *m1;
uintptr_t *rl;
int qidx;
m->m_owner = TID(curthread);
/* Add to the list of owned mutexes: */
mutex_assert_not_owned(m);
if (error != EOWNERDEAD)
mutex_assert_not_owned(curthread, m);
qidx = mutex_qidx(m);
TAILQ_INSERT_TAIL(&curthread->mq[qidx], m, m_qe);
if (!is_pshared_mutex(m))
TAILQ_INSERT_TAIL(&curthread->mq[qidx + 1], m, m_pqe);
if (is_robust_mutex(m)) {
rl = is_pshared_mutex(m) ? &curthread->robust_list :
&curthread->priv_robust_list;
m->m_rb_prev = NULL;
if (*rl != 0) {
m1 = __containerof((void *)*rl,
struct pthread_mutex, m_lock);
m->m_lock.m_rb_lnk = (uintptr_t)&m1->m_lock;
m1->m_rb_prev = m;
} else {
m1 = NULL;
m->m_lock.m_rb_lnk = 0;
}
*rl = (uintptr_t)&m->m_lock;
}
}
static void
dequeue_mutex(struct pthread *curthread, struct pthread_mutex *m)
{
struct pthread_mutex *mp, *mn;
int qidx;
m->m_owner = 0;
mutex_assert_is_owned(m);
qidx = mutex_qidx(m);
if (is_robust_mutex(m)) {
mp = m->m_rb_prev;
if (mp == NULL) {
if (is_pshared_mutex(m)) {
curthread->robust_list = m->m_lock.m_rb_lnk;
} else {
curthread->priv_robust_list =
m->m_lock.m_rb_lnk;
}
} else {
mp->m_lock.m_rb_lnk = m->m_lock.m_rb_lnk;
}
if (m->m_lock.m_rb_lnk != 0) {
mn = __containerof((void *)m->m_lock.m_rb_lnk,
struct pthread_mutex, m_lock);
mn->m_rb_prev = m->m_rb_prev;
}
m->m_lock.m_rb_lnk = 0;
m->m_rb_prev = NULL;
}
TAILQ_REMOVE(&curthread->mq[qidx], m, m_qe);
if (!is_pshared_mutex(m))
TAILQ_REMOVE(&curthread->mq[qidx + 1], m, m_pqe);
@ -496,7 +622,7 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex)
struct pthread *curthread;
struct pthread_mutex *m;
uint32_t id;
int ret;
int ret, robust;
ret = check_and_init_mutex(mutex, &m);
if (ret != 0)
@ -505,27 +631,32 @@ __pthread_mutex_trylock(pthread_mutex_t *mutex)
id = TID(curthread);
if (m->m_flags & PMUTEX_FLAG_PRIVATE)
THR_CRITICAL_ENTER(curthread);
robust = _mutex_enter_robust(curthread, m);
ret = _thr_umutex_trylock(&m->m_lock, id);
if (__predict_true(ret == 0)) {
enqueue_mutex(curthread, m);
} else if (m->m_owner == id) {
if (__predict_true(ret == 0) || ret == EOWNERDEAD) {
enqueue_mutex(curthread, m, ret);
if (ret == EOWNERDEAD)
m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
} else if (PMUTEX_OWNER_ID(m) == id) {
ret = mutex_self_trylock(m);
} /* else {} */
if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE))
if (robust)
_mutex_leave_robust(curthread, m);
if ((ret == 0 || ret == EOWNERDEAD) &&
(m->m_flags & PMUTEX_FLAG_PRIVATE) != 0)
THR_CRITICAL_LEAVE(curthread);
return (ret);
}
static int
mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
const struct timespec *abstime)
const struct timespec *abstime)
{
uint32_t id, owner;
int count;
int ret;
uint32_t id, owner;
int count, ret;
id = TID(curthread);
if (m->m_owner == id)
if (PMUTEX_OWNER_ID(m) == id)
return (mutex_self_lock(m, abstime));
/*
@ -534,10 +665,9 @@ mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
* the lock is likely to be released quickly and it is
* faster than entering the kernel
*/
if (__predict_false(
(m->m_lock.m_flags &
(UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0))
goto sleep_in_kernel;
if (__predict_false((m->m_lock.m_flags & (UMUTEX_PRIO_PROTECT |
UMUTEX_PRIO_INHERIT | UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) != 0))
goto sleep_in_kernel;
if (!_thr_is_smp)
goto yield_loop;
@ -546,7 +676,8 @@ mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
while (count--) {
owner = m->m_lock.m_owner;
if ((owner & ~UMUTEX_CONTESTED) == 0) {
if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
id | owner)) {
ret = 0;
goto done;
}
@ -560,7 +691,8 @@ mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
_sched_yield();
owner = m->m_lock.m_owner;
if ((owner & ~UMUTEX_CONTESTED) == 0) {
if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner, id|owner)) {
if (atomic_cmpset_acq_32(&m->m_lock.m_owner, owner,
id | owner)) {
ret = 0;
goto done;
}
@ -568,38 +700,46 @@ mutex_lock_sleep(struct pthread *curthread, struct pthread_mutex *m,
}
sleep_in_kernel:
if (abstime == NULL) {
if (abstime == NULL)
ret = __thr_umutex_lock(&m->m_lock, id);
} else if (__predict_false(
abstime->tv_nsec < 0 ||
abstime->tv_nsec >= 1000000000)) {
else if (__predict_false(abstime->tv_nsec < 0 ||
abstime->tv_nsec >= 1000000000))
ret = EINVAL;
} else {
else
ret = __thr_umutex_timedlock(&m->m_lock, id, abstime);
}
done:
if (ret == 0)
enqueue_mutex(curthread, m);
if (ret == 0 || ret == EOWNERDEAD) {
enqueue_mutex(curthread, m, ret);
if (ret == EOWNERDEAD)
m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
}
return (ret);
}
static inline int
mutex_lock_common(struct pthread_mutex *m,
const struct timespec *abstime, int cvattach)
mutex_lock_common(struct pthread_mutex *m, const struct timespec *abstime,
bool cvattach, bool rb_onlist)
{
struct pthread *curthread = _get_curthread();
int ret;
struct pthread *curthread;
int ret, robust;
curthread = _get_curthread();
if (!cvattach && m->m_flags & PMUTEX_FLAG_PRIVATE)
THR_CRITICAL_ENTER(curthread);
if (_thr_umutex_trylock2(&m->m_lock, TID(curthread)) == 0) {
enqueue_mutex(curthread, m);
ret = 0;
if (!rb_onlist)
robust = _mutex_enter_robust(curthread, m);
ret = _thr_umutex_trylock2(&m->m_lock, TID(curthread));
if (ret == 0 || ret == EOWNERDEAD) {
enqueue_mutex(curthread, m, ret);
if (ret == EOWNERDEAD)
m->m_lock.m_flags |= UMUTEX_NONCONSISTENT;
} else {
ret = mutex_lock_sleep(curthread, m, abstime);
}
if (ret && (m->m_flags & PMUTEX_FLAG_PRIVATE) && !cvattach)
if (!rb_onlist && robust)
_mutex_leave_robust(curthread, m);
if (ret != 0 && ret != EOWNERDEAD &&
(m->m_flags & PMUTEX_FLAG_PRIVATE) != 0 && !cvattach)
THR_CRITICAL_LEAVE(curthread);
return (ret);
}
@ -613,7 +753,7 @@ __pthread_mutex_lock(pthread_mutex_t *mutex)
_thr_check_init();
ret = check_and_init_mutex(mutex, &m);
if (ret == 0)
ret = mutex_lock_common(m, NULL, 0);
ret = mutex_lock_common(m, NULL, false, false);
return (ret);
}
@ -627,7 +767,7 @@ __pthread_mutex_timedlock(pthread_mutex_t *mutex,
_thr_check_init();
ret = check_and_init_mutex(mutex, &m);
if (ret == 0)
ret = mutex_lock_common(m, abstime, 0);
ret = mutex_lock_common(m, abstime, false, false);
return (ret);
}
@ -644,16 +784,16 @@ _pthread_mutex_unlock(pthread_mutex_t *mutex)
} else {
mp = *mutex;
}
return (mutex_unlock_common(mp, 0, NULL));
return (mutex_unlock_common(mp, false, NULL));
}
int
_mutex_cv_lock(struct pthread_mutex *m, int count)
_mutex_cv_lock(struct pthread_mutex *m, int count, bool rb_onlist)
{
int error;
int error;
error = mutex_lock_common(m, NULL, 1);
if (error == 0)
error = mutex_lock_common(m, NULL, true, rb_onlist);
if (error == 0 || error == EOWNERDEAD)
m->m_count = count;
return (error);
}
@ -667,16 +807,17 @@ _mutex_cv_unlock(struct pthread_mutex *m, int *count, int *defer)
*/
*count = m->m_count;
m->m_count = 0;
(void)mutex_unlock_common(m, 1, defer);
(void)mutex_unlock_common(m, true, defer);
return (0);
}
int
_mutex_cv_attach(struct pthread_mutex *m, int count)
{
struct pthread *curthread = _get_curthread();
struct pthread *curthread;
enqueue_mutex(curthread, m);
curthread = _get_curthread();
enqueue_mutex(curthread, m, 0);
m->m_count = count;
return (0);
}
@ -684,12 +825,12 @@ _mutex_cv_attach(struct pthread_mutex *m, int count)
int
_mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
{
struct pthread *curthread = _get_curthread();
int defered;
int error;
struct pthread *curthread;
int deferred, error;
curthread = _get_curthread();
if ((error = _mutex_owned(curthread, mp)) != 0)
return (error);
return (error);
/*
* Clear the count in case this is a recursive mutex.
@ -699,15 +840,15 @@ _mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
dequeue_mutex(curthread, mp);
/* Will this happen in real-world ? */
if ((mp->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
defered = 1;
mp->m_flags &= ~PMUTEX_FLAG_DEFERED;
if ((mp->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
deferred = 1;
mp->m_flags &= ~PMUTEX_FLAG_DEFERRED;
} else
defered = 0;
deferred = 0;
if (defered) {
if (deferred) {
_thr_wake_all(curthread->defer_waiters,
curthread->nwaiter_defer);
curthread->nwaiter_defer);
curthread->nwaiter_defer = 0;
}
return (0);
@ -716,7 +857,7 @@ _mutex_cv_detach(struct pthread_mutex *mp, int *recurse)
static int
mutex_self_trylock(struct pthread_mutex *m)
{
int ret;
int ret;
switch (PMUTEX_TYPE(m->m_flags)) {
case PTHREAD_MUTEX_ERRORCHECK:
@ -746,7 +887,7 @@ static int
mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
{
struct timespec ts1, ts2;
int ret;
int ret;
switch (PMUTEX_TYPE(m->m_flags)) {
case PTHREAD_MUTEX_ERRORCHECK:
@ -812,11 +953,11 @@ mutex_self_lock(struct pthread_mutex *m, const struct timespec *abstime)
}
static int
mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
mutex_unlock_common(struct pthread_mutex *m, bool cv, int *mtx_defer)
{
struct pthread *curthread = _get_curthread();
struct pthread *curthread;
uint32_t id;
int defered, error;
int deferred, error, robust;
if (__predict_false(m <= THR_MUTEX_DESTROYED)) {
if (m == THR_MUTEX_DESTROYED)
@ -824,34 +965,39 @@ mutex_unlock_common(struct pthread_mutex *m, int cv, int *mtx_defer)
return (EPERM);
}
curthread = _get_curthread();
id = TID(curthread);
/*
* Check if the running thread is not the owner of the mutex.
*/
if (__predict_false(m->m_owner != id))
if (__predict_false(PMUTEX_OWNER_ID(m) != id))
return (EPERM);
error = 0;
if (__predict_false(
PMUTEX_TYPE(m->m_flags) == PTHREAD_MUTEX_RECURSIVE &&
m->m_count > 0)) {
if (__predict_false(PMUTEX_TYPE(m->m_flags) ==
PTHREAD_MUTEX_RECURSIVE && m->m_count > 0)) {
m->m_count--;
} else {
if ((m->m_flags & PMUTEX_FLAG_DEFERED) != 0) {
defered = 1;
m->m_flags &= ~PMUTEX_FLAG_DEFERED;
if ((m->m_flags & PMUTEX_FLAG_DEFERRED) != 0) {
deferred = 1;
m->m_flags &= ~PMUTEX_FLAG_DEFERRED;
} else
defered = 0;
deferred = 0;
robust = _mutex_enter_robust(curthread, m);
dequeue_mutex(curthread, m);
error = _thr_umutex_unlock2(&m->m_lock, id, mtx_defer);
if (mtx_defer == NULL && defered) {
_thr_wake_all(curthread->defer_waiters,
curthread->nwaiter_defer);
curthread->nwaiter_defer = 0;
if (deferred) {
if (mtx_defer == NULL) {
_thr_wake_all(curthread->defer_waiters,
curthread->nwaiter_defer);
curthread->nwaiter_defer = 0;
} else
*mtx_defer = 1;
}
if (robust)
_mutex_leave_robust(curthread, m);
}
if (!cv && m->m_flags & PMUTEX_FLAG_PRIVATE)
THR_CRITICAL_LEAVE(curthread);
@ -887,7 +1033,7 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
struct pthread *curthread;
struct pthread_mutex *m, *m1, *m2;
struct mutex_queue *q, *qp;
int ret;
int qidx, ret;
if (*mutex == THR_PSHARED_PTR) {
m = __thr_pshared_offpage(mutex, 0);
@ -907,14 +1053,15 @@ _pthread_mutex_setprioceiling(pthread_mutex_t *mutex,
return (ret);
curthread = _get_curthread();
if (m->m_owner == TID(curthread)) {
if (PMUTEX_OWNER_ID(m) == TID(curthread)) {
mutex_assert_is_owned(m);
m1 = TAILQ_PREV(m, mutex_queue, m_qe);
m2 = TAILQ_NEXT(m, m_qe);
if ((m1 != NULL && m1->m_lock.m_ceilings[0] > (u_int)ceiling) ||
(m2 != NULL && m2->m_lock.m_ceilings[0] < (u_int)ceiling)) {
q = &curthread->mq[TMQ_NORM_PP];
qp = &curthread->mq[TMQ_NORM_PP_PRIV];
qidx = mutex_qidx(m);
q = &curthread->mq[qidx];
qp = &curthread->mq[qidx + 1];
TAILQ_REMOVE(q, m, m_qe);
if (!is_pshared_mutex(m))
TAILQ_REMOVE(qp, m, m_pqe);
@ -1009,18 +1156,45 @@ _pthread_mutex_isowned_np(pthread_mutex_t *mutex)
if (m <= THR_MUTEX_DESTROYED)
return (0);
}
return (m->m_owner == TID(_get_curthread()));
return (PMUTEX_OWNER_ID(m) == TID(_get_curthread()));
}
int
_mutex_owned(struct pthread *curthread, const struct pthread_mutex *mp)
{
if (__predict_false(mp <= THR_MUTEX_DESTROYED)) {
if (mp == THR_MUTEX_DESTROYED)
return (EINVAL);
return (EPERM);
}
if (mp->m_owner != TID(curthread))
if (PMUTEX_OWNER_ID(mp) != TID(curthread))
return (EPERM);
return (0);
}
int
_pthread_mutex_consistent(pthread_mutex_t *mutex)
{
struct pthread_mutex *m;
struct pthread *curthread;
if (*mutex == THR_PSHARED_PTR) {
m = __thr_pshared_offpage(mutex, 0);
if (m == NULL)
return (EINVAL);
shared_mutex_init(m, NULL);
} else {
m = *mutex;
if (m <= THR_MUTEX_DESTROYED)
return (EINVAL);
}
curthread = _get_curthread();
if ((m->m_lock.m_flags & (UMUTEX_ROBUST | UMUTEX_NONCONSISTENT)) !=
(UMUTEX_ROBUST | UMUTEX_NONCONSISTENT))
return (EINVAL);
if (PMUTEX_OWNER_ID(m) != TID(curthread))
return (EPERM);
m->m_lock.m_flags &= ~UMUTEX_NONCONSISTENT;
return (0);
}


@ -80,8 +80,12 @@ __weak_reference(_pthread_mutexattr_getpshared, pthread_mutexattr_getpshared);
__weak_reference(_pthread_mutexattr_setpshared, pthread_mutexattr_setpshared);
__weak_reference(_pthread_mutexattr_getprotocol, pthread_mutexattr_getprotocol);
__weak_reference(_pthread_mutexattr_setprotocol, pthread_mutexattr_setprotocol);
__weak_reference(_pthread_mutexattr_getprioceiling, pthread_mutexattr_getprioceiling);
__weak_reference(_pthread_mutexattr_setprioceiling, pthread_mutexattr_setprioceiling);
__weak_reference(_pthread_mutexattr_getprioceiling,
pthread_mutexattr_getprioceiling);
__weak_reference(_pthread_mutexattr_setprioceiling,
pthread_mutexattr_setprioceiling);
__weak_reference(_pthread_mutexattr_getrobust, pthread_mutexattr_getrobust);
__weak_reference(_pthread_mutexattr_setrobust, pthread_mutexattr_setrobust);
int
_pthread_mutexattr_init(pthread_mutexattr_t *attr)
@ -119,26 +123,28 @@ int
_pthread_mutexattr_getkind_np(pthread_mutexattr_t attr)
{
int ret;
if (attr == NULL) {
errno = EINVAL;
ret = -1;
} else {
ret = attr->m_type;
}
return(ret);
return (ret);
}
int
_pthread_mutexattr_settype(pthread_mutexattr_t *attr, int type)
{
int ret;
if (attr == NULL || *attr == NULL || type >= PTHREAD_MUTEX_TYPE_MAX) {
ret = EINVAL;
} else {
(*attr)->m_type = type;
ret = 0;
}
return(ret);
return (ret);
}
int
@ -153,7 +159,7 @@ _pthread_mutexattr_gettype(pthread_mutexattr_t *attr, int *type)
*type = (*attr)->m_type;
ret = 0;
}
return ret;
return (ret);
}
int
@ -167,7 +173,7 @@ _pthread_mutexattr_destroy(pthread_mutexattr_t *attr)
*attr = NULL;
ret = 0;
}
return(ret);
return (ret);
}
int
@ -198,12 +204,12 @@ _pthread_mutexattr_getprotocol(pthread_mutexattr_t *mattr, int *protocol)
{
int ret = 0;
if ((mattr == NULL) || (*mattr == NULL))
if (mattr == NULL || *mattr == NULL)
ret = EINVAL;
else
*protocol = (*mattr)->m_protocol;
return(ret);
return (ret);
}
int
@ -211,14 +217,14 @@ _pthread_mutexattr_setprotocol(pthread_mutexattr_t *mattr, int protocol)
{
int ret = 0;
if ((mattr == NULL) || (*mattr == NULL) ||
(protocol < PTHREAD_PRIO_NONE) || (protocol > PTHREAD_PRIO_PROTECT))
if (mattr == NULL || *mattr == NULL ||
protocol < PTHREAD_PRIO_NONE || protocol > PTHREAD_PRIO_PROTECT)
ret = EINVAL;
else {
(*mattr)->m_protocol = protocol;
(*mattr)->m_ceiling = THR_MAX_RR_PRIORITY;
}
return(ret);
return (ret);
}
int
@ -226,14 +232,14 @@ _pthread_mutexattr_getprioceiling(pthread_mutexattr_t *mattr, int *prioceiling)
{
int ret = 0;
if ((mattr == NULL) || (*mattr == NULL))
if (mattr == NULL || *mattr == NULL)
ret = EINVAL;
else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
ret = EINVAL;
else
*prioceiling = (*mattr)->m_ceiling;
return(ret);
return (ret);
}
int
@ -241,13 +247,44 @@ _pthread_mutexattr_setprioceiling(pthread_mutexattr_t *mattr, int prioceiling)
{
int ret = 0;
if ((mattr == NULL) || (*mattr == NULL))
if (mattr == NULL || *mattr == NULL)
ret = EINVAL;
else if ((*mattr)->m_protocol != PTHREAD_PRIO_PROTECT)
ret = EINVAL;
else
(*mattr)->m_ceiling = prioceiling;
return(ret);
return (ret);
}
int
_pthread_mutexattr_getrobust(pthread_mutexattr_t *mattr, int *robust)
{
int ret;
if (mattr == NULL || *mattr == NULL) {
ret = EINVAL;
} else {
ret = 0;
*robust = (*mattr)->m_robust;
}
return (ret);
}
int
_pthread_mutexattr_setrobust(pthread_mutexattr_t *mattr, int robust)
{
int ret;
if (mattr == NULL || *mattr == NULL) {
ret = EINVAL;
} else if (robust != PTHREAD_MUTEX_STALLED &&
robust != PTHREAD_MUTEX_ROBUST) {
ret = EINVAL;
} else {
ret = 0;
(*mattr)->m_robust = robust;
}
return (ret);
}


@ -45,6 +45,7 @@
#include <errno.h>
#include <limits.h>
#include <signal.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
@ -141,9 +142,11 @@ TAILQ_HEAD(mutex_queue, pthread_mutex);
#define PMUTEX_FLAG_TYPE_MASK 0x0ff
#define PMUTEX_FLAG_PRIVATE 0x100
#define PMUTEX_FLAG_DEFERED 0x200
#define PMUTEX_FLAG_DEFERRED 0x200
#define PMUTEX_TYPE(mtxflags) ((mtxflags) & PMUTEX_FLAG_TYPE_MASK)
#define PMUTEX_OWNER_ID(m) ((m)->m_lock.m_owner & ~UMUTEX_CONTESTED)
#define MAX_DEFER_WAITERS 50
/*
@ -159,7 +162,6 @@ struct pthread_mutex {
*/
struct umutex m_lock;
int m_flags;
uint32_t m_owner;
int m_count;
int m_spinloops;
int m_yieldloops;
@ -171,6 +173,7 @@ struct pthread_mutex {
TAILQ_ENTRY(pthread_mutex) m_qe;
/* Link for all private mutexes a thread currently owns. */
TAILQ_ENTRY(pthread_mutex) m_pqe;
struct pthread_mutex *m_rb_prev;
};
struct pthread_mutex_attr {
@ -178,10 +181,12 @@ struct pthread_mutex_attr {
int m_protocol;
int m_ceiling;
int m_pshared;
int m_robust;
};
#define PTHREAD_MUTEXATTR_STATIC_INITIALIZER \
{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE }
{ PTHREAD_MUTEX_DEFAULT, PTHREAD_PRIO_NONE, 0, MUTEX_FLAGS_PRIVATE, \
PTHREAD_MUTEX_STALLED }
struct pthread_cond {
__uint32_t __has_user_waiters;
@ -491,7 +496,9 @@ struct pthread {
#define TMQ_NORM_PRIV 1 /* NORMAL or PRIO_INHERIT normal priv */
#define TMQ_NORM_PP 2 /* PRIO_PROTECT normal mutexes */
#define TMQ_NORM_PP_PRIV 3 /* PRIO_PROTECT normal priv */
#define TMQ_NITEMS 4
#define TMQ_ROBUST_PP 4 /* PRIO_PROTECT robust mutexes */
#define TMQ_ROBUST_PP_PRIV 5 /* PRIO_PROTECT robust priv */
#define TMQ_NITEMS 6
struct mutex_queue mq[TMQ_NITEMS];
void *ret;
@ -545,6 +552,11 @@ struct pthread {
/* Number of threads deferred. */
int nwaiter_defer;
int robust_inited;
uintptr_t robust_list;
uintptr_t priv_robust_list;
uintptr_t inact_mtx;
/* Deferred threads from pthread_cond_signal. */
unsigned int *defer_waiters[MAX_DEFER_WAITERS];
#define _pthread_endzero wake_addr
@ -754,13 +766,17 @@ extern struct pthread *_single_thread __hidden;
*/
__BEGIN_DECLS
int _thr_setthreaded(int) __hidden;
int _mutex_cv_lock(struct pthread_mutex *, int) __hidden;
int _mutex_cv_lock(struct pthread_mutex *, int, bool) __hidden;
int _mutex_cv_unlock(struct pthread_mutex *, int *, int *) __hidden;
int _mutex_cv_attach(struct pthread_mutex *, int) __hidden;
int _mutex_cv_detach(struct pthread_mutex *, int *) __hidden;
int _mutex_owned(struct pthread *, const struct pthread_mutex *) __hidden;
int _mutex_reinit(pthread_mutex_t *) __hidden;
void _mutex_fork(struct pthread *curthread) __hidden;
int _mutex_enter_robust(struct pthread *curthread, struct pthread_mutex *m)
__hidden;
void _mutex_leave_robust(struct pthread *curthread, struct pthread_mutex *m)
__hidden;
void _libpthread_init(struct pthread *) __hidden;
struct pthread *_thr_alloc(struct pthread *) __hidden;
void _thread_exit(const char *, int, const char *) __hidden __dead2;
@ -819,6 +835,11 @@ void _pthread_cleanup_pop(int);
void _pthread_exit_mask(void *status, sigset_t *mask) __dead2 __hidden;
void _pthread_cancel_enter(int maycancel);
void _pthread_cancel_leave(int maycancel);
int _pthread_mutex_consistent(pthread_mutex_t *) __nonnull(1);
int _pthread_mutexattr_getrobust(pthread_mutexattr_t *__restrict,
int *__restrict) __nonnull_all;
int _pthread_mutexattr_setrobust(pthread_mutexattr_t *, int)
__nonnull(1);
/* #include <fcntl.h> */
#ifdef _SYS_FCNTL_H_


@ -33,6 +33,7 @@ __FBSDID("$FreeBSD$");
#ifndef HAS__UMTX_OP_ERR
int _umtx_op_err(void *obj, int op, u_long val, void *uaddr, void *uaddr2)
{
if (_umtx_op(obj, op, val, uaddr, uaddr2) == -1)
return (errno);
return (0);
@ -60,19 +61,24 @@ __thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
uint32_t owner;
if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
for (;;) {
/* wait in kernel */
_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));
owner = mtx->m_owner;
if ((owner & ~UMUTEX_CONTESTED) == 0 &&
atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
return (0);
}
for (;;) {
owner = mtx->m_owner;
if ((owner & ~UMUTEX_CONTESTED) == 0 &&
atomic_cmpset_acq_32(&mtx->m_owner, owner, id | owner))
return (0);
if (owner == UMUTEX_RB_OWNERDEAD &&
atomic_cmpset_acq_32(&mtx->m_owner, owner,
id | UMUTEX_CONTESTED))
return (EOWNERDEAD);
if (owner == UMUTEX_RB_NOTRECOV)
return (ENOTRECOVERABLE);
/* wait in kernel */
_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
}
return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
}
#define SPINLOOPS 1000
@ -81,31 +87,33 @@ int
__thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
uint32_t owner;
int count;
if (!_thr_is_smp)
return __thr_umutex_lock(mtx, id);
return (__thr_umutex_lock(mtx, id));
if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0)
return (_umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0));
if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
for (;;) {
int count = SPINLOOPS;
while (count--) {
owner = mtx->m_owner;
if ((owner & ~UMUTEX_CONTESTED) == 0) {
if (atomic_cmpset_acq_32(
&mtx->m_owner,
owner, id|owner)) {
return (0);
}
}
CPU_SPINWAIT;
}
/* wait in kernel */
_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
for (;;) {
count = SPINLOOPS;
while (count--) {
owner = mtx->m_owner;
if ((owner & ~UMUTEX_CONTESTED) == 0 &&
atomic_cmpset_acq_32(&mtx->m_owner, owner,
id | owner))
return (0);
if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
atomic_cmpset_acq_32(&mtx->m_owner, owner,
id | UMUTEX_CONTESTED))
return (EOWNERDEAD);
if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
return (ENOTRECOVERABLE);
CPU_SPINWAIT;
}
}
return _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0, 0, 0);
/* wait in kernel */
_umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0, 0, 0);
}
}
int
@ -129,21 +137,28 @@ __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
}
for (;;) {
if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
/* wait in kernel */
ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
(void *)tm_size, __DECONST(void *, tm_p));
/* now try to lock it */
if ((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
UMUTEX_PRIO_INHERIT)) == 0) {
/* try to lock it */
owner = mtx->m_owner;
if ((owner & ~UMUTEX_CONTESTED) == 0 &&
atomic_cmpset_acq_32(&mtx->m_owner, owner, id|owner))
atomic_cmpset_acq_32(&mtx->m_owner, owner,
id | owner))
return (0);
if (__predict_false(owner == UMUTEX_RB_OWNERDEAD) &&
atomic_cmpset_acq_32(&mtx->m_owner, owner,
id | UMUTEX_CONTESTED))
return (EOWNERDEAD);
if (__predict_false(owner == UMUTEX_RB_NOTRECOV))
return (ENOTRECOVERABLE);
/* wait in kernel */
ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_WAIT, 0,
(void *)tm_size, __DECONST(void *, tm_p));
} else {
ret = _umtx_op_err(mtx, UMTX_OP_MUTEX_LOCK, 0,
(void *)tm_size, __DECONST(void *, tm_p));
if (ret == 0)
(void *)tm_size, __DECONST(void *, tm_p));
if (ret == 0 || ret == EOWNERDEAD ||
ret == ENOTRECOVERABLE)
break;
}
if (ret == ETIMEDOUT)
@ -155,46 +170,52 @@ __thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
int
__thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
return _umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0);
return (_umtx_op_err(mtx, UMTX_OP_MUTEX_UNLOCK, 0, 0, 0));
}
int
__thr_umutex_trylock(struct umutex *mtx)
{
return _umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0);
return (_umtx_op_err(mtx, UMTX_OP_MUTEX_TRYLOCK, 0, 0, 0));
}
int
__thr_umutex_set_ceiling(struct umutex *mtx, uint32_t ceiling,
uint32_t *oldceiling)
uint32_t *oldceiling)
{
return _umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0);
return (_umtx_op_err(mtx, UMTX_OP_SET_CEILING, ceiling, oldceiling, 0));
}
int
_thr_umtx_wait(volatile long *mtx, long id, const struct timespec *timeout)
{
if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
timeout->tv_nsec <= 0)))
timeout->tv_nsec <= 0)))
return (ETIMEDOUT);
return _umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
__DECONST(void*, timeout));
return (_umtx_op_err(__DEVOLATILE(void *, mtx), UMTX_OP_WAIT, id, 0,
__DECONST(void*, timeout)));
}
int
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id, const struct timespec *timeout, int shared)
_thr_umtx_wait_uint(volatile u_int *mtx, u_int id,
const struct timespec *timeout, int shared)
{
if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
timeout->tv_nsec <= 0)))
timeout->tv_nsec <= 0)))
return (ETIMEDOUT);
return _umtx_op_err(__DEVOLATILE(void *, mtx),
shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
__DECONST(void*, timeout));
return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id, 0,
__DECONST(void*, timeout)));
}
int
_thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
const struct timespec *abstime, int shared)
const struct timespec *abstime, int shared)
{
struct _umtx_time *tm_p, timeout;
size_t tm_size;
@ -210,21 +231,23 @@ _thr_umtx_timedwait_uint(volatile u_int *mtx, u_int id, int clockid,
tm_size = sizeof(timeout);
}
return _umtx_op_err(__DEVOLATILE(void *, mtx),
shared ? UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
(void *)tm_size, __DECONST(void *, tm_p));
return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
UMTX_OP_WAIT_UINT : UMTX_OP_WAIT_UINT_PRIVATE, id,
(void *)tm_size, __DECONST(void *, tm_p)));
}
int
_thr_umtx_wake(volatile void *mtx, int nr_wakeup, int shared)
{
return _umtx_op_err(__DEVOLATILE(void *, mtx), shared ? UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE,
nr_wakeup, 0, 0);
return (_umtx_op_err(__DEVOLATILE(void *, mtx), shared ?
UMTX_OP_WAKE : UMTX_OP_WAKE_PRIVATE, nr_wakeup, 0, 0));
}
void
_thr_ucond_init(struct ucond *cv)
{
bzero(cv, sizeof(struct ucond));
}
@ -232,30 +255,34 @@ int
_thr_ucond_wait(struct ucond *cv, struct umutex *m,
const struct timespec *timeout, int flags)
{
struct pthread *curthread;
if (timeout && (timeout->tv_sec < 0 || (timeout->tv_sec == 0 &&
timeout->tv_nsec <= 0))) {
struct pthread *curthread = _get_curthread();
curthread = _get_curthread();
_thr_umutex_unlock(m, TID(curthread));
return (ETIMEDOUT);
}
return _umtx_op_err(cv, UMTX_OP_CV_WAIT, flags,
m, __DECONST(void*, timeout));
return (_umtx_op_err(cv, UMTX_OP_CV_WAIT, flags, m,
__DECONST(void*, timeout)));
}
int
_thr_ucond_signal(struct ucond *cv)
{
if (!cv->c_has_waiters)
return (0);
return _umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL);
return (_umtx_op_err(cv, UMTX_OP_CV_SIGNAL, 0, NULL, NULL));
}
int
_thr_ucond_broadcast(struct ucond *cv)
{
if (!cv->c_has_waiters)
return (0);
return _umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL);
return (_umtx_op_err(cv, UMTX_OP_CV_BROADCAST, 0, NULL, NULL));
}
int
@ -275,7 +302,8 @@ __thr_rwlock_rdlock(struct urwlock *rwlock, int flags,
tm_p = &timeout;
tm_size = sizeof(timeout);
}
return _umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags, (void *)tm_size, tm_p);
return (_umtx_op_err(rwlock, UMTX_OP_RW_RDLOCK, flags,
(void *)tm_size, tm_p));
}
int
@ -294,13 +322,15 @@ __thr_rwlock_wrlock(struct urwlock *rwlock, const struct timespec *tsp)
tm_p = &timeout;
tm_size = sizeof(timeout);
}
return _umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size, tm_p);
return (_umtx_op_err(rwlock, UMTX_OP_RW_WRLOCK, 0, (void *)tm_size,
tm_p));
}
int
__thr_rwlock_unlock(struct urwlock *rwlock)
{
return _umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL);
return (_umtx_op_err(rwlock, UMTX_OP_RW_UNLOCK, 0, NULL, NULL));
}
void
@ -338,6 +368,7 @@ _thr_rwl_wrlock(struct urwlock *rwlock)
void
_thr_rwl_unlock(struct urwlock *rwlock)
{
if (_thr_rwlock_unlock(rwlock))
PANIC("unlock error");
}


@ -32,7 +32,11 @@
#include <strings.h>
#include <sys/umtx.h>
#define DEFAULT_UMUTEX {0,0,{0,0},{0,0,0,0}}
#ifdef __LP64__
#define DEFAULT_UMUTEX {0,0,{0,0},0,{0,0}}
#else
#define DEFAULT_UMUTEX {0,0,{0,0},0,0,{0,0}}
#endif
#define DEFAULT_URWLOCK {0,0,0,0,{0,0,0,0}}
int _umtx_op_err(void *, int op, u_long, void *, void *) __hidden;
@ -75,95 +79,122 @@ void _thr_rwl_unlock(struct urwlock *rwlock) __hidden;
static inline int
_thr_umutex_trylock(struct umutex *mtx, uint32_t id)
{
if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
return (0);
if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
return (EBUSY);
return (__thr_umutex_trylock(mtx));
if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id))
return (0);
if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
id | UMUTEX_CONTESTED))
return (EOWNERDEAD);
if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
return (ENOTRECOVERABLE);
if ((mtx->m_flags & UMUTEX_PRIO_PROTECT) == 0)
return (EBUSY);
return (__thr_umutex_trylock(mtx));
}
static inline int
_thr_umutex_trylock2(struct umutex *mtx, uint32_t id)
{
if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
return (0);
if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
__predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0))
if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED, id | UMUTEX_CONTESTED))
if (atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_UNOWNED, id) != 0)
return (0);
return (EBUSY);
if ((uint32_t)mtx->m_owner == UMUTEX_CONTESTED &&
__predict_true((mtx->m_flags & (UMUTEX_PRIO_PROTECT |
UMUTEX_PRIO_INHERIT)) == 0) &&
atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_CONTESTED,
id | UMUTEX_CONTESTED))
return (0);
if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_OWNERDEAD) &&
atomic_cmpset_acq_32(&mtx->m_owner, UMUTEX_RB_OWNERDEAD,
id | UMUTEX_CONTESTED))
return (EOWNERDEAD);
if (__predict_false((uint32_t)mtx->m_owner == UMUTEX_RB_NOTRECOV))
return (ENOTRECOVERABLE);
return (EBUSY);
}
static inline int
_thr_umutex_lock(struct umutex *mtx, uint32_t id)
{
if (_thr_umutex_trylock2(mtx, id) == 0)
return (0);
return (__thr_umutex_lock(mtx, id));
if (_thr_umutex_trylock2(mtx, id) == 0)
return (0);
return (__thr_umutex_lock(mtx, id));
}
static inline int
_thr_umutex_lock_spin(struct umutex *mtx, uint32_t id)
{
if (_thr_umutex_trylock2(mtx, id) == 0)
return (0);
return (__thr_umutex_lock_spin(mtx, id));
if (_thr_umutex_trylock2(mtx, id) == 0)
return (0);
return (__thr_umutex_lock_spin(mtx, id));
}
static inline int
_thr_umutex_timedlock(struct umutex *mtx, uint32_t id,
const struct timespec *timeout)
const struct timespec *timeout)
{
if (_thr_umutex_trylock2(mtx, id) == 0)
return (0);
return (__thr_umutex_timedlock(mtx, id, timeout));
if (_thr_umutex_trylock2(mtx, id) == 0)
return (0);
return (__thr_umutex_timedlock(mtx, id, timeout));
}
static inline int
_thr_umutex_unlock2(struct umutex *mtx, uint32_t id, int *defer)
{
uint32_t flags = mtx->m_flags;
uint32_t flags, owner;
bool noncst;
if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) == 0) {
uint32_t owner;
do {
owner = mtx->m_owner;
if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
return (EPERM);
} while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner,
owner, UMUTEX_UNOWNED)));
if ((owner & UMUTEX_CONTESTED)) {
if (defer == NULL)
(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2, flags, 0, 0);
else
*defer = 1;
}
return (0);
flags = mtx->m_flags;
noncst = (flags & UMUTEX_NONCONSISTENT) != 0;
if ((flags & (UMUTEX_PRIO_PROTECT | UMUTEX_PRIO_INHERIT)) != 0) {
if (atomic_cmpset_rel_32(&mtx->m_owner, id, noncst ?
UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED))
return (0);
return (__thr_umutex_unlock(mtx, id));
}
if (atomic_cmpset_rel_32(&mtx->m_owner, id, UMUTEX_UNOWNED))
return (0);
return (__thr_umutex_unlock(mtx, id));
do {
owner = mtx->m_owner;
if (__predict_false((owner & ~UMUTEX_CONTESTED) != id))
return (EPERM);
} while (__predict_false(!atomic_cmpset_rel_32(&mtx->m_owner, owner,
noncst ? UMUTEX_RB_NOTRECOV : UMUTEX_UNOWNED)));
if ((owner & UMUTEX_CONTESTED) != 0) {
if (defer == NULL || noncst)
(void)_umtx_op_err(mtx, UMTX_OP_MUTEX_WAKE2,
flags, 0, 0);
else
*defer = 1;
}
return (0);
}
static inline int
_thr_umutex_unlock(struct umutex *mtx, uint32_t id)
{
return _thr_umutex_unlock2(mtx, id, NULL);
return (_thr_umutex_unlock2(mtx, id, NULL));
}
static inline int
_thr_rwlock_tryrdlock(struct urwlock *rwlock, int flags)
{
int32_t state;
int32_t wrflags;
int32_t state, wrflags;
if (flags & URWLOCK_PREFER_READER || rwlock->rw_flags & URWLOCK_PREFER_READER)
if ((flags & URWLOCK_PREFER_READER) != 0 ||
(rwlock->rw_flags & URWLOCK_PREFER_READER) != 0)
wrflags = URWLOCK_WRITE_OWNER;
else
wrflags = URWLOCK_WRITE_OWNER | URWLOCK_WRITE_WAITERS;
state = rwlock->rw_state;
while (!(state & wrflags)) {
if (__predict_false(URWLOCK_READER_COUNT(state) == URWLOCK_MAX_READERS))
if (__predict_false(URWLOCK_READER_COUNT(state) ==
URWLOCK_MAX_READERS))
return (EAGAIN);
if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state + 1))
return (0);
@ -179,8 +210,10 @@ _thr_rwlock_trywrlock(struct urwlock *rwlock)
int32_t state;
state = rwlock->rw_state;
while (!(state & URWLOCK_WRITE_OWNER) && URWLOCK_READER_COUNT(state) == 0) {
if (atomic_cmpset_acq_32(&rwlock->rw_state, state, state | URWLOCK_WRITE_OWNER))
while ((state & URWLOCK_WRITE_OWNER) == 0 &&
URWLOCK_READER_COUNT(state) == 0) {
if (atomic_cmpset_acq_32(&rwlock->rw_state, state,
state | URWLOCK_WRITE_OWNER))
return (0);
state = rwlock->rw_state;
}
@ -191,6 +224,7 @@ _thr_rwlock_trywrlock(struct urwlock *rwlock)
static inline int
_thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
{
if (_thr_rwlock_tryrdlock(rwlock, flags) == 0)
return (0);
return (__thr_rwlock_rdlock(rwlock, flags, tsp));
@ -199,6 +233,7 @@ _thr_rwlock_rdlock(struct urwlock *rwlock, int flags, struct timespec *tsp)
static inline int
_thr_rwlock_wrlock(struct urwlock *rwlock, struct timespec *tsp)
{
if (_thr_rwlock_trywrlock(rwlock) == 0)
return (0);
return (__thr_rwlock_wrlock(rwlock, tsp));
@ -210,18 +245,19 @@ _thr_rwlock_unlock(struct urwlock *rwlock)
int32_t state;
state = rwlock->rw_state;
if (state & URWLOCK_WRITE_OWNER) {
if (atomic_cmpset_rel_32(&rwlock->rw_state, URWLOCK_WRITE_OWNER, 0))
if ((state & URWLOCK_WRITE_OWNER) != 0) {
if (atomic_cmpset_rel_32(&rwlock->rw_state,
URWLOCK_WRITE_OWNER, 0))
return (0);
} else {
for (;;) {
if (__predict_false(URWLOCK_READER_COUNT(state) == 0))
return (EPERM);
if (!((state & (URWLOCK_WRITE_WAITERS |
URWLOCK_READ_WAITERS)) &&
URWLOCK_READ_WAITERS)) != 0 &&
URWLOCK_READER_COUNT(state) == 1)) {
if (atomic_cmpset_rel_32(&rwlock->rw_state,
state, state-1))
state, state - 1))
return (0);
state = rwlock->rw_state;
} else {


@ -238,6 +238,7 @@ PTHREAD_MAN= pthread.3 \
pthread_multi_np.3 \
pthread_mutexattr.3 \
pthread_mutexattr_getkind_np.3 \
pthread_mutex_consistent.3 \
pthread_mutex_destroy.3 \
pthread_mutex_init.3 \
pthread_mutex_lock.3 \
@ -312,10 +313,12 @@ PTHREAD_MLINKS+=pthread_multi_np.3 pthread_single_np.3
PTHREAD_MLINKS+=pthread_mutexattr.3 pthread_mutexattr_destroy.3 \
pthread_mutexattr.3 pthread_mutexattr_getprioceiling.3 \
pthread_mutexattr.3 pthread_mutexattr_getprotocol.3 \
pthread_mutexattr.3 pthread_mutexattr_getrobust.3 \
pthread_mutexattr.3 pthread_mutexattr_gettype.3 \
pthread_mutexattr.3 pthread_mutexattr_init.3 \
pthread_mutexattr.3 pthread_mutexattr_setprioceiling.3 \
pthread_mutexattr.3 pthread_mutexattr_setprotocol.3 \
pthread_mutexattr.3 pthread_mutexattr_setrobust.3 \
pthread_mutexattr.3 pthread_mutexattr_settype.3
PTHREAD_MLINKS+=pthread_mutexattr_getkind_np.3 pthread_mutexattr_setkind_np.3
PTHREAD_MLINKS+=pthread_rwlock_rdlock.3 pthread_rwlock_tryrdlock.3

View file

@ -27,7 +27,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd February 16, 2006
.Dd April 29, 2016
.Dt PTHREAD_COND_WAIT 3
.Os
.Sh NAME
@ -75,13 +75,25 @@ is invalid.
The specified
.Fa mutex
was not locked by the calling thread.
.It Bq Er EOWNERDEAD
The argument
.Fa mutex
points to a robust mutex and the previous owning thread terminated
while holding the mutex lock.
The lock was granted to the caller and it is up to the new owner
to make the state consistent.
.It Bq Er ENOTRECOVERABLE
The state protected by the
.Fa mutex
is not recoverable.
.El
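A brief illustration of this error path (not part of the patch): the sketch below assumes a robust mutex guarding an integer flag and shows that, because pthread_cond_wait() re-acquires the mutex before returning, EOWNERDEAD is reported through its return value and the caller can repair the predicate and mark the mutex consistent. The helper name and the flag are invented for the example.

#include <errno.h>
#include <pthread.h>

/*
 * Wait until *ready becomes nonzero.  The mutex is assumed to have
 * been initialized with PTHREAD_MUTEX_ROBUST and to be held by the
 * caller on entry, as pthread_cond_wait() requires.
 */
static int
wait_ready(pthread_cond_t *cv, pthread_mutex_t *mp, int *ready)
{
	int error;

	while (*ready == 0) {
		error = pthread_cond_wait(cv, mp);
		if (error == EOWNERDEAD) {
			/*
			 * The mutex was re-acquired, but a previous
			 * owner died; reset the protected state and
			 * mark the mutex consistent so it stays usable.
			 */
			*ready = 0;
			error = pthread_mutex_consistent(mp);
		}
		if (error != 0)
			return (error);
	}
	return (0);
}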
.Sh SEE ALSO
.Xr pthread_cond_broadcast 3 ,
.Xr pthread_cond_destroy 3 ,
.Xr pthread_cond_init 3 ,
.Xr pthread_cond_signal 3 ,
-.Xr pthread_cond_timedwait 3
.Xr pthread_cond_timedwait 3 ,
.Xr pthread_mutex_consistent 3
.Sh STANDARDS
The
.Fn pthread_cond_wait

View file

@ -0,0 +1,94 @@
.\" Copyright (c) 2016 The FreeBSD Foundation, Inc.
.\" All rights reserved.
.\"
.\" This documentation was written by
.\" Konstantin Belousov <kib@FreeBSD.org> under sponsorship
.\" from the FreeBSD Foundation.
.\"
.\" Redistribution and use in source and binary forms, with or without
.\" modification, are permitted provided that the following conditions
.\" are met:
.\" 1. Redistributions of source code must retain the above copyright
.\" notice, this list of conditions and the following disclaimer.
.\" 2. Redistributions in binary form must reproduce the above copyright
.\" notice, this list of conditions and the following disclaimer in the
.\" documentation and/or other materials provided with the distribution.
.\"
.\" THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
.\" SUCH DAMAGE.
.\"
.\" $FreeBSD$
.\"
.Dd May 8, 2016
.Dt PTHREAD_MUTEX_CONSISTENT 3
.Os
.Sh NAME
.Nm pthread_mutex_consistent
.Nd mark state protected by robust mutex as consistent
.Sh LIBRARY
.Lb libpthread
.Sh SYNOPSIS
.In pthread.h
.Ft int
.Fn pthread_mutex_consistent "pthread_mutex_t *mutex"
.Sh DESCRIPTION
If the thread owning a robust mutex terminates while holding the
mutex, the mutex becomes inconsistent and the next thread that
acquires the mutex lock is notified of the state by the return value
.Er EOWNERDEAD .
In this case, the mutex does not become normally usable again until
the state is marked consistent.
.Pp
The
.Fn pthread_mutex_consistent
function, when called with the
.Fa mutex
argument pointing to an initialized robust mutex in an
inconsistent state, marks the mutex as consistent again.
The subsequent unlock of the mutex, by
.Fn pthread_mutex_unlock
or any other method, then allows other contenders to lock the mutex.
.Pp
If a mutex in the inconsistent state is not marked consistent
by a call to
.Fn pthread_mutex_consistent
before it is unlocked,
further attempts to lock the
.Fa mutex
fail, and the locking functions report the
.Er ENOTRECOVERABLE
error.
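To make the recovery protocol concrete, here is a minimal, self-contained sketch of the intended usage (an illustration, not part of the patch); the counter and the recovery action stand in for application-specific state.

#include <errno.h>
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t m;
static int counter;		/* state protected by m */

int
main(void)
{
	pthread_mutexattr_t attr;
	int error;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
	pthread_mutex_init(&m, &attr);
	pthread_mutexattr_destroy(&attr);

	error = pthread_mutex_lock(&m);
	if (error == EOWNERDEAD) {
		/*
		 * A previous owner died while holding m.  The lock is
		 * held now; repair the protected state, then mark the
		 * mutex consistent before it is unlocked.
		 */
		counter = 0;
		if (pthread_mutex_consistent(&m) != 0)
			abort();
	} else if (error != 0) {
		/* ENOTRECOVERABLE, EINVAL, ... */
		abort();
	}
	counter++;
	pthread_mutex_unlock(&m);
	return (0);
}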
.Sh RETURN VALUES
If successful,
.Fn pthread_mutex_consistent
will return zero, otherwise an error number will be returned to
indicate the error.
.Sh ERRORS
The
.Fn pthread_mutex_consistent
function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
The mutex pointed to by the
.Fa mutex
argument is not robust, or is not in the inconsistent state.
.El
.Sh SEE ALSO
.Xr pthread_mutex_init 3 ,
.Xr pthread_mutex_lock 3 ,
.Xr pthread_mutex_unlock 3 ,
.Xr pthread_mutexattr_setrobust 3
.Sh STANDARDS
The
.Fn pthread_mutex_consistent
function conforms to
.St -susv4 .

View file

@ -27,7 +27,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd January 31, 2006
.Dd April 29, 2016
.Dt PTHREAD_MUTEX_LOCK 3
.Os
.Sh NAME
@ -55,7 +55,7 @@ indicate the error.
The
.Fn pthread_mutex_lock
function will fail if:
-.Bl -tag -width Er
.Bl -tag -width "Er ENOTRECOVERABLE"
.It Bq Er EINVAL
The value specified by
.Fa mutex
@ -63,8 +63,20 @@ is invalid.
.It Bq Er EDEADLK
A deadlock would occur if the thread blocked waiting for
.Fa mutex .
.It Bq Er EOWNERDEAD
The argument
.Fa mutex
points to a robust mutex and the previous owning thread terminated
while holding the mutex lock.
The lock was granted to the caller and it is up to the new owner
to make the state consistent.
.It Bq Er ENOTRECOVERABLE
The state protected by the
.Fa mutex
is not recoverable.
.El
.Sh SEE ALSO
.Xr pthread_mutex_consistent 3 ,
.Xr pthread_mutex_destroy 3 ,
.Xr pthread_mutex_init 3 ,
.Xr pthread_mutex_trylock 3 ,

View file

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd December 30, 2003
.Dd April 29, 2016
.Dt PTHREAD_MUTEX_TIMEDLOCK 3
.Os
.Sh NAME
@ -59,7 +59,7 @@ The
.Fn pthread_mutex_timedlock
function will fail if:
-.Bl -tag -width Er
.Bl -tag -width "Er ENOTRECOVERABLE"
.It Bq Er EINVAL
The
.Fa mutex
was created with the protocol attribute having the
@ -89,8 +89,20 @@ has been exceeded.
.It Bq Er EDEADLK
The current thread already owns the
.Fa mutex .
.It Bq Er EOWNERDEAD
The argument
.Fa mutex
points to a robust mutex and the previous owning thread terminated
while holding the mutex lock.
The lock was granted to the caller and it is up to the new owner
to make the state consistent.
.It Bq Er ENOTRECOVERABLE
The state protected by the
.Fa mutex
is not recoverable.
.El
.Sh SEE ALSO
.Xr pthread_mutex_consistent 3 ,
.Xr pthread_mutex_destroy 3 ,
.Xr pthread_mutex_init 3 ,
.Xr pthread_mutex_lock 3 ,

View file

@ -27,7 +27,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd July 30, 1998
.Dd April 29, 2016
.Dt PTHREAD_MUTEX_TRYLOCK 3
.Os
.Sh NAME
@ -56,7 +56,7 @@ indicate the error.
The
.Fn pthread_mutex_trylock
function will fail if:
-.Bl -tag -width Er
.Bl -tag -width "Er ENOTRECOVERABLE"
.It Bq Er EINVAL
The value specified by
.Fa mutex
@ -64,8 +64,20 @@ is invalid.
.It Bq Er EBUSY
.Fa Mutex
is already locked.
.It Bq Er EOWNERDEAD
The argument
.Fa mutex
points to a robust mutex and the previous owning thread terminated
while holding the mutex lock.
The lock was granted to the caller and it is up to the new owner
to make the state consistent.
.It Bq Er ENOTRECOVERABLE
The state protected by the
.Fa mutex
is not recoverable.
.El
.Sh SEE ALSO
.Xr pthread_mutex_consistent 3 ,
.Xr pthread_mutex_destroy 3 ,
.Xr pthread_mutex_init 3 ,
.Xr pthread_mutex_lock 3 ,

View file

@ -27,7 +27,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd July 30, 1998
.Dd April 29, 2016
.Dt PTHREAD_MUTEX_UNLOCK 3
.Os
.Sh NAME
@ -46,6 +46,17 @@ then the
.Fn pthread_mutex_unlock
function unlocks
.Fa mutex .
.Pp
If the mutex pointed to by the
.Fa mutex
argument is a robust mutex in the inconsistent state, and
.Fn pthread_mutex_consistent
was not called prior to unlocking, further attempts to lock the
.Fa mutex
are denied and the locking functions return the
.Er ENOTRECOVERABLE
error.
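A small sketch of the abandonment path described above (illustrative, not part of the patch); the helper name is invented and it assumes pthread_mutex_lock() on the same mutex has just returned EOWNERDEAD.

#include <pthread.h>

/*
 * Called when pthread_mutex_lock(mp) returned EOWNERDEAD and the
 * protected state cannot be repaired.  Unlocking without a prior
 * pthread_mutex_consistent() call poisons the mutex: later lock
 * attempts fail with ENOTRECOVERABLE until the mutex is destroyed
 * and re-initialized.
 */
static void
abandon_robust_mutex(pthread_mutex_t *mp)
{
	(void)pthread_mutex_unlock(mp);
}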
.Sh RETURN VALUES
If successful,
.Fn pthread_mutex_unlock

View file

@ -26,7 +26,7 @@
.\" EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
.\"
.\" $FreeBSD$
.Dd May 1, 2000
.Dd April 29, 2016
.Dt PTHREAD_MUTEXATTR 3
.Os
.Sh NAME
@ -36,6 +36,8 @@
.Nm pthread_mutexattr_getprioceiling ,
.Nm pthread_mutexattr_setprotocol ,
.Nm pthread_mutexattr_getprotocol ,
.Nm pthread_mutexattr_setrobust ,
.Nm pthread_mutexattr_getrobust ,
.Nm pthread_mutexattr_settype ,
.Nm pthread_mutexattr_gettype
.Nd mutex attribute operations
@ -56,6 +58,10 @@
.Ft int
.Fn pthread_mutexattr_getprotocol "pthread_mutexattr_t *attr" "int *protocol"
.Ft int
.Fn pthread_mutexattr_setrobust "pthread_mutexattr_t *attr" "int robust"
.Ft int
.Fn pthread_mutexattr_getrobust "pthread_mutexattr_t *attr" "int *robust"
.Ft int
.Fn pthread_mutexattr_settype "pthread_mutexattr_t *attr" "int type"
.Ft int
.Fn pthread_mutexattr_gettype "pthread_mutexattr_t *attr" "int *type"
@ -165,6 +171,26 @@ function will fail if:
Invalid value for
.Fa attr .
.El
.Pp
The
.Fn pthread_mutexattr_setrobust
function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
Invalid value for
.Fa attr ,
or invalid value for
.Fa robust .
.El
.Pp
The
.Fn pthread_mutexattr_getrobust
function will fail if:
.Bl -tag -width Er
.It Bq Er EINVAL
Invalid value for
.Fa attr .
.El
.Sh SEE ALSO
.Xr pthread_mutex_init 3
.Sh STANDARDS
@ -184,4 +210,10 @@ The
and
.Fn pthread_mutexattr_gettype
functions conform to
-.St -susv2
.St -susv2 .
The
.Fn pthread_mutexattr_setrobust
and
.Fn pthread_mutexattr_getrobust
functions conform to
.St -susv4 .
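As an illustration of the new attributes (not part of the patch), the sketch below initializes a robust, process-shared mutex placed in anonymous shared memory, the typical arrangement when the owner that may terminate is another process; the region layout is arbitrary for the example.

#include <sys/mman.h>

#include <pthread.h>
#include <stddef.h>

struct shared_region {
	pthread_mutex_t	lock;
	int		data;
};

static struct shared_region *
create_shared_region(void)
{
	pthread_mutexattr_t attr;
	struct shared_region *sr;

	sr = mmap(NULL, sizeof(*sr), PROT_READ | PROT_WRITE,
	    MAP_ANON | MAP_SHARED, -1, 0);
	if (sr == MAP_FAILED)
		return (NULL);

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_SHARED);
	pthread_mutexattr_setrobust(&attr, PTHREAD_MUTEX_ROBUST);
	if (pthread_mutex_init(&sr->lock, &attr) != 0) {
		(void)munmap(sr, sizeof(*sr));
		sr = NULL;
	}
	pthread_mutexattr_destroy(&attr);
	return (sr);
}

A process that maps the same region, or a child created by fork(), can then lock sr->lock and will observe EOWNERDEAD if a previous holder exited while owning it.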

View file

@ -30,6 +30,7 @@ __FBSDID("$FreeBSD$");
#include <sys/proc.h>
#include <sys/sched.h>
#include <sys/syscallsubr.h>
#include <sys/umtx.h>
#include <contrib/cloudabi/cloudabi_types_common.h>
@ -44,6 +45,8 @@ cloudabi_sys_thread_exit(struct thread *td,
.scope = uap->scope,
};
umtx_thread_exit(td);
/* Wake up joining thread. */
cloudabi_sys_lock_unlock(td, &cloudabi_sys_lock_unlock_args);

View file

@ -42,6 +42,7 @@ __FBSDID("$FreeBSD$");
#include <sys/sched.h>
#include <sys/syscallsubr.h>
#include <sys/sx.h>
#include <sys/umtx.h>
#include <sys/unistd.h>
#include <sys/wait.h>
@ -410,6 +411,8 @@ linux_exit(struct thread *td, struct linux_exit_args *args)
LINUX_CTR2(exit, "thread(%d) (%d)", em->em_tid, args->rval);
umtx_thread_exit(td);
linux_thread_detach(td);
/*

View file

@ -350,8 +350,11 @@ exit1(struct thread *td, int rval, int signo)
KASSERT(!timevalisset(&p->p_realtimer.it_value),
("realtime timer is still armed"));
}
PROC_UNLOCK(p);
umtx_thread_exit(td);
/*
* Reset any sigio structures pointing to us as a result of
* F_SETOWN with our pid.
@ -595,7 +598,6 @@ exit1(struct thread *td, int rval, int signo)
wakeup(p->p_pptr);
cv_broadcast(&p->p_pwait);
sched_exit(p->p_pptr, td);
-umtx_thread_exit(td);
PROC_SLOCK(p);
p->p_state = PRS_ZOMBIE;
PROC_UNLOCK(p->p_pptr);

View file

@ -308,6 +308,8 @@ sys_thr_exit(struct thread *td, struct thr_exit_args *uap)
/* long *state */
{
umtx_thread_exit(td);
/* Signal userland that it can free the stack. */
if ((void *)uap->state != NULL) {
suword_lwpid(uap->state, 1);
@ -367,7 +369,6 @@ kern_thr_exit(struct thread *td)
KASSERT(p->p_numthreads > 1, ("too few threads"));
racct_sub(p, RACCT_NTHR, 1);
tdsigcleanup(td);
-umtx_thread_exit(td);
PROC_SLOCK(p);
thread_stopped(p);
thread_exit();

View file

@ -950,6 +950,7 @@ thread_suspend_check(int return_instead)
*/
if (__predict_false(p->p_sysent->sv_thread_detach != NULL))
(p->p_sysent->sv_thread_detach)(td);
umtx_thread_exit(td);
kern_thr_exit(td);
panic("stopped thread did not exit");
}

File diff suppressed because it is too large

View file

@ -37,7 +37,11 @@ struct umutex {
volatile __lwpid_t m_owner; /* Owner of the mutex */
__uint32_t m_flags; /* Flags of the mutex */
__uint32_t m_ceilings[2]; /* Priority protect ceiling */
-__uint32_t m_spare[4];
__uintptr_t m_rb_lnk; /* Robust linkage */
#ifndef __LP64__
__uint32_t m_pad;
#endif
__uint32_t m_spare[2];
};
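The new m_rb_lnk member takes the place of two former spare words on 64-bit targets, and of one spare word plus the explicit m_pad on 32-bit targets, so the structure presumably stays at its historical 32-byte size (4 + 4 + 8 + 16) and the userspace ABI is preserved. A compile-time check along these lines (an illustrative sketch, not part of the patch) would catch an accidental size change:

#include <sys/types.h>
#include <sys/umtx.h>

/* Both the old and the new layout are expected to occupy 32 bytes. */
_Static_assert(sizeof(struct umutex) == 32,
    "struct umutex size changed; the userspace ABI would break");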
struct ucond {

View file

@ -282,6 +282,9 @@ struct thread {
int td_no_sleeping; /* (k) Sleeping disabled count. */
int td_dom_rr_idx; /* (k) RR Numa domain selection. */
void *td_su; /* (k) FFS SU private */
uintptr_t td_rb_list; /* (k) Robust list head. */
uintptr_t td_rbp_list; /* (k) Robust priv list head. */
uintptr_t td_rb_inact; /* (k) Current in-action mutex loc. */
#define td_endzero td_sigmask
/* Copied during fork1() or create_thread(). */

View file

@ -32,13 +32,26 @@
#include <sys/_umtx.h>
/* Common lock flags */
#define USYNC_PROCESS_SHARED 0x0001 /* Process shared sync objs */
#define UMUTEX_UNOWNED 0x0
#define UMUTEX_CONTESTED 0x80000000U
/* umutex flags */
#define UMUTEX_PRIO_INHERIT 0x0004 /* Priority inherited mutex */
#define UMUTEX_PRIO_PROTECT 0x0008 /* Priority protect mutex */
#define UMUTEX_ROBUST 0x0010 /* Robust mutex */
#define UMUTEX_NONCONSISTENT 0x0020 /* Robust locked but not consistent */
/*
* The umutex.m_lock values and bits. The m_owner is the word which
* serves as the lock. Its high bit is the contention indicator and
* rest of bits records the owner TID. TIDs values start with PID_MAX
* + 2 and end by INT32_MAX. The low range [1..PID_MAX] is guaranteed
* to be useable as the special markers.
*/
#define UMUTEX_UNOWNED 0x0
#define UMUTEX_CONTESTED 0x80000000U
#define UMUTEX_RB_OWNERDEAD (UMUTEX_CONTESTED | 0x10)
#define UMUTEX_RB_NOTRECOV (UMUTEX_CONTESTED | 0x11)
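The two UMUTEX_RB_* markers deliberately include the contention bit, presumably so that a dead or unrecoverable mutex never satisfies the uncontested fast path and lockers fall into the kernel. A small sketch of decoding the owner word as described by the comment above (illustrative helpers only, not part of the patch):

#include <sys/types.h>
#include <sys/umtx.h>

#include <stdbool.h>
#include <stdint.h>

/* True if the previous owner terminated while holding the mutex. */
static inline bool
umutex_owner_dead(uint32_t owner)
{
	return (owner == UMUTEX_RB_OWNERDEAD);
}

/* True if the mutex was abandoned without recovery. */
static inline bool
umutex_unrecoverable(uint32_t owner)
{
	return (owner == UMUTEX_RB_NOTRECOV);
}

/* Extract the owner TID, ignoring the contention bit. */
static inline uint32_t
umutex_owner_tid(uint32_t owner)
{
	return (owner & ~UMUTEX_CONTESTED);
}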
/* urwlock flags */
#define URWLOCK_PREFER_READER 0x0002
@ -84,6 +97,7 @@
#define UMTX_OP_SEM2_WAIT 23
#define UMTX_OP_SEM2_WAKE 24
#define UMTX_OP_SHM 25
#define UMTX_OP_ROBUST_LISTS 26
/* Flags for UMTX_OP_CV_WAIT */
#define CVWAIT_CHECK_UNPARKING 0x01
@ -100,6 +114,12 @@
#define UMTX_SHM_DESTROY 0x0004
#define UMTX_SHM_ALIVE 0x0008
struct umtx_robust_lists_params {
uintptr_t robust_list_offset;
uintptr_t robust_priv_list_offset;
uintptr_t robust_inact_offset;
};
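This structure is handed to the new UMTX_OP_ROBUST_LISTS operation so the kernel learns where a thread keeps the heads of its robust mutex lists and the in-progress slot. A rough sketch of such a registration call follows (the per-thread structure and its field names are invented for illustration, and it is assumed that the structure size travels in the val argument):

#include <sys/types.h>
#include <sys/umtx.h>

#include <stdint.h>

/* Hypothetical per-thread bookkeeping for the example. */
struct robust_tls {
	uintptr_t	rb_list;	/* shared robust mutexes owned */
	uintptr_t	rbp_list;	/* private robust mutexes owned */
	uintptr_t	rb_inact;	/* mutex being locked or unlocked now */
};

static _Thread_local struct robust_tls rtls;

/* Tell the kernel where this thread's robust list heads live. */
static int
register_robust_lists(void)
{
	struct umtx_robust_lists_params rb;

	rb.robust_list_offset = (uintptr_t)&rtls.rb_list;
	rb.robust_priv_list_offset = (uintptr_t)&rtls.rbp_list;
	rb.robust_inact_offset = (uintptr_t)&rtls.rb_inact;
	return (_umtx_op(NULL, UMTX_OP_ROBUST_LISTS, sizeof(rb), &rb, NULL));
}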
#ifndef _KERNEL
int _umtx_op(void *obj, int op, u_long val, void *uaddr, void *uaddr2);
@ -122,6 +142,8 @@ enum {
TYPE_RWLOCK,
TYPE_FUTEX,
TYPE_SHM,
TYPE_PI_ROBUST_UMUTEX,
TYPE_PP_ROBUST_UMUTEX,
};
/* Key to represent a unique userland synchronous object */

View file

@ -476,7 +476,7 @@ vm_object_vndeallocate(vm_object_t object)
}
#endif
-if (object->ref_count == 1)
if (!umtx_shm_vnobj_persistent && object->ref_count == 1)
umtx_shm_object_terminated(object);
/*

View file

@ -300,6 +300,7 @@ vm_object_cache_is_empty(vm_object_t object)
void umtx_shm_object_init(vm_object_t object);
void umtx_shm_object_terminated(vm_object_t object);
extern int umtx_shm_vnobj_persistent;
vm_object_t vm_object_allocate (objtype_t, vm_pindex_t);
boolean_t vm_object_coalesce(vm_object_t, vm_ooffset_t, vm_size_t, vm_size_t,

View file

@ -164,6 +164,7 @@ vnode_destroy_vobject(struct vnode *vp)
return;
ASSERT_VOP_ELOCKED(vp, "vnode_destroy_vobject");
VM_OBJECT_WLOCK(obj);
umtx_shm_object_terminated(obj);
if (obj->ref_count == 0) {
/*
* don't double-terminate the object