Add _NEW flag to mtx(9), sx(9), rmlock(9) and rwlock(9).

A _NEW flag passed to _init_flags() to avoid check for double-init.

Differential Revision:	https://reviews.freebsd.org/D1208
Reviewed by:	jhb, wblock
MFC after:	1 month
This commit is contained in:
Dmitry Chagin 2014-12-13 21:00:10 +00:00
parent 5571af1d8f
commit fd07ddcf6f
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=275751
14 changed files with 68 additions and 18 deletions

View file

@ -28,7 +28,7 @@
.\" from BSDI $Id: mutex.4,v 1.1.2.3 1998/04/27 22:53:13 ewv Exp $
.\" $FreeBSD$
.\"
.Dd November 16, 2011
.Dd December 13, 2014
.Dt MUTEX 9
.Os
.Sh NAME
@ -177,13 +177,17 @@ It may contain either
or
.Dv MTX_SPIN
but not both.
If the kernel has been compiled with
.Cd "option INVARIANTS" ,
.Fn mtx_init
will assert that the
.Fa mutex
has not been initialized multiple times without intervening calls to
.Fn mtx_destroy
unless the
.Dv MTX_NEW
option is specified.
See below for additional initialization options.
.Pp
The
.Fn mtx_lock
@ -453,6 +457,8 @@ to ignore this lock.
Witness should not log messages about duplicate locks being acquired.
.It Dv MTX_NOPROFILE
Do not profile this lock.
.It Dv MTX_NEW
Do not check for double-init.
.El
.Ss Lock and Unlock Flags
The flags passed to the

View file

@ -26,7 +26,7 @@
.\" $FreeBSD$
.\"
.\" Based on rwlock.9 man page
.Dd June 25, 2013
.Dd December 13, 2014
.Dt RMLOCK 9
.Os
.Sh NAME
@ -156,6 +156,15 @@ Allow threads to recursively acquire shared locks for
.Fa rm .
.It Dv RM_SLEEPABLE
Create a sleepable read-mostly lock.
.It Dv RM_NEW
If the kernel has been compiled with
.Cd "option INVARIANTS" ,
.Fn rm_init_flags
will assert that the
.Fa rm
has not been initialized multiple times without intervening calls to
.Fn rm_destroy
unless this option is specified.
.El
.It Fn rm_rlock "struct rmlock *rm" "struct rm_priotracker* tracker"
Lock

View file

@ -24,7 +24,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd June 20, 2013
.Dd December 13, 2014
.Dt RWLOCK 9
.Os
.Sh NAME
@ -154,6 +154,15 @@ Do not log any operations for this lock via
.It Dv RW_RECURSE
Allow threads to recursively acquire exclusive locks for
.Fa rw .
.It Dv RW_NEW
If the kernel has been compiled with
.Cd "option INVARIANTS" ,
.Fn rw_init_flags
will assert that the
.Fa rw
has not been initialized multiple times without intervening calls to
.Fn rw_destroy
unless this option is specified.
.El
.It Fn rw_rlock "struct rwlock *rw"
Lock

View file

@ -26,7 +26,7 @@
.\"
.\" $FreeBSD$
.\"
.Dd November 16, 2011
.Dd December 13, 2014
.Dt SX 9
.Os
.Sh NAME
@ -144,6 +144,15 @@ Allow threads to recursively acquire exclusive locks for
.It Dv SX_QUIET
Do not log any operations for this lock via
.Xr ktr 4 .
.It Dv SX_NEW
If the kernel has been compiled with
.Cd "options INVARIANTS" ,
.Fn sx_init
will assert that the
.Fa sx
has not been initialized multiple times without intervening calls to
.Fn sx_destroy
unless this option is specified.
.El
.Pp
Shared/exclusive locks are destroyed with

View file

@ -881,7 +881,7 @@ _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
m = mtxlock2mtx(c);
MPASS((opts & ~(MTX_SPIN | MTX_QUIET | MTX_RECURSE |
MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE)) == 0);
MTX_NOWITNESS | MTX_DUPOK | MTX_NOPROFILE | MTX_NEW)) == 0);
ASSERT_ATOMIC_LOAD_PTR(m->mtx_lock,
("%s: mtx_lock not aligned for %s: %p", __func__, name,
&m->mtx_lock));
@ -907,6 +907,8 @@ _mtx_init(volatile uintptr_t *c, const char *name, const char *type, int opts)
flags |= LO_DUPOK;
if (opts & MTX_NOPROFILE)
flags |= LO_NOPROFILE;
if (opts & MTX_NEW)
flags |= LO_NEW;
/* Initialize mutex. */
lock_init(&m->lock_object, class, name, type, flags);

View file

@ -277,22 +277,28 @@ void
rm_init_flags(struct rmlock *rm, const char *name, int opts)
{
struct lock_class *lc;
int liflags;
int liflags, xflags;
liflags = 0;
if (!(opts & RM_NOWITNESS))
liflags |= LO_WITNESS;
if (opts & RM_RECURSE)
liflags |= LO_RECURSABLE;
if (opts & RM_NEW)
liflags |= LO_NEW;
rm->rm_writecpus = all_cpus;
LIST_INIT(&rm->rm_activeReaders);
if (opts & RM_SLEEPABLE) {
liflags |= LO_SLEEPABLE;
lc = &lock_class_rm_sleepable;
sx_init_flags(&rm->rm_lock_sx, "rmlock_sx", SX_NOWITNESS);
xflags = (opts & RM_NEW ? SX_NEW : 0);
sx_init_flags(&rm->rm_lock_sx, "rmlock_sx",
xflags | SX_NOWITNESS);
} else {
lc = &lock_class_rm;
mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx", MTX_NOWITNESS);
xflags = (opts & RM_NEW ? MTX_NEW : 0);
mtx_init(&rm->rm_lock_mtx, name, "rmlock_mtx",
xflags | MTX_NOWITNESS);
}
lock_init(&rm->lock_object, lc, name, NULL, liflags);
}

View file

@ -187,7 +187,7 @@ _rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
rw = rwlock2rw(c);
MPASS((opts & ~(RW_DUPOK | RW_NOPROFILE | RW_NOWITNESS | RW_QUIET |
RW_RECURSE)) == 0);
RW_RECURSE | RW_NEW)) == 0);
ASSERT_ATOMIC_LOAD_PTR(rw->rw_lock,
("%s: rw_lock not aligned for %s: %p", __func__, name,
&rw->rw_lock));
@ -203,6 +203,8 @@ _rw_init_flags(volatile uintptr_t *c, const char *name, int opts)
flags |= LO_RECURSABLE;
if (opts & RW_QUIET)
flags |= LO_QUIET;
if (opts & RW_NEW)
flags |= LO_NEW;
lock_init(&rw->lock_object, &lock_class_rw, name, NULL, flags);
rw->rw_lock = RW_UNLOCKED;

View file

@ -209,7 +209,7 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
int flags;
MPASS((opts & ~(SX_QUIET | SX_RECURSE | SX_NOWITNESS | SX_DUPOK |
SX_NOPROFILE | SX_NOADAPTIVE)) == 0);
SX_NOPROFILE | SX_NOADAPTIVE | SX_NEW)) == 0);
ASSERT_ATOMIC_LOAD_PTR(sx->sx_lock,
("%s: sx_lock not aligned for %s: %p", __func__, description,
&sx->sx_lock));
@ -225,6 +225,8 @@ sx_init_flags(struct sx *sx, const char *description, int opts)
flags |= LO_RECURSABLE;
if (opts & SX_QUIET)
flags |= LO_QUIET;
if (opts & SX_NEW)
flags |= LO_NEW;
flags |= opts & SX_NOADAPTIVE;
lock_init(&sx->lock_object, &lock_class_sx, description, NULL, flags);

View file

@ -75,8 +75,8 @@ lock_init(struct lock_object *lock, struct lock_class *class, const char *name,
int i;
/* Check for double-init and zero object. */
KASSERT(!lock_initialized(lock), ("lock \"%s\" %p already initialized",
name, lock));
KASSERT(flags & LO_NEW || !lock_initialized(lock),
("lock \"%s\" %p already initialized", name, lock));
/* Look up lock class to find its index. */
for (i = 0; i < LOCK_CLASS_MAX; i++)

View file

@ -84,6 +84,7 @@ struct lock_class {
#define LO_IS_VNODE 0x00800000 /* Tell WITNESS about a VNODE lock */
#define LO_CLASSMASK 0x0f000000 /* Class index bitmask. */
#define LO_NOPROFILE 0x10000000 /* Don't profile this lock */
#define LO_NEW 0x20000000 /* Don't check for double-init */
/*
* Lock classes are statically assigned an index into the gobal lock_classes

View file

@ -52,6 +52,7 @@
#define MTX_RECURSE 0x00000004 /* Option: lock allowed to recurse */
#define MTX_NOWITNESS 0x00000008 /* Don't do any witness checking. */
#define MTX_NOPROFILE 0x00000020 /* Don't profile this lock */
#define MTX_NEW 0x00000040 /* Don't check for double-init */
/*
* Option flags passed to certain lock/unlock routines, through the use

View file

@ -45,6 +45,7 @@
#define RM_NOWITNESS 0x00000001
#define RM_RECURSE 0x00000002
#define RM_SLEEPABLE 0x00000004
#define RM_NEW 0x00000008
void rm_init(struct rmlock *rm, const char *name);
void rm_init_flags(struct rmlock *rm, const char *name, int opts);

View file

@ -258,6 +258,7 @@ struct rw_args_flags {
#define RW_NOWITNESS 0x04
#define RW_QUIET 0x08
#define RW_RECURSE 0x10
#define RW_NEW 0x20
/*
* The INVARIANTS-enabled rw_assert() functionality.

View file

@ -292,6 +292,7 @@ __sx_sunlock(struct sx *sx, const char *file, int line)
#define SX_QUIET 0x08
#define SX_NOADAPTIVE 0x10
#define SX_RECURSE 0x20
#define SX_NEW 0x40
/*
* Options passed to sx_*lock_hard().