Merge tag 'gfs2-4.17.fixes2' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2

Pull more gfs2 updates from Bob Peterson:
 "We decided to request the latest three patches to be merged into this
  merge window while it's still open.

   - The first patch adds a new function to lockref:
     lockref_put_not_zero

   - The second patch fixes GFS2's glock dump code so it uses the new
     lockref function. This fixes a problem whereby lock dumps could
     miss glocks.

   - I made a minor patch to update some comments and fix the lock
     ordering text in our gfs2-glocks.txt Documentation file"

* tag 'gfs2-4.17.fixes2' of git://git.kernel.org/pub/scm/linux/kernel/git/gfs2/linux-gfs2:
  GFS2: Minor improvements to comments and documentation
  gfs2: Stop using rhashtable_walk_peek
  lockref: Add lockref_put_not_zero
commit 4ac1800f81 (Linus Torvalds, 2018-04-12 13:00:44 -07:00)
6 changed files with 62 additions and 23 deletions
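For readers unfamiliar with the lockref API: the new helper drops a reference only when doing so leaves the count above zero, so a caller that cannot handle the final put directly can defer it instead (as the glock dump code below does with gfs2_glock_queue_put()). The sketch that follows is a self-contained user-space analogue using C11 <stdatomic.h>; struct ref and ref_put_not_zero are illustrative names, not the kernel API, and the real implementation (see the lib/lockref.c hunk further down) uses CMPXCHG_LOOP() with a spinlock fallback.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ref {
	atomic_int count;
};

/* Return true if the count was decremented, false if that would make it zero. */
static bool ref_put_not_zero(struct ref *r)
{
	int old = atomic_load(&r->count);

	while (old > 1) {
		/* On failure, 'old' is reloaded and the loop retries. */
		if (atomic_compare_exchange_weak(&r->count, &old, old - 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct ref r = { .count = 2 };

	printf("%d\n", ref_put_not_zero(&r));	/* 1: count goes 2 -> 1 */
	printf("%d\n", ref_put_not_zero(&r));	/* 0: refused, count stays 1 */
	return 0;
}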

Documentation/filesystems/gfs2-glocks.txt

@@ -100,14 +100,15 @@ indicates that it is caching uptodate data.
 
 Glock locking order within GFS2:
 
-1. i_mutex (if required)
+1. i_rwsem (if required)
 2. Rename glock (for rename only)
 3. Inode glock(s)
    (Parents before children, inodes at "same level" with same parent in
     lock number order)
 4. Rgrp glock(s) (for (de)allocation operations)
 5. Transaction glock (via gfs2_trans_begin) for non-read operations
-6. Page lock (always last, very important!)
+6. i_rw_mutex (if required)
+7. Page lock (always last, very important!)
 
 There are two glocks per inode. One deals with access to the inode
 itself (locking order as above), and the other, known as the iopen
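The ordering rules documented in the hunk above are the usual defence against ABBA deadlocks: as long as every code path acquires its locks in the one global order, no cycle of waiters can form. The fragment below is a generic, self-contained illustration of that discipline using plain pthreads; it is not gfs2 code, and lock_a/lock_b merely stand in for any two locks on the list (say, an inode glock and an rgrp glock). Build with: cc -pthread.

#include <pthread.h>
#include <stdio.h>

/*
 * Both threads take lock_a before lock_b, so neither can end up waiting
 * for a lock the other holds while holding one the other wants.
 */
static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock_a);	/* always first  */
	pthread_mutex_lock(&lock_b);	/* always second */
	printf("thread %ld holds both locks\n", (long)arg);
	pthread_mutex_unlock(&lock_b);
	pthread_mutex_unlock(&lock_a);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, (void *)1L);
	pthread_create(&t2, NULL, worker, (void *)2L);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);
	return 0;
}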

fs/gfs2/bmap.c

@@ -1744,7 +1744,7 @@ static int do_grow(struct inode *inode, u64 size)
  * @newsize: the size to make the file
  *
  * The file size can grow, shrink, or stay the same size. This
- * is called holding i_mutex and an exclusive glock on the inode
+ * is called holding i_rwsem and an exclusive glock on the inode
  * in question.
  *
  * Returns: errno

fs/gfs2/glock.c

@@ -1923,28 +1923,37 @@ void gfs2_glock_exit(void)
 
 static void gfs2_glock_iter_next(struct gfs2_glock_iter *gi, loff_t n)
 {
-	if (n == 0)
-		gi->gl = rhashtable_walk_peek(&gi->hti);
-	else {
-		gi->gl = rhashtable_walk_next(&gi->hti);
-		n--;
+	struct gfs2_glock *gl = gi->gl;
+
+	if (gl) {
+		if (n == 0)
+			return;
+		if (!lockref_put_not_zero(&gl->gl_lockref))
+			gfs2_glock_queue_put(gl);
 	}
 	for (;;) {
-		if (IS_ERR_OR_NULL(gi->gl)) {
-			if (!gi->gl)
-				return;
-			if (PTR_ERR(gi->gl) != -EAGAIN) {
-				gi->gl = NULL;
-				return;
+		gl = rhashtable_walk_next(&gi->hti);
+		if (IS_ERR_OR_NULL(gl)) {
+			if (gl == ERR_PTR(-EAGAIN)) {
+				n = 1;
+				continue;
 			}
-			n = 0;
-		} else if (gi->sdp == gi->gl->gl_name.ln_sbd &&
-			   !__lockref_is_dead(&gi->gl->gl_lockref)) {
-			if (!n--)
-				break;
+			gl = NULL;
+			break;
+		}
+		if (gl->gl_name.ln_sbd != gi->sdp)
+			continue;
+		if (n <= 1) {
+			if (!lockref_get_not_dead(&gl->gl_lockref))
+				continue;
+			break;
+		} else {
+			if (__lockref_is_dead(&gl->gl_lockref))
+				continue;
+			n--;
 		}
-		gi->gl = rhashtable_walk_next(&gi->hti);
 	}
+	gi->gl = gl;
 }
 
 static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
@@ -1988,7 +1997,6 @@ static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
 {
 	struct gfs2_glock_iter *gi = seq->private;
 
-	gi->gl = NULL;
 	rhashtable_walk_stop(&gi->hti);
 }
 
@@ -2076,7 +2084,8 @@ static int gfs2_glocks_release(struct inode *inode, struct file *file)
 	struct seq_file *seq = file->private_data;
 	struct gfs2_glock_iter *gi = seq->private;
 
-	gi->gl = NULL;
+	if (gi->gl)
+		gfs2_glock_put(gi->gl);
 	rhashtable_walk_exit(&gi->hti);
 	return seq_release_private(inode, file);
 }
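The new gfs2_glock_iter_next() above keeps a counted reference to the last glock it handed out, drops that reference on the next call, and then walks forward past glocks that have died in the meantime, instead of relying on rhashtable_walk_peek(). Below is a self-contained user-space sketch of that reference-holding iterator pattern; struct obj, obj_iter_next() and friends are illustrative names, not kernel APIs, and the reference counts are plain ints rather than lockrefs.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct obj {
	int refcount;			/* 0 means the object is dead */
	struct obj *next;
};

struct obj_iter {
	struct obj *head;
	struct obj *last;		/* counted reference held between calls */
};

static bool obj_get_not_dead(struct obj *o)
{
	if (o->refcount == 0)
		return false;		/* dying object: skip it */
	o->refcount++;
	return true;
}

static void obj_put(struct obj *o)
{
	o->refcount--;			/* a real implementation would free at zero */
}

/* Return the next live object after the one returned previously, or NULL. */
static struct obj *obj_iter_next(struct obj_iter *it)
{
	struct obj *o = it->last ? it->last->next : it->head;

	if (it->last)
		obj_put(it->last);
	while (o && !obj_get_not_dead(o))
		o = o->next;		/* skip objects that died since the last call */
	it->last = o;
	return o;
}

int main(void)
{
	struct obj c = { .refcount = 1, .next = NULL };
	struct obj b = { .refcount = 1, .next = &c };
	struct obj a = { .refcount = 1, .next = &b };
	struct obj_iter it = { .head = &a, .last = NULL };
	struct obj *o;

	b.refcount = 0;			/* "b" dies between iterator calls */
	while ((o = obj_iter_next(&it)))
		printf("visited live object at %p\n", (void *)o);
	return 0;
}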

fs/gfs2/ops_fstype.c

@@ -825,7 +825,7 @@ static int init_inodes(struct gfs2_sbd *sdp, int undo)
 		goto fail_rindex;
 	}
 	/*
-	 * i_mutex on quota files is special. Since this inode is hidden system
+	 * i_rwsem on quota files is special. Since this inode is hidden system
 	 * file, we are safe to define locking ourselves.
 	 */
 	lockdep_set_class(&sdp->sd_quota_inode->i_rwsem,

include/linux/lockref.h

@@ -37,6 +37,7 @@ struct lockref {
 extern void lockref_get(struct lockref *);
 extern int lockref_put_return(struct lockref *);
 extern int lockref_get_not_zero(struct lockref *);
+extern int lockref_put_not_zero(struct lockref *);
 extern int lockref_get_or_lock(struct lockref *);
 extern int lockref_put_or_lock(struct lockref *);
 

lib/lockref.c

@@ -80,6 +80,34 @@ int lockref_get_not_zero(struct lockref *lockref)
 }
 EXPORT_SYMBOL(lockref_get_not_zero);
 
+/**
+ * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
+ * @lockref: pointer to lockref structure
+ * Return: 1 if count updated successfully or 0 if count would become zero
+ */
+int lockref_put_not_zero(struct lockref *lockref)
+{
+	int retval;
+
+	CMPXCHG_LOOP(
+		new.count--;
+		if (old.count <= 1)
+			return 0;
+	,
+		return 1;
+	);
+
+	spin_lock(&lockref->lock);
+	retval = 0;
+	if (lockref->count > 1) {
+		lockref->count--;
+		retval = 1;
+	}
+	spin_unlock(&lockref->lock);
+	return retval;
+}
+EXPORT_SYMBOL(lockref_put_not_zero);
+
 /**
  * lockref_get_or_lock - Increments count unless the count is 0 or dead
  * @lockref: pointer to lockref structure