handle_mounts(): start building a sane wrapper for follow_managed()

All callers of follow_managed() follow it on success with the same steps -
d_backing_inode(path->dentry) is calculated and stored into some struct inode *
variable and, in all but one case, an unsigned variable (nd->seq to be) is
zeroed.  The single exception is lookup_fast() and there zeroing is correct
thing to do - not doing it is a pointless microoptimization.

	Add a wrapper for follow_managed() that would do that combination.
It's mostly a vehicle for code massage - it will be changing quite a bit,
and the current calling conventions are by no means final.  Right now it
takes path, nameidata and (as out params) inode and seq, similar to
__follow_mount_rcu().  Which will soon get folded into it...

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
This commit is contained in:
Al Viro 2020-01-08 20:37:23 -05:00
parent 31d1726d72
commit bd7c4b5083

View file

@@ -1385,6 +1385,18 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
 		!(path->dentry->d_flags & DCACHE_NEED_AUTOMOUNT);
 }
/*
 * handle_mounts - wrap follow_managed() with the steps all callers repeat:
 * on success, fetch the backing inode of path->dentry into *inode and zero
 * *seqp (the caller is out of RCU mode at this point, so the seq value no
 * longer matters).  Returns follow_managed()'s result: >= 0 on success,
 * negative error on failure, in which case *inode and *seqp are untouched.
 * NOTE(review): calling conventions are transitional — __follow_mount_rcu()
 * is expected to be folded in later, per the commit message.
 */
static inline int handle_mounts(struct path *path, struct nameidata *nd,
struct inode **inode, unsigned int *seqp)
{
int ret = follow_managed(path, nd);
if (likely(ret >= 0)) {
*inode = d_backing_inode(path->dentry);
*seqp = 0; /* out of RCU mode, so the value doesn't matter */
}
return ret;
}
 static int follow_dotdot_rcu(struct nameidata *nd)
 {
 	struct inode *inode = nd->inode;
@@ -1607,7 +1619,6 @@ static int lookup_fast(struct nameidata *nd,
 	struct vfsmount *mnt = nd->path.mnt;
 	struct dentry *dentry, *parent = nd->path.dentry;
 	int status = 1;
-	int err;

 	/*
 	 * Rename seqlock is not required here because in the off chance
@@ -1677,10 +1688,7 @@ static int lookup_fast(struct nameidata *nd,
 		path->mnt = mnt;
 		path->dentry = dentry;
-		err = follow_managed(path, nd);
-		if (likely(err > 0))
-			*inode = d_backing_inode(path->dentry);
-		return err;
+		return handle_mounts(path, nd, inode, seqp);
 	}

 	/* Fast lookup failed, do it the slow way */
@@ -1875,12 +1883,9 @@ static int walk_component(struct nameidata *nd, int flags)
 			return PTR_ERR(path.dentry);

 		path.mnt = nd->path.mnt;
-		err = follow_managed(&path, nd);
+		err = handle_mounts(&path, nd, &inode, &seq);
 		if (unlikely(err < 0))
 			return err;
-		seq = 0; /* we are already out of RCU mode */
-		inode = d_backing_inode(path.dentry);
 	}

 	return step_into(nd, &path, flags, inode, seq);
@@ -2365,11 +2370,9 @@ static int handle_lookup_down(struct nameidata *nd)
 			return -ECHILD;
 	} else {
 		dget(path.dentry);
-		err = follow_managed(&path, nd);
+		err = handle_mounts(&path, nd, &inode, &seq);
 		if (unlikely(err < 0))
 			return err;
-		inode = d_backing_inode(path.dentry);
-		seq = 0;
 	}
 	path_to_nameidata(&path, nd);
 	nd->inode = inode;
@@ -3392,12 +3395,9 @@ static int do_last(struct nameidata *nd,
 		got_write = false;
 	}

-	error = follow_managed(&path, nd);
+	error = handle_mounts(&path, nd, &inode, &seq);
 	if (unlikely(error < 0))
 		return error;
-	seq = 0; /* out of RCU mode, so the value doesn't matter */
-	inode = d_backing_inode(path.dentry);
 finish_lookup:
 	error = step_into(nd, &path, 0, inode, seq);
 	if (unlikely(error))