ufs: handle LOR between snap lock and vnode lock

When a filesystem is mounted, all of its associated snapshots must
be activated. Activation first allocates a snapshot lock (snaplk)
that is shared by all the snapshot vnodes associated with the
filesystem. As each snapshot file is activated, its own ufs vnode
lock is replaced with the snaplk. In this way, acquiring the snaplk
gives exclusive access to all the snapshots for the filesystem.
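
As a rough sketch (not the literal activation code; the names are
the ones that appear in the diff below), the swap redirects the
vnode's v_vnlock pointer from its private v_lock to the shared
sn_lock:

	/*
	 * Sketch only: vp is a snapshot vnode, exclusively locked
	 * via its own v_lock on entry; sn is the per-filesystem
	 * snapdata holding the shared snaplk.
	 */
	lockmgr(&sn->sn_lock, LK_EXCLUSIVE, NULL);
	vp->v_vnlock = &sn->sn_lock;	/* VOP_LOCK(vp) now takes snaplk */
	lockmgr(&vp->v_lock, LK_RELEASE, NULL);
	lockmgr(&sn->sn_lock, LK_RELEASE, NULL);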

A write to a ufs vnode first acquires the ufs vnode lock for the
file being written, then acquires the snaplk. Once it holds the
snaplk, it can check every snapshot to see whether any of them
needs to make a copy of the block that is about to be written. This
ffs_copyonwrite() code path establishes the ufs-vnode-then-snaplk
lock order.
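
Condensed, the ordering on that path looks as follows (a sketch of
the ffs_copyonwrite() structure, not its full logic; sn_head and
i_nextsnap are the snapshot-list fields visible in the diff below):

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);		/* 1: ufs vnode lock */
	lockmgr(&sn->sn_lock, LK_EXCLUSIVE, NULL);	/* 2: snaplk */
	TAILQ_FOREACH(ip, &sn->sn_head, i_nextsnap) {
		/*
		 * Copy the block about to be overwritten if this
		 * snapshot still claims it.
		 */
	}
	lockmgr(&sn->sn_lock, LK_RELEASE, NULL);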

When a filesystem is unmounted, it has to release all of its
snapshot vnodes. Part of doing the release is to revert each
snapshot vnode from using the snaplk back to its original vnode
lock. While holding the snaplk, the vnode lock has to be acquired,
the vnode updated to reference it, and the snaplk released.
Acquiring the vnode lock while holding the snaplk violates the
ufs-vnode-then-snaplk order. Because the vnode lock is unused at
this point, acquiring it with LK_EXCLUSIVE | LK_NOWAIT will always
succeed, and the LK_NOWAIT keeps witness(4) from recording the
reverse lock order.
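
Condensed to its core (the full version, with recursion handling,
is the new revert_snaplock() in the diff below), the reversion is:

	/* snaplk is held; the vnode's own lock is currently unused. */
	if (lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0)
		lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);	/* recover */
	vp->v_vnlock = &vp->v_lock;	/* vnode uses its own lock again */
	lockmgr(&sn->sn_lock, LK_RELEASE, NULL);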

This approach was first applied in January 2021 (173779b98f) to
avoid an LOR in ffs_snapshot_unmount(). The same LOR was recently
found again when removing a snapshot in ffs_snapremove(), which
must also revert the snaplk to the original vnode lock as part of
freeing the snapshot.

The unwind in ffs_snapremove() must additionally handle the case in
which the snaplk is held recursively with multiple references; an
equal number of references have to be taken on the vnode lock. This
change factors the lock-reversion steps out into a new function,
revert_snaplock(), which both handles the recursive locks and
avoids the LOR. The new revert_snaplock() is then used in both
ffs_snapshot_unmount() and ffs_snapremove().

Reviewed by:  kib
Tested by:    Peter Holm
MFC after:    2 weeks
Sponsored by: Netflix
Differential Revision: https://reviews.freebsd.org/D33946
commit ddf162d1d1 (parent 140db0be99)
Author: Kirk McKusick
Date:   2022-01-27 23:00:51 -08:00

@@ -175,6 +175,7 @@ static int mapacct_ufs2(struct vnode *, ufs2_daddr_t *, ufs2_daddr_t *,
 	    struct fs *, ufs_lbn_t, int);
 static int readblock(struct vnode *vp, struct buf *, ufs2_daddr_t);
 static void try_free_snapdata(struct vnode *devvp);
+static void revert_snaplock(struct vnode *, struct vnode *, struct snapdata *);
 static struct snapdata *ffs_snapdata_acquire(struct vnode *devvp);
 static int ffs_bp_snapblk(struct vnode *, struct buf *);
@@ -1651,7 +1652,7 @@ ffs_snapremove(vp)
 	struct buf *ibp;
 	struct fs *fs;
 	ufs2_daddr_t numblks, blkno, dblk;
-	int error, i, last, loc;
+	int error, last, loc;
 	struct snapdata *sn;

 	ip = VTOI(vp);
@@ -1669,20 +1670,10 @@ ffs_snapremove(vp)
 		sn = devvp->v_rdev->si_snapdata;
 		TAILQ_REMOVE(&sn->sn_head, ip, i_nextsnap);
 		ip->i_nextsnap.tqe_prev = 0;
-		VI_UNLOCK(devvp);
-		lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
-		for (i = 0; i < sn->sn_lock.lk_recurse; i++)
-			lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
-		KASSERT(vp->v_vnlock == &sn->sn_lock,
-		    ("ffs_snapremove: lost lock mutation"));
-		vp->v_vnlock = &vp->v_lock;
-		VI_LOCK(devvp);
-		while (sn->sn_lock.lk_recurse > 0)
-			lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
-		lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
+		revert_snaplock(vp, devvp, sn);
 		try_free_snapdata(devvp);
-	} else
-		VI_UNLOCK(devvp);
+	}
+	VI_UNLOCK(devvp);
 	/*
 	 * Clear all BLK_NOCOPY fields. Pass any block claims to other
 	 * snapshots that want them (see ffs_snapblkfree below).
@@ -2152,27 +2143,18 @@ ffs_snapshot_unmount(mp)
 		xp->i_nextsnap.tqe_prev = 0;
 		lockmgr(&sn->sn_lock, LK_INTERLOCK | LK_EXCLUSIVE,
 		    VI_MTX(devvp));
-		/*
-		 * Avoid LOR with above snapshot lock. The LK_NOWAIT should
-		 * never fail as the lock is currently unused. Rather than
-		 * panic, we recover by doing the blocking lock.
-		 */
-		if (lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
-			printf("ffs_snapshot_unmount: Unexpected LK_NOWAIT "
-			    "failure\n");
-			lockmgr(&vp->v_lock, LK_EXCLUSIVE, NULL);
-		}
-		KASSERT(vp->v_vnlock == &sn->sn_lock,
-		    ("ffs_snapshot_unmount: lost lock mutation"));
-		vp->v_vnlock = &vp->v_lock;
-		lockmgr(&vp->v_lock, LK_RELEASE, NULL);
-		lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
-		if (xp->i_effnlink > 0)
-			vrele(vp);
 		VI_LOCK(devvp);
+		revert_snaplock(vp, devvp, sn);
+		lockmgr(&vp->v_lock, LK_RELEASE, NULL);
+		if (xp->i_effnlink > 0) {
+			VI_UNLOCK(devvp);
+			vrele(vp);
+			VI_LOCK(devvp);
+		}
 		sn = devvp->v_rdev->si_snapdata;
 	}
 	try_free_snapdata(devvp);
+	VI_UNLOCK(devvp);
 }

 /*
@@ -2676,10 +2658,8 @@ try_free_snapdata(struct vnode *devvp)
 	sn = devvp->v_rdev->si_snapdata;

 	if (sn == NULL || TAILQ_FIRST(&sn->sn_head) != NULL ||
-	    (devvp->v_vflag & VV_COPYONWRITE) == 0) {
-		VI_UNLOCK(devvp);
+	    (devvp->v_vflag & VV_COPYONWRITE) == 0)
 		return;
-	}

 	devvp->v_rdev->si_snapdata = NULL;
 	devvp->v_vflag &= ~VV_COPYONWRITE;
@@ -2691,6 +2671,46 @@ try_free_snapdata(struct vnode *devvp)
 	if (snapblklist != NULL)
 		free(snapblklist, M_UFSMNT);
 	ffs_snapdata_free(sn);
+	VI_LOCK(devvp);
 }

+/*
+ * Revert a vnode lock from using the snapshot lock back to its own lock.
+ *
+ * Acquire a lock on the vnode's own lock and release the lock on the
+ * snapshot lock. If there are any recursions on the snapshot lock
+ * get the same number of recursions on the vnode's own lock.
+ */
+static void
+revert_snaplock(vp, devvp, sn)
+	struct vnode *vp;
+	struct vnode *devvp;
+	struct snapdata *sn;
+{
+	int i;
+
+	ASSERT_VI_LOCKED(devvp, "revert_snaplock");
+	/*
+	 * Avoid LOR with snapshot lock. The LK_NOWAIT should
+	 * never fail as the lock is currently unused. Rather than
+	 * panic, we recover by doing the blocking lock.
+	 */
+	for (i = 0; i <= sn->sn_lock.lk_recurse; i++) {
+		if (lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_NOWAIT |
+		    LK_INTERLOCK, VI_MTX(devvp)) != 0) {
+			printf("revert_snaplock: Unexpected LK_NOWAIT "
+			    "failure\n");
+			lockmgr(&vp->v_lock, LK_EXCLUSIVE | LK_INTERLOCK,
+			    VI_MTX(devvp));
+		}
+		VI_LOCK(devvp);
+	}
+	KASSERT(vp->v_vnlock == &sn->sn_lock,
+	    ("revert_snaplock: lost lock mutation"));
+	vp->v_vnlock = &vp->v_lock;
+	while (sn->sn_lock.lk_recurse > 0)
+		lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
+	lockmgr(&sn->sn_lock, LK_RELEASE, NULL);
+}
+
 static struct snapdata *