Add vnode_pager_clean_{a,}sync(9)

Bump __FreeBSD_version for ZFS use.

Reviewed by:	markj
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D43356
Konstantin Belousov 2024-01-08 07:18:40 +02:00
parent 1f62718d8b
commit b068bb09a1
16 changed files with 82 additions and 118 deletions
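For orientation, here is a minimal sketch (not part of the commit) of the caller-side conversion applied throughout the diff below: the open-coded NULL check plus VM_OBJECT_WLOCK() / vm_object_page_clean() / VM_OBJECT_WUNLOCK() sequence becomes a single call to the new helpers. The function name example_flush_cached_data and its bool sync parameter are illustrative only; the vnode and VM-object calls are the ones visible in the hunks below.

/*
 * Minimal sketch, not part of the commit: the caller-side conversion
 * that this change applies across the tree.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static void
example_flush_cached_data(struct vnode *vp, bool sync)
{
	/*
	 * Before: each file system open-coded the flush, checking for a
	 * NULL v_object and taking the object lock itself:
	 *
	 *	if (vp->v_object != NULL) {
	 *		VM_OBJECT_WLOCK(vp->v_object);
	 *		vm_object_page_clean(vp->v_object, 0, 0,
	 *		    sync ? OBJPC_SYNC : 0);
	 *		VM_OBJECT_WUNLOCK(vp->v_object);
	 *	}
	 */
	ASSERT_VOP_LOCKED(vp, __func__);

	/* After: the helpers handle the NULL v_object case and the locking. */
	if (sync)
		vnode_pager_clean_sync(vp);
	else
		vnode_pager_clean_async(vp);
}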

View File

@ -66,6 +66,7 @@ enum symfollow { NO_FOLLOW = NOFOLLOW };
#include <sys/syscallsubr.h>
#include <sys/vm.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>
typedef struct vop_vector vnodeops_t;
#define VOP_FID VOP_VPTOFH
@ -100,11 +101,11 @@ vn_flush_cached_data(vnode_t *vp, boolean_t sync)
#else
if (vp->v_object->flags & OBJ_MIGHTBEDIRTY) {
#endif
int flags = sync ? OBJPC_SYNC : 0;
vn_lock(vp, LK_SHARED | LK_RETRY);
zfs_vmobject_wlock(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, flags);
zfs_vmobject_wunlock(vp->v_object);
if (sync)
vnode_pager_clean_sync(vp);
else
vnode_pager_clean_async(vp);
VOP_UNLOCK1(vp);
}
}

View File

@ -95,6 +95,7 @@
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>
#include "fuse.h"
#include "fuse_file.h"
@ -945,11 +946,7 @@ fuse_io_invalbuf(struct vnode *vp, struct thread *td)
}
fvdat->flag |= FN_FLUSHINPROG;
if (vp->v_bufobj.bo_object != NULL) {
VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
}
vnode_pager_clean_sync(vp);
error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
while (error) {
if (error == ERESTART || error == EINTR) {

View File

@ -1428,11 +1428,9 @@ ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
/*
* Now, flush as required.
*/
if ((flags & (V_SAVE | V_VMIO)) == V_SAVE &&
vp->v_bufobj.bo_object != NULL) {
VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
if ((flags & (V_SAVE | V_VMIO)) == V_SAVE) {
vnode_pager_clean_sync(vp);
/*
* If the page clean was interrupted, fail the invalidation.
* Not doing so, we run the risk of losing dirty pages in the

View File

@ -47,6 +47,8 @@
#include <sys/taskqueue.h>
#include <sys/vnode.h>
#include <vm/vm_param.h>
#include <vm/vnode_pager.h>
#include <vm/uma.h>
#include <fs/nfs/nfsport.h>
@ -236,7 +238,6 @@ ncl_inactive(struct vop_inactive_args *ap)
struct vnode *vp = ap->a_vp;
struct nfsnode *np;
struct thread *td;
boolean_t retv;
td = curthread;
np = VTONFS(vp);
@ -250,17 +251,9 @@ ncl_inactive(struct vop_inactive_args *ap)
* buffers/pages must be flushed before the close, so that the
* stateid is available for the writes.
*/
if (vp->v_object != NULL) {
VM_OBJECT_WLOCK(vp->v_object);
retv = vm_object_page_clean(vp->v_object, 0, 0,
OBJPC_SYNC);
VM_OBJECT_WUNLOCK(vp->v_object);
} else
retv = TRUE;
if (retv == TRUE) {
(void)ncl_flush(vp, MNT_WAIT, td, 1, 0);
(void)nfsrpc_close(vp, 1, td);
}
vnode_pager_clean_sync(vp);
(void)ncl_flush(vp, MNT_WAIT, td, 1, 0);
(void)nfsrpc_close(vp, 1, td);
}
NFSLOCKNODE(np);

View File

@ -67,6 +67,7 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>
#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsnode.h>
@ -766,9 +767,7 @@ nfs_open(struct vop_open_args *ap)
if (VN_IS_DOOMED(vp))
return (EBADF);
}
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(obj);
vnode_pager_clean_sync(vp);
}
/* Now, flush the buffer cache. */
@ -854,9 +853,7 @@ nfs_close(struct vop_close_args *ap)
if (VN_IS_DOOMED(vp) && ap->a_fflag != FNONBLOCK)
return (EBADF);
}
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_WUNLOCK(vp->v_object);
vnode_pager_clean_async(vp);
}
NFSLOCKNODE(np);
if (np->n_flag & NMODIFIED) {
@ -3637,7 +3634,6 @@ nfs_allocate(struct vop_allocate_args *ap)
{
struct vnode *vp = ap->a_vp;
struct thread *td = curthread;
vm_object_t obj;
struct nfsvattr nfsva;
struct nfsmount *nmp;
struct nfsnode *np;
@ -3667,12 +3663,7 @@ nfs_allocate(struct vop_allocate_args *ap)
* file's allocation on the server.
*/
if (error == 0) {
obj = vp->v_object;
if (obj != NULL) {
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(obj);
}
vnode_pager_clean_sync(vp);
error = ncl_flush(vp, MNT_WAIT, td, 1, 0);
}
if (error == 0)
@ -3908,9 +3899,7 @@ nfs_copy_file_range(struct vop_copy_file_range_args *ap)
vn_finished_write(mp);
goto relock;
}
VM_OBJECT_WLOCK(invp_obj);
vm_object_page_clean(invp_obj, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(invp_obj);
vnode_pager_clean_sync(invp);
}
error = ncl_flush(invp, MNT_WAIT, curthread, 1, 0);
}
@ -4069,7 +4058,6 @@ static int
nfs_ioctl(struct vop_ioctl_args *ap)
{
struct vnode *vp = ap->a_vp;
vm_object_t obj;
struct nfsvattr nfsva;
struct nfsmount *nmp;
int attrflag, content, error, ret;
@ -4114,10 +4102,7 @@ nfs_ioctl(struct vop_ioctl_args *ap)
* size is up to date on the Metadata Server.
*/
obj = vp->v_object;
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(obj);
vnode_pager_clean_sync(vp);
error = ncl_flush(vp, MNT_WAIT, ap->a_td, 1, 0);
if (error == 0)
error = nfsrpc_seek(vp, (off_t *)ap->a_data, &eof,

View File

@ -51,6 +51,8 @@
#include <sys/sysctl.h>
#include <nlm/nlm_prot.h>
#include <nlm/nlm.h>
#include <vm/vm_param.h>
#include <vm/vnode_pager.h>
FEATURE(nfsd, "NFSv4 server");
@ -1715,11 +1717,7 @@ nfsvno_fsync(struct vnode *vp, u_int64_t off, int cnt, struct ucred *cred,
/*
* Give up and do the whole thing
*/
if (vp->v_object && vm_object_mightbedirty(vp->v_object)) {
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(vp->v_object);
}
vnode_pager_clean_sync(vp);
error = VOP_FSYNC(vp, MNT_WAIT, td);
} else {
/*

View File

@ -636,12 +636,7 @@ smbfs_vinvalbuf(struct vnode *vp, struct thread *td)
}
np->n_flag |= NFLUSHINPROG;
if (vp->v_bufobj.bo_object != NULL) {
VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
}
vnode_pager_clean_sync(vp);
error = vinvalbuf(vp, V_SAVE, PCATCH, 0);
while (error) {
if (error == ERESTART || error == EINTR) {

View File

@ -67,6 +67,7 @@
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>
#include <vm/uma.h>
#include <sys/aio.h>
@ -717,7 +718,6 @@ static int
aio_fsync_vnode(struct thread *td, struct vnode *vp, int op)
{
struct mount *mp;
vm_object_t obj;
int error;
for (;;) {
@ -725,12 +725,7 @@ aio_fsync_vnode(struct thread *td, struct vnode *vp, int op)
if (error != 0)
break;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
obj = vp->v_object;
if (obj != NULL) {
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, 0);
VM_OBJECT_WUNLOCK(obj);
}
vnode_pager_clean_async(vp);
if (op == LIO_DSYNC)
error = VOP_FDATASYNC(vp, td);
else

View File

@ -94,6 +94,7 @@
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>
#include <vm/vnode_pager.h>
#include <vm/uma.h>
#if defined(DEBUG_VFS_LOCKS) && (!defined(INVARIANTS) || !defined(WITNESS))
@ -3989,7 +3990,6 @@ vdrop_recycle(struct vnode *vp)
static int
vinactivef(struct vnode *vp)
{
struct vm_object *obj;
int error;
ASSERT_VOP_ELOCKED(vp, "vinactive");
@ -3999,6 +3999,7 @@ vinactivef(struct vnode *vp)
vp->v_iflag |= VI_DOINGINACT;
vp->v_iflag &= ~VI_OWEINACT;
VI_UNLOCK(vp);
/*
* Before moving off the active list, we must be sure that any
* modified pages are converted into the vnode's dirty
@ -4009,12 +4010,9 @@ vinactivef(struct vnode *vp)
* point that VOP_INACTIVE() is called, there could still be
* pending I/O and dirty pages in the object.
*/
if ((obj = vp->v_object) != NULL && (vp->v_vflag & VV_NOSYNC) == 0 &&
vm_object_mightbedirty(obj)) {
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, 0);
VM_OBJECT_WUNLOCK(obj);
}
if ((vp->v_vflag & VV_NOSYNC) == 0)
vnode_pager_clean_async(vp);
error = VOP_INACTIVE(vp);
VI_LOCK(vp);
VNPASS(vp->v_iflag & VI_DOINGINACT, vp);
@ -4112,11 +4110,7 @@ vflush(struct mount *mp, int rootrefs, int flags, struct thread *td)
* vnodes open for writing.
*/
if (flags & WRITECLOSE) {
if (vp->v_object != NULL) {
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_WUNLOCK(vp->v_object);
}
vnode_pager_clean_async(vp);
do {
error = VOP_FSYNC(vp, MNT_WAIT, td);
} while (error == ERELOOKUP);
@ -5094,17 +5088,12 @@ static void __noinline
vfs_periodic_msync_inactive(struct mount *mp, int flags)
{
struct vnode *vp, *mvp;
struct vm_object *obj;
int lkflags, objflags;
int lkflags;
bool seen_defer;
lkflags = LK_EXCLUSIVE | LK_INTERLOCK;
if (flags != MNT_WAIT) {
if (flags != MNT_WAIT)
lkflags |= LK_NOWAIT;
objflags = OBJPC_NOSYNC;
} else {
objflags = OBJPC_SYNC;
}
MNT_VNODE_FOREACH_LAZY(vp, mp, mvp, vfs_periodic_msync_inactive_filter, NULL) {
seen_defer = false;
@ -5120,11 +5109,11 @@ vfs_periodic_msync_inactive(struct mount *mp, int flags)
continue;
}
if (vget(vp, lkflags) == 0) {
obj = vp->v_object;
if (obj != NULL && (vp->v_vflag & VV_NOSYNC) == 0) {
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, objflags);
VM_OBJECT_WUNLOCK(obj);
if ((vp->v_vflag & VV_NOSYNC) == 0) {
if (flags == MNT_WAIT)
vnode_pager_clean_sync(vp);
else
vnode_pager_clean_async(vp);
}
vput(vp);
if (seen_defer)

View File

@ -83,6 +83,7 @@
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vnode_pager.h>
#include <vm/uma.h>
#include <fs/devfs/devfs.h>
@ -3546,11 +3547,7 @@ kern_fsync(struct thread *td, int fd, bool fullsync)
goto drop;
vn_lock(vp, vn_lktype_write(mp, vp) | LK_RETRY);
AUDIT_ARG_VNODE1(vp);
if (vp->v_object != NULL) {
VM_OBJECT_WLOCK(vp->v_object);
vm_object_page_clean(vp->v_object, 0, 0, 0);
VM_OBJECT_WUNLOCK(vp->v_object);
}
vnode_pager_clean_async(vp);
error = fullsync ? VOP_FSYNC(vp, MNT_WAIT, td) : VOP_FDATASYNC(vp, td);
VOP_UNLOCK(vp);
vn_finished_write(mp);

View File

@ -88,6 +88,7 @@
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>
#ifdef HWPMC_HOOKS
#include <sys/pmckern.h>
@ -2575,7 +2576,6 @@ int
vn_bmap_seekhole_locked(struct vnode *vp, u_long cmd, off_t *off,
struct ucred *cred)
{
vm_object_t obj;
off_t size;
daddr_t bn, bnp;
uint64_t bsize;
@ -2600,12 +2600,7 @@ vn_bmap_seekhole_locked(struct vnode *vp, u_long cmd, off_t *off,
}
/* See the comment in ufs_bmap_seekdata(). */
obj = vp->v_object;
if (obj != NULL) {
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(obj);
}
vnode_pager_clean_sync(vp);
bsize = vp->v_mount->mnt_stat.f_iosize;
for (bn = noff / bsize; noff < size; bn++, noff += bsize -

View File

@ -73,7 +73,7 @@
* cannot include sys/param.h and should only be updated here.
*/
#undef __FreeBSD_version
#define __FreeBSD_version 1500008
#define __FreeBSD_version 1500009
/*
* __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD,

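Since the commit message says the __FreeBSD_version bump is "for ZFS use", here is a hedged sketch of the kind of compatibility gate that out-of-tree or vendored code (such as OpenZFS's FreeBSD compatibility layer) can key off the new value. The wrapper name compat_vnode_pager_clean_sync is hypothetical and not taken from this commit.

/*
 * Hypothetical compat wrapper (not from this commit): use the new KPI
 * when the kernel is new enough, otherwise fall back to the old pattern.
 */
#include <sys/param.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>

static inline void
compat_vnode_pager_clean_sync(struct vnode *vp)
{
#if __FreeBSD_version >= 1500009
	vnode_pager_clean_sync(vp);
#else
	/* Older kernels: open-coded synchronous page clean. */
	if (vp->v_object != NULL) {
		VM_OBJECT_WLOCK(vp->v_object);
		vm_object_page_clean(vp->v_object, 0, 0, OBJPC_SYNC);
		VM_OBJECT_WUNLOCK(vp->v_object);
	}
#endif
}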
View File

@ -52,6 +52,7 @@
#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>
static int ffs_rawread_readahead(struct vnode *vp,
caddr_t udata,
@ -132,15 +133,10 @@ ffs_rawread_sync(struct vnode *vp)
vn_finished_write(mp);
return (EIO);
}
/* Attempt to msync mmap() regions to clean dirty mmap */
if ((obj = vp->v_object) != NULL &&
vm_object_mightbedirty(obj)) {
VI_UNLOCK(vp);
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(obj);
} else
VI_UNLOCK(vp);
VI_UNLOCK(vp);
/* Attempt to msync mmap() regions to clean dirty mmap */
vnode_pager_clean_sync(vp);
/* Wait for pending writes to complete */
BO_LOCK(bo);

View File

@ -48,6 +48,7 @@
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vnode_pager.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
@ -347,7 +348,6 @@ ufs_bmap_seekdata(struct vnode *vp, off_t *offp)
struct inode *ip;
struct mount *mp;
struct ufsmount *ump;
vm_object_t obj;
ufs2_daddr_t bn, daddr, nextbn;
uint64_t bsize;
off_t numblks;
@ -370,12 +370,7 @@ ufs_bmap_seekdata(struct vnode *vp, off_t *offp)
* pages into buffer writes to ensure that we see all
* allocated data.
*/
obj = vp->v_object;
if (obj != NULL) {
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, OBJPC_SYNC);
VM_OBJECT_WUNLOCK(obj);
}
vnode_pager_clean_sync(vp);
bsize = mp->mnt_stat.f_iosize;
for (bn = *offp / bsize, numblks = howmany(ip->i_size, bsize);

View File

@ -1690,3 +1690,30 @@ vnode_pager_getvp(vm_object_t object, struct vnode **vpp, bool *vp_heldp)
{
*vpp = object->handle;
}
static void
vnode_pager_clean1(struct vnode *vp, int sync_flags)
{
struct vm_object *obj;
ASSERT_VOP_LOCKED(vp, "needs lock for writes");
obj = vp->v_object;
if (obj == NULL)
return;
VM_OBJECT_WLOCK(obj);
vm_object_page_clean(obj, 0, 0, sync_flags);
VM_OBJECT_WUNLOCK(obj);
}
void
vnode_pager_clean_sync(struct vnode *vp)
{
vnode_pager_clean1(vp, OBJPC_SYNC);
}
void
vnode_pager_clean_async(struct vnode *vp)
{
vnode_pager_clean1(vp, 0);
}
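A note on the contract, as implemented above: vnode_pager_clean1() asserts that the caller holds the vnode lock and returns quietly when the vnode has no backing VM object, which is what lets the call sites earlier in this diff drop their explicit v_object != NULL checks and VM_OBJECT_WLOCK()/VM_OBJECT_WUNLOCK() pairs.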

View File

@ -39,6 +39,9 @@
#ifdef _KERNEL
struct vnode;
void vnode_pager_clean_sync(struct vnode *vp);
void vnode_pager_clean_async(struct vnode *vp);
int vnode_pager_generic_getpages(struct vnode *vp, vm_page_t *m,
int count, int *rbehind, int *rahead, vop_getpages_iodone_t iodone,
void *arg);