Fix interaction between largepages and seals/writes.

On write with SHM_GROW_ON_WRITE, use the proper truncate path.
Do not allow a largepage shm to grow if F_SEAL_GROW is set. Note that
shrinks are not supported at all, because of the unmanaged mappings.
A call to vm_pager_update_writecount() is only valid for swap objects;
skip it for unmanaged largepages.
Largepages cannot support write sealing.
Do not apply writecnt accounting to largepage mappings.
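
For the user-visible effect, a minimal sketch in C (assuming the
FreeBSD 13 shm_create_largepage(3) wrapper and seal constants, a 2M
superpage at psind 1 on amd64, and a hypothetical /lp_example path;
the error values follow the diff below):

	#include <sys/mman.h>
	#include <err.h>
	#include <errno.h>
	#include <fcntl.h>
	#include <unistd.h>

	int
	main(void)
	{
		int fd;

		fd = shm_create_largepage("/lp_example", O_RDWR | O_CREAT,
		    1, SHM_LARGEPAGE_ALLOC_DEFAULT, 0600);
		if (fd == -1)
			err(1, "shm_create_largepage");

		/* Largepages cannot support write sealing: ENOTSUP. */
		if (fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE) == 0 ||
		    errno != ENOTSUP)
			errx(1, "F_SEAL_WRITE unexpectedly allowed");

		/* Grow to one 2M page, then seal against further growth. */
		if (ftruncate(fd, 2 * 1024 * 1024) == -1)
			err(1, "ftruncate");
		if (fcntl(fd, F_ADD_SEALS, F_SEAL_GROW) == -1)
			err(1, "F_ADD_SEALS(F_SEAL_GROW)");

		/* Growing a sealed largepage shm now fails with EPERM. */
		if (ftruncate(fd, 4 * 1024 * 1024) == 0 || errno != EPERM)
			errx(1, "sealed grow did not fail with EPERM");

		shm_unlink("/lp_example");
		close(fd);
		return (0);
	}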

Reported by:	kevans
Reviewed by:	kevans, markj
Sponsored by:	The FreeBSD Foundation
MFC after:	1 week
Differential revision:	https://reviews.freebsd.org/D26394
Konstantin Belousov 2020-09-10 20:54:44 +00:00
parent 99efb80d00
commit 7978363417
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=365613

@@ -450,9 +450,7 @@ shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
 	error = 0;
 	if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
 	    size > shmfd->shm_size) {
-		VM_OBJECT_WLOCK(shmfd->shm_object);
-		error = shm_dotruncate_locked(shmfd, size, rl_cookie);
-		VM_OBJECT_WUNLOCK(shmfd->shm_object);
+		error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
 	}
 	if (error == 0)
 		error = uiomove_object(shmfd->shm_object,
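
The write path now calls shm_dotruncate_cookie(), which owns the VM
object lock itself and dispatches to the largepage or swap-backed
truncate routine. A sketch of its shape (not the verbatim helper):

	static int
	shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
	    void *rl_cookie)
	{
		int error;

		VM_OBJECT_WLOCK(shmfd->shm_object);
		error = shm_largepage(shmfd) ?
		    shm_dotruncate_largepage(shmfd, length, rl_cookie) :
		    shm_dotruncate_locked(shmfd, length, rl_cookie);
		VM_OBJECT_WUNLOCK(shmfd->shm_object);
		return (error);
	}
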
@@ -767,6 +765,9 @@ shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
 #endif
 	}
 
+	if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
+		return (EPERM);
+
 	aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
 	if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
 		aflags |= VM_ALLOC_WAITFAIL;
@@ -1416,7 +1417,7 @@ sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
 static int
 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
     vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
-    vm_ooffset_t foff, bool writecounted, struct thread *td)
+    vm_ooffset_t foff, struct thread *td)
 {
 	struct vmspace *vms;
 	vm_map_entry_t next_entry, prev_entry;
@@ -1448,8 +1449,6 @@ shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
 		docow |= MAP_INHERIT_SHARE;
 	if ((flags & MAP_NOCORE) != 0)
 		docow |= MAP_DISABLE_COREDUMP;
-	if (writecounted)
-		docow |= MAP_WRITECOUNT;
 
 	mask = pagesizes[shmfd->shm_lp_psind] - 1;
 	if ((foff & mask) != 0)
@@ -1594,12 +1593,15 @@ shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
 	mtx_unlock(&shm_timestamp_lock);
 	vm_object_reference(shmfd->shm_object);
 
-	if (writecnt)
-		vm_pager_update_writecount(shmfd->shm_object, 0, objsize);
 	if (shm_largepage(shmfd)) {
+		writecnt = false;
 		error = shm_mmap_large(shmfd, map, addr, objsize, prot,
-		    maxprot, flags, foff, writecnt, td);
+		    maxprot, flags, foff, td);
 	} else {
+		if (writecnt) {
+			vm_pager_update_writecount(shmfd->shm_object, 0,
+			    objsize);
+		}
 		error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
 		    shmfd->shm_object, foff, writecnt, td);
 	}
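
For context on why writecnt must be forced off here: for swap-backed
shm the pager keeps a count of writable mappings, and shm_add_seals()
refuses F_SEAL_WRITE while that count is nonzero; unmanaged largepage
objects keep no such count, hence the unconditional ENOTSUP in the
next hunk. A sketch of the swap-backed check (field names assumed):

	VM_OBJECT_RLOCK(shmfd->shm_object);
	writemappings = shmfd->shm_object->un_pager.swp.writemappings;
	VM_OBJECT_RUNLOCK(shmfd->shm_object);
	if (writemappings > 0) {
		error = EBUSY;	/* writable mappings still exist */
		goto out;
	}
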
@@ -1838,6 +1840,11 @@ shm_add_seals(struct file *fp, int seals)
 	}
 	nseals = seals & ~shmfd->shm_seals;
 	if ((nseals & F_SEAL_WRITE) != 0) {
+		if (shm_largepage(shmfd)) {
+			error = ENOTSUP;
+			goto out;
+		}
+
 		/*
 		 * The rangelock above prevents writable mappings from being
 		 * added after we've started applying seals.  The RLOCK here