mirror of
https://github.com/freebsd/freebsd-src
synced 2024-09-20 16:54:02 +00:00
- Remove Giant from msync(2). Giant is still acquired by the lower layers
  if we drop into the pmap or vnode layers.
- Migrate the handling of zero-length msync(2)s into vm_map_sync() so that
  multithreaded applications can't change the map between implementing the
  zero-length hack in msync(2) and reacquiring the map lock in vm_map_sync().

Reviewed by: tegge
This commit is contained in:
parent
65b5d09597
commit
637315ed9c
Notes:
svn2git
2020-12-20 02:59:44 +00:00
svn path=/head/; revision=122367
|
@@ -1952,6 +1952,13 @@ vm_map_wire(vm_map_t map, vm_offset_t start, vm_offset_t end,
|
|||
* If syncio is TRUE, dirty pages are written synchronously.
|
||||
* If invalidate is TRUE, any cached pages are freed as well.
|
||||
*
|
||||
* If the size of the region from start to end is zero, we are
|
||||
* supposed to flush all modified pages within the region containing
|
||||
* start. Unfortunately, a region can be split or coalesced with
|
||||
* neighboring regions, making it difficult to determine what the
|
||||
* original region was. Therefore, we approximate this requirement by
|
||||
* flushing the current region containing start.
|
||||
*
|
||||
* Returns an error if any part of the specified range is not mapped.
|
||||
*/
|
||||
int
|
||||
|
@@ -1973,6 +1980,9 @@ vm_map_sync(
|
|||
if (!vm_map_lookup_entry(map, start, &entry)) {
|
||||
vm_map_unlock_read(map);
|
||||
return (KERN_INVALID_ADDRESS);
|
||||
} else if (start == end) {
|
||||
start = entry->start;
|
||||
end = entry->end;
|
||||
}
|
||||
/*
|
||||
* Make a first pass to check for holes.
|
||||
|
|
|
@@ -546,40 +546,13 @@ msync(td, uap)
|
|||
if ((flags & (MS_ASYNC|MS_INVALIDATE)) == (MS_ASYNC|MS_INVALIDATE))
|
||||
return (EINVAL);
|
||||
|
||||
mtx_lock(&Giant);
|
||||
|
||||
map = &td->td_proc->p_vmspace->vm_map;
|
||||
|
||||
/*
|
||||
* XXX Gak! If size is zero we are supposed to sync "all modified
|
||||
* pages with the region containing addr". Unfortunately, we don't
|
||||
* really keep track of individual mmaps so we approximate by flushing
|
||||
* the range of the map entry containing addr. This can be incorrect
|
||||
* if the region splits or is coalesced with a neighbor.
|
||||
*/
|
||||
if (size == 0) {
|
||||
vm_map_entry_t entry;
|
||||
|
||||
vm_map_lock_read(map);
|
||||
rv = vm_map_lookup_entry(map, addr, &entry);
|
||||
vm_map_unlock_read(map);
|
||||
if (rv == FALSE) {
|
||||
rv = -1;
|
||||
goto done2;
|
||||
}
|
||||
addr = entry->start;
|
||||
size = entry->end - entry->start;
|
||||
}
|
||||
|
||||
/*
|
||||
* Clean the pages and interpret the return value.
|
||||
*/
|
||||
rv = vm_map_sync(map, addr, addr + size, (flags & MS_ASYNC) == 0,
|
||||
(flags & MS_INVALIDATE) != 0);
|
||||
|
||||
done2:
|
||||
mtx_unlock(&Giant);
|
||||
|
||||
switch (rv) {
|
||||
case KERN_SUCCESS:
|
||||
return (0);
|
||||
|
|
Loading…
Reference in a new issue