VM performance improvements, and reorder some operations in VM fault

in anticipation of a fix in pmap that will allow the mlock system call to work
without panicking the system.
This commit is contained in:
John Dyson 1996-03-28 04:53:28 +00:00
parent f32dbbeeed
commit 30dcfc09f2
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=14865
5 changed files with 155 additions and 141 deletions

View file

@ -66,7 +66,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_fault.c,v 1.41 1996/03/02 02:54:18 dyson Exp $
* $Id: vm_fault.c,v 1.42 1996/03/09 06:48:26 dyson Exp $
*/
/*
@ -705,15 +705,15 @@ RetryFault:;
}
}
UNLOCK_THINGS;
m->flags |= PG_MAPPED|PG_REFERENCED;
m->flags &= ~PG_ZERO;
m->valid = VM_PAGE_BITS_ALL;
pmap_enter(map->pmap, vaddr, VM_PAGE_TO_PHYS(m), prot, wired);
#if 0
if (vp && change_wiring == 0 && wired == 0)
if (vp && (change_wiring == 0) && (wired == 0))
pmap_prefault(map->pmap, vaddr, entry, first_object);
#endif
/*
* If the page is not wired down, then put it where the pageout daemon
@ -742,7 +742,7 @@ RetryFault:;
*/
PAGE_WAKEUP(m);
UNLOCK_AND_DEALLOCATE;
vm_object_deallocate(first_object);
return (KERN_SUCCESS);

View file

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_map.c,v 1.39 1996/03/13 01:18:14 dyson Exp $
* $Id: vm_map.c,v 1.40 1996/03/28 04:22:17 dyson Exp $
*/
/*
@ -242,7 +242,9 @@ vmspace_free(vm)
if (--vm->vm_refcnt == 0) {
int s, i;
/*
pmap_remove(&vm->vm_pmap, (vm_offset_t) kstack, (vm_offset_t) kstack+UPAGES*PAGE_SIZE);
*/
/*
* Lock the map, to wait out all other references to it.
@ -250,9 +252,9 @@ vmspace_free(vm)
* the pmap module to reclaim anything left.
*/
vm_map_lock(&vm->vm_map);
vm_object_deallocate(vm->vm_upages_obj);
(void) vm_map_delete(&vm->vm_map, vm->vm_map.min_offset,
vm->vm_map.max_offset);
vm_object_deallocate(vm->vm_upages_obj);
vm_map_unlock(&vm->vm_map);
while( vm->vm_map.ref_count != 1)
tsleep(&vm->vm_map.ref_count, PVM, "vmsfre", 0);
@ -503,134 +505,6 @@ vm_map_deallocate(map)
FREE(map, M_VMMAP);
}
/*
* vm_map_insert:
*
* Inserts the given whole VM object into the target
* map at the specified address range. The object's
* size should match that of the address range.
*
* Requires that the map be locked, and leaves it so.
*
* Returns KERN_SUCCESS, KERN_INVALID_ADDRESS (range outside the
* map's bounds), or KERN_NO_SPACE (range overlaps an existing entry).
*/
int
vm_map_insert(map, object, offset, start, end, prot, max, cow)
vm_map_t map;
vm_object_t object;
vm_ooffset_t offset;
vm_offset_t start;
vm_offset_t end;
vm_prot_t prot, max;
int cow;
{
register vm_map_entry_t new_entry;
register vm_map_entry_t prev_entry;
vm_map_entry_t temp_entry;
/*
* Check that the start and end points are not bogus.
*/
if ((start < map->min_offset) || (end > map->max_offset) ||
(start >= end))
return (KERN_INVALID_ADDRESS);
/*
* Find the entry prior to the proposed starting address; if it's part
* of an existing entry, this range is bogus.
*/
if (vm_map_lookup_entry(map, start, &temp_entry))
return (KERN_NO_SPACE);
prev_entry = temp_entry;
/*
* Assert that the next entry doesn't overlap the end point.
*/
if ((prev_entry->next != &map->header) &&
(prev_entry->next->start < end))
return (KERN_NO_SPACE);
/*
* See if we can avoid creating a new entry by extending one of our
* neighbors.
*
* Only anonymous (object == NULL) insertions are considered for
* coalescing, and only when the previous entry abuts the new range
* and matches it in all attributes that live on the map entry
* (protection, inheritance, wiring, not a map/submap).
*/
if (object == NULL) {
if ((prev_entry != &map->header) &&
(prev_entry->end == start) &&
(map->is_main_map) &&
(prev_entry->is_a_map == FALSE) &&
(prev_entry->is_sub_map == FALSE) &&
(prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
(prev_entry->protection == prot) &&
(prev_entry->max_protection == max) &&
(prev_entry->wired_count == 0)) {
if (vm_object_coalesce(prev_entry->object.vm_object,
OFF_TO_IDX(prev_entry->offset),
(vm_size_t) (prev_entry->end
- prev_entry->start),
(vm_size_t) (end - prev_entry->end))) {
/*
* Coalesced the two objects - can extend the
* previous map entry to include the new
* range.
*/
map->size += (end - prev_entry->end);
prev_entry->end = end;
return (KERN_SUCCESS);
}
}
}
/*
* Create a new entry
*/
new_entry = vm_map_entry_create(map);
new_entry->start = start;
new_entry->end = end;
new_entry->is_a_map = FALSE;
new_entry->is_sub_map = FALSE;
new_entry->object.vm_object = object;
new_entry->offset = offset;
/* Translate the caller's copy-on-write flags onto the entry. */
if (cow & MAP_COPY_NEEDED)
new_entry->needs_copy = TRUE;
else
new_entry->needs_copy = FALSE;
if (cow & MAP_COPY_ON_WRITE)
new_entry->copy_on_write = TRUE;
else
new_entry->copy_on_write = FALSE;
/*
* Protection/inheritance/wiring attributes only apply to entries in
* the main map; submaps do not carry them.
*/
if (map->is_main_map) {
new_entry->inheritance = VM_INHERIT_DEFAULT;
new_entry->protection = prot;
new_entry->max_protection = max;
new_entry->wired_count = 0;
}
/*
* Insert the new entry into the list
*/
vm_map_entry_link(map, prev_entry, new_entry);
map->size += new_entry->end - new_entry->start;
/*
* Update the free space hint
*/
if ((map->first_free == prev_entry) &&
(prev_entry->end >= new_entry->start))
map->first_free = new_entry;
return (KERN_SUCCESS);
}
/*
* SAVE_HINT:
*
@ -716,6 +590,139 @@ vm_map_lookup_entry(map, address, entry)
return (FALSE);
}
/*
* vm_map_insert:
*
* Inserts the given whole VM object into the target
* map at the specified address range. The object's
* size should match that of the address range.
*
* Requires that the map be locked, and leaves it so.
*
* Returns KERN_SUCCESS, KERN_INVALID_ADDRESS (range outside the
* map's bounds), or KERN_NO_SPACE (range overlaps an existing entry).
*/
int
vm_map_insert(map, object, offset, start, end, prot, max, cow)
vm_map_t map;
vm_object_t object;
vm_ooffset_t offset;
vm_offset_t start;
vm_offset_t end;
vm_prot_t prot, max;
int cow;
{
register vm_map_entry_t new_entry;
register vm_map_entry_t prev_entry;
vm_map_entry_t temp_entry;
/*
* Check that the start and end points are not bogus.
*/
if ((start < map->min_offset) || (end > map->max_offset) ||
(start >= end))
return (KERN_INVALID_ADDRESS);
/*
* Find the entry prior to the proposed starting address; if it's part
* of an existing entry, this range is bogus.
*/
if (vm_map_lookup_entry(map, start, &temp_entry))
return (KERN_NO_SPACE);
prev_entry = temp_entry;
/*
* Assert that the next entry doesn't overlap the end point.
*/
if ((prev_entry->next != &map->header) &&
(prev_entry->next->start < end))
return (KERN_NO_SPACE);
/*
* Common preconditions for extending the previous entry: it must
* abut the new range and match it in protection, inheritance and
* wiring. Note: unlike the earlier version of this function, the
* prev_entry attribute checks are hoisted out here so they are
* shared by both the anonymous and the same-object cases.
*/
if ((prev_entry != &map->header) &&
(prev_entry->end == start) &&
(prev_entry->is_a_map == FALSE) &&
(prev_entry->is_sub_map == FALSE) &&
((object == NULL) || (prev_entry->object.vm_object == object)) &&
(prev_entry->inheritance == VM_INHERIT_DEFAULT) &&
(prev_entry->protection == prot) &&
(prev_entry->max_protection == max) &&
(prev_entry->wired_count == 0)) {
/*
* See if we can avoid creating a new entry by extending one of our
* neighbors.
*/
if (object == NULL) {
if (vm_object_coalesce(prev_entry->object.vm_object,
OFF_TO_IDX(prev_entry->offset),
(vm_size_t) (prev_entry->end
- prev_entry->start),
(vm_size_t) (end - prev_entry->end))) {
/*
* Coalesced the two objects - can extend the
* previous map entry to include the new
* range.
*/
map->size += (end - prev_entry->end);
prev_entry->end = end;
return (KERN_SUCCESS);
}
} /* else if ((object == prev_entry->object.vm_object) &&
(prev_entry->offset + (prev_entry->end - prev_entry->start) == offset)) {
map->size += (end - prev_entry->end);
prev_entry->end = end;
printf("map optim 1\n");
return (KERN_SUCCESS);
} */
/*
* NOTE(review): the disabled branch above would extend an entry
* that maps the same object at a contiguous offset; it is left
* commented out in this revision — presumably not yet trusted.
*/
}
/*
* Create a new entry
*/
new_entry = vm_map_entry_create(map);
new_entry->start = start;
new_entry->end = end;
new_entry->is_a_map = FALSE;
new_entry->is_sub_map = FALSE;
new_entry->object.vm_object = object;
new_entry->offset = offset;
/* Translate the caller's copy-on-write flags onto the entry. */
if (cow & MAP_COPY_NEEDED)
new_entry->needs_copy = TRUE;
else
new_entry->needs_copy = FALSE;
if (cow & MAP_COPY_ON_WRITE)
new_entry->copy_on_write = TRUE;
else
new_entry->copy_on_write = FALSE;
/*
* Protection/inheritance/wiring attributes only apply to entries in
* the main map; submaps do not carry them.
*/
if (map->is_main_map) {
new_entry->inheritance = VM_INHERIT_DEFAULT;
new_entry->protection = prot;
new_entry->max_protection = max;
new_entry->wired_count = 0;
}
/*
* Insert the new entry into the list
*/
vm_map_entry_link(map, prev_entry, new_entry);
map->size += new_entry->end - new_entry->start;
/*
* Update the free space hint
*/
if ((map->first_free == prev_entry) &&
(prev_entry->end >= new_entry->start))
map->first_free = new_entry;
return (KERN_SUCCESS);
}
/*
* Find sufficient space for `length' bytes in the given map, starting at
* `start'. The map must be locked. Returns 0 on success, 1 on no space.

View file

@ -61,7 +61,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_object.c,v 1.64 1996/03/02 02:54:22 dyson Exp $
* $Id: vm_object.c,v 1.65 1996/03/11 06:11:41 hsu Exp $
*/
/*
@ -1257,6 +1257,10 @@ vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
return (TRUE);
}
if (prev_object->type != OBJT_DEFAULT) {
return (FALSE);
}
/*
* Try to collapse the object first
*/
@ -1269,7 +1273,6 @@ vm_object_coalesce(prev_object, prev_pindex, prev_size, next_size)
*/
if (prev_object->ref_count > 1 ||
prev_object->type != OBJT_DEFAULT ||
prev_object->backing_object != NULL) {
return (FALSE);
}

View file

@ -34,7 +34,7 @@
* SUCH DAMAGE.
*
* from: @(#)vm_page.c 7.4 (Berkeley) 5/7/91
* $Id: vm_page.c,v 1.48 1996/03/02 02:54:24 dyson Exp $
* $Id: vm_page.c,v 1.49 1996/03/09 06:56:39 dyson Exp $
*/
/*
@ -1006,8 +1006,10 @@ vm_page_cache(m)
{
int s;
if ((m->flags & PG_BUSY) || m->busy || m->wire_count)
if ((m->flags & PG_BUSY) || m->busy || m->wire_count) {
printf("vm_page_cache: attempting to cache busy page\n");
return;
}
if (m->queue == PQ_CACHE)
return;

View file

@ -65,7 +65,7 @@
* any improvements or extensions that they make and grant Carnegie the
* rights to redistribute these changes.
*
* $Id: vm_pageout.c,v 1.67 1996/03/09 06:53:27 dyson Exp $
* $Id: vm_pageout.c,v 1.68 1996/03/11 06:11:43 hsu Exp $
*/
/*
@ -608,8 +608,9 @@ vm_pageout_scan()
if (m->dirty == 0) {
vm_page_test_dirty(m);
} else if (m->dirty != 0)
} else if (m->dirty != 0) {
m->dirty = VM_PAGE_BITS_ALL;
}
if (m->valid == 0) {
vm_page_protect(m, VM_PROT_NONE);
vm_page_free(m);
@ -729,6 +730,7 @@ vm_pageout_scan()
vm_page_deactivate(m);
}
} else {
vm_page_protect(m, VM_PROT_NONE);
vm_page_deactivate(m);
--page_shortage;
}