Push down the acquisition of the page queues lock into vm_page_unwire().

Update the comment describing which lock should be held on entry to
vm_page_wire().

Reviewed by:	kib

Author:	Alan Cox
Date:	2010-05-05 03:45:46 +00:00
Commit:	e3ef0d2fcf
Parent:	d6da836201
Notes:	svn2git 2020-12-20 02:59:44 +00:00
	svn path=/head/; revision=207644

7 changed files with 15 additions and 31 deletions
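
The effect on callers is visible in every hunk below. As a minimal
before/after sketch of the common pattern (m stands for any managed
page; the identifiers mirror the diff, nothing new is introduced):

	/* Before: the caller bracketed the unwire with both the page
	 * lock and the global page queues lock. */
	vm_page_lock(m);
	vm_page_lock_queues();
	vm_page_unwire(m, 0);
	vm_page_unlock_queues();
	vm_page_unlock(m);

	/* After: only the page lock is required on entry.
	 * vm_page_unwire() now acquires the page queues lock itself,
	 * and only when the last wiring goes away and the page must be
	 * enqueued on the active or inactive queue. */
	vm_page_lock(m);
	vm_page_unwire(m, 0);
	vm_page_unlock(m);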

@@ -624,9 +624,7 @@ agp_generic_bind_memory(device_t dev, struct agp_memory *mem,
 		if (k >= i)
 			vm_page_wakeup(m);
 		vm_page_lock(m);
-		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
-		vm_page_unlock_queues();
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(mem->am_obj);
@@ -660,9 +658,7 @@ agp_generic_unbind_memory(device_t dev, struct agp_memory *mem)
 	for (i = 0; i < mem->am_size; i += PAGE_SIZE) {
 		m = vm_page_lookup(mem->am_obj, atop(i));
 		vm_page_lock(m);
-		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
-		vm_page_unlock_queues();
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(mem->am_obj);

@@ -1011,9 +1011,7 @@ agp_i810_free_memory(device_t dev, struct agp_memory *mem)
 		VM_OBJECT_LOCK(mem->am_obj);
 		m = vm_page_lookup(mem->am_obj, 0);
 		vm_page_lock(m);
-		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
-		vm_page_unlock_queues();
 		vm_page_unlock(m);
 		VM_OBJECT_UNLOCK(mem->am_obj);
 	} else {

@@ -461,9 +461,7 @@ tmpfs_nocacheread(vm_object_t tobj, vm_pindex_t idx,
 	VM_OBJECT_LOCK(tobj);
 out:
 	vm_page_lock(m);
-	vm_page_lock_queues();
 	vm_page_unwire(m, TRUE);
-	vm_page_unlock_queues();
 	vm_page_unlock(m);
 	vm_page_wakeup(m);
 	vm_object_pip_subtract(tobj, 1);

@@ -1571,7 +1571,6 @@ vfs_vmio_release(struct buf *bp)
 		 * everything on the inactive queue.
 		 */
 		vm_page_lock(m);
-		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
 		/*
 		 * We don't mess with busy pages, it is
@@ -1580,6 +1579,7 @@ vfs_vmio_release(struct buf *bp)
 		 */
 		if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 &&
 		    m->wire_count == 0) {
+			vm_page_lock_queues();
 			/*
 			 * Might as well free the page if we can and it has
 			 * no valid data. We also free the page if the
@@ -1593,8 +1593,8 @@ vfs_vmio_release(struct buf *bp)
 			} else if (buf_vm_page_count_severe()) {
 				vm_page_try_to_cache(m);
 			}
+			vm_page_unlock_queues();
 		}
-		vm_page_unlock_queues();
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
@@ -2957,9 +2957,7 @@ allocbuf(struct buf *bp, int size)
 			bp->b_pages[i] = NULL;
 			vm_page_lock(m);
-			vm_page_lock_queues();
 			vm_page_unwire(m, 0);
-			vm_page_unlock_queues();
 			vm_page_unlock(m);
 		}
 		VM_OBJECT_UNLOCK(bp->b_bufobj->bo_object);
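
vfs_vmio_release() above is the one caller that still takes the page
queues lock explicitly: after this change the lock covers only the
free/cache decisions that follow the unwire, not the unwire itself.
Condensed from the hunks above (the elided body is marked):

	vm_page_lock(m);
	vm_page_unwire(m, 0);
	if ((m->oflags & VPO_BUSY) == 0 && m->busy == 0 &&
	    m->wire_count == 0) {
		vm_page_lock_queues();
		/* ... free, cache, or requeue the page ... */
		vm_page_unlock_queues();
	}
	vm_page_unlock(m);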

@@ -799,9 +799,7 @@ RetryFault:;
 			vm_page_unlock(fs.first_m);
 			vm_page_lock(fs.m);
-			vm_page_lock_queues();
 			vm_page_unwire(fs.m, FALSE);
-			vm_page_unlock_queues();
 			vm_page_unlock(fs.m);
 		}
 		/*
@@ -1112,6 +1110,7 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 {
 	vm_paddr_t pa;
 	vm_offset_t va;
+	vm_page_t m;
 	pmap_t pmap;
 
 	pmap = vm_map_pmap(map);
@@ -1125,11 +1124,10 @@ vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
 		if (pa != 0) {
 			pmap_change_wiring(pmap, va, FALSE);
 			if (!fictitious) {
-				vm_page_lock(PHYS_TO_VM_PAGE(pa));
-				vm_page_lock_queues();
-				vm_page_unwire(PHYS_TO_VM_PAGE(pa), 1);
-				vm_page_unlock_queues();
-				vm_page_unlock(PHYS_TO_VM_PAGE(pa));
+				m = PHYS_TO_VM_PAGE(pa);
+				vm_page_lock(m);
+				vm_page_unwire(m, TRUE);
+				vm_page_unlock(m);
 			}
 		}
 	}
@@ -1275,9 +1273,7 @@ vm_fault_copy_entry(vm_map_t dst_map, vm_map_t src_map,
 		if (upgrade) {
 			vm_page_lock(src_m);
-			vm_page_lock_queues();
 			vm_page_unwire(src_m, 0);
-			vm_page_unlock_queues();
 			vm_page_unlock(src_m);
 			vm_page_lock(dst_m);

@@ -529,9 +529,7 @@ vm_thread_swapout(struct thread *td)
 			panic("vm_thread_swapout: kstack already missing?");
 		vm_page_dirty(m);
 		vm_page_lock(m);
-		vm_page_lock_queues();
 		vm_page_unwire(m, 0);
-		vm_page_unlock_queues();
 		vm_page_unlock(m);
 	}
 	VM_OBJECT_UNLOCK(ksobj);

@@ -1532,7 +1532,7 @@ vm_page_free_toq(vm_page_t m)
  *	another map, removing it from paging queues
  *	as necessary.
  *
- *	The page queues must be locked.
+ *	The page must be locked.
  *	This routine may not block.
  */
 void
@@ -1584,31 +1584,31 @@ vm_page_wire(vm_page_t m)
  *	be placed in the cache - for example, just after dirtying a page.
  *	dirty pages in the cache are not allowed.
  *
- *	The page queues must be locked.
+ *	The page must be locked.
  *	This routine may not block.
  */
 void
 vm_page_unwire(vm_page_t m, int activate)
 {
 
-	if ((m->flags & PG_UNMANAGED) == 0) {
-		mtx_assert(&vm_page_queue_mtx, MA_OWNED);
+	if ((m->flags & PG_UNMANAGED) == 0)
 		vm_page_lock_assert(m, MA_OWNED);
-	}
 	if (m->flags & PG_FICTITIOUS)
 		return;
 	if (m->wire_count > 0) {
 		m->wire_count--;
 		if (m->wire_count == 0) {
 			atomic_subtract_int(&cnt.v_wire_count, 1);
-			if (m->flags & PG_UNMANAGED) {
-				;
-			} else if (activate)
+			if ((m->flags & PG_UNMANAGED) != 0)
+				return;
+			vm_page_lock_queues();
+			if (activate)
 				vm_page_enqueue(PQ_ACTIVE, m);
 			else {
 				vm_page_flag_clear(m, PG_WINATCFLS);
 				vm_page_enqueue(PQ_INACTIVE, m);
 			}
+			vm_page_unlock_queues();
 		}
 	} else {
 		panic("vm_page_unwire: invalid wire count: %d", m->wire_count);