Improve page LRU quality and simplify the logic.

 - Don't short-circuit aging tests for unmapped objects.  This biases
   against unmapped file pages and transient mappings.
 - Always honor PGA_REFERENCED.  We can now use this after soft busying
   to lazily restart the LRU.
 - Don't transition directly from active to cached bypassing the inactive
   queue.  This frees recently used data much too early.
 - Rename actcount to act_delta to be more consistent with use and meaning.

Reviewed by:	kib, alc
Sponsored by:	EMC / Isilon Storage Division
This commit is contained in:
Jeff Roberson 2013-07-26 23:22:05 +00:00
parent 4202b2ded4
commit bb7858ea20
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=253697

View file

@ -708,7 +708,7 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
{ {
vm_object_t backing_object, object; vm_object_t backing_object, object;
vm_page_t p; vm_page_t p;
int actcount, remove_mode; int act_delta, remove_mode;
VM_OBJECT_ASSERT_LOCKED(first_object); VM_OBJECT_ASSERT_LOCKED(first_object);
if ((first_object->flags & OBJ_FICTITIOUS) != 0) if ((first_object->flags & OBJ_FICTITIOUS) != 0)
@ -739,17 +739,17 @@ vm_pageout_object_deactivate_pages(pmap_t pmap, vm_object_t first_object,
vm_page_unlock(p); vm_page_unlock(p);
continue; continue;
} }
actcount = pmap_ts_referenced(p); act_delta = pmap_ts_referenced(p);
if ((p->aflags & PGA_REFERENCED) != 0) { if ((p->aflags & PGA_REFERENCED) != 0) {
if (actcount == 0) if (act_delta == 0)
actcount = 1; act_delta = 1;
vm_page_aflag_clear(p, PGA_REFERENCED); vm_page_aflag_clear(p, PGA_REFERENCED);
} }
if (p->queue != PQ_ACTIVE && actcount != 0) { if (p->queue != PQ_ACTIVE && act_delta != 0) {
vm_page_activate(p); vm_page_activate(p);
p->act_count += actcount; p->act_count += act_delta;
} else if (p->queue == PQ_ACTIVE) { } else if (p->queue == PQ_ACTIVE) {
if (actcount == 0) { if (act_delta == 0) {
p->act_count -= min(p->act_count, p->act_count -= min(p->act_count,
ACT_DECLINE); ACT_DECLINE);
if (!remove_mode && p->act_count == 0) { if (!remove_mode && p->act_count == 0) {
@ -869,7 +869,7 @@ vm_pageout_scan(int pass)
int page_shortage, maxscan, pcount; int page_shortage, maxscan, pcount;
int addl_page_shortage; int addl_page_shortage;
vm_object_t object; vm_object_t object;
int actcount; int act_delta;
int vnodes_skipped = 0; int vnodes_skipped = 0;
int maxlaunder; int maxlaunder;
boolean_t queues_locked; boolean_t queues_locked;
@ -989,44 +989,40 @@ vm_pageout_scan(int pass)
queues_locked = FALSE; queues_locked = FALSE;
/* /*
* If the object is not being used, we ignore previous * We bump the activation count if the page has been
* references. * referenced while in the inactive queue. This makes
*/ * it less likely that the page will be added back to the
if (object->ref_count == 0) { * inactive queue prematurely again. Here we check the
vm_page_aflag_clear(m, PGA_REFERENCED);
KASSERT(!pmap_page_is_mapped(m),
("vm_pageout_scan: page %p is mapped", m));
/*
* Otherwise, if the page has been referenced while in the
* inactive queue, we bump the "activation count" upwards,
* making it less likely that the page will be added back to
* the inactive queue prematurely again. Here we check the
* page tables (or emulated bits, if any), given the upper * page tables (or emulated bits, if any), given the upper
* level VM system not knowing anything about existing * level VM system not knowing anything about existing
* references. * references.
*/ */
} else if ((m->aflags & PGA_REFERENCED) == 0 && act_delta = 0;
(actcount = pmap_ts_referenced(m)) != 0) { if ((m->aflags & PGA_REFERENCED) != 0) {
vm_page_activate(m); vm_page_aflag_clear(m, PGA_REFERENCED);
VM_OBJECT_WUNLOCK(object); act_delta = 1;
m->act_count += actcount + ACT_ADVANCE; }
vm_page_unlock(m); if (object->ref_count != 0) {
goto relock_queues; act_delta += pmap_ts_referenced(m);
} else {
KASSERT(!pmap_page_is_mapped(m),
("vm_pageout_scan: page %p is mapped", m));
} }
/* /*
* If the upper level VM system knows about any page * If the upper level VM system knows about any page
* references, we activate the page. We also set the * references, we reactivate the page or requeue it.
* "activation count" higher than normal so that we will less
* likely place pages back onto the inactive queue again.
*/ */
if ((m->aflags & PGA_REFERENCED) != 0) { if (act_delta != 0) {
vm_page_aflag_clear(m, PGA_REFERENCED); if (object->ref_count) {
actcount = pmap_ts_referenced(m); vm_page_activate(m);
vm_page_activate(m); m->act_count += act_delta + ACT_ADVANCE;
} else {
vm_pagequeue_lock(pq);
queues_locked = TRUE;
vm_page_requeue_locked(m);
}
VM_OBJECT_WUNLOCK(object); VM_OBJECT_WUNLOCK(object);
m->act_count += actcount + ACT_ADVANCE + 1;
vm_page_unlock(m); vm_page_unlock(m);
goto relock_queues; goto relock_queues;
} }
@ -1324,50 +1320,40 @@ vm_pageout_scan(int pass)
/* /*
* Check to see "how much" the page has been used. * Check to see "how much" the page has been used.
*/ */
actcount = 0; act_delta = 0;
if (object->ref_count != 0) { if (m->aflags & PGA_REFERENCED) {
if (m->aflags & PGA_REFERENCED) { vm_page_aflag_clear(m, PGA_REFERENCED);
actcount += 1; act_delta += 1;
}
actcount += pmap_ts_referenced(m);
if (actcount) {
m->act_count += ACT_ADVANCE + actcount;
if (m->act_count > ACT_MAX)
m->act_count = ACT_MAX;
}
} }
if (object->ref_count != 0)
act_delta += pmap_ts_referenced(m);
/* /*
* Since we have "tested" this bit, we need to clear it now. * Advance or decay the act_count based on recent usage.
*/ */
vm_page_aflag_clear(m, PGA_REFERENCED); if (act_delta) {
m->act_count += ACT_ADVANCE + act_delta;
/* if (m->act_count > ACT_MAX)
* Only if an object is currently being used, do we use the m->act_count = ACT_MAX;
* page activation count stats. } else {
*/
if (actcount != 0 && object->ref_count != 0)
vm_page_requeue_locked(m);
else {
m->act_count -= min(m->act_count, ACT_DECLINE); m->act_count -= min(m->act_count, ACT_DECLINE);
if (object->ref_count == 0 || act_delta = m->act_count;
m->act_count == 0) {
page_shortage--;
/* Dequeue to avoid later lock recursion. */
vm_page_dequeue_locked(m);
if (object->ref_count == 0) {
KASSERT(!pmap_page_is_mapped(m),
("vm_pageout_scan: page %p is mapped", m));
if (m->dirty == 0)
vm_page_cache(m);
else
vm_page_deactivate(m);
} else {
vm_page_deactivate(m);
}
} else
vm_page_requeue_locked(m);
} }
/*
* Move this page to the tail of the active or inactive
* queue depending on usage.
*/
if (act_delta == 0) {
KASSERT(object->ref_count != 0 ||
!pmap_page_is_mapped(m),
("vm_pageout_scan: page %p is mapped", m));
/* Dequeue to avoid later lock recursion. */
vm_page_dequeue_locked(m);
vm_page_deactivate(m);
page_shortage--;
} else
vm_page_requeue_locked(m);
vm_page_unlock(m); vm_page_unlock(m);
VM_OBJECT_WUNLOCK(object); VM_OBJECT_WUNLOCK(object);
m = next; m = next;