[PATCH] Swap Migration V5: Add CONFIG_MIGRATION for page migration support

Include page migration if the system is NUMA or has a memory model that allows
distinct areas of memory (SPARSEMEM, DISCONTIGMEM).

And:
- Only include lru_add_drain_per_cpu if building for an SMP system.
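A minimal sketch of the guard that note refers to (assumed shape of the mm/swap.c helper; that file is not among the hunks shown below). The per-CPU drain callback is only meaningful when there can be more than one CPU, so it is compiled in for SMP builds only:

#ifdef CONFIG_SMP
/* runs on each CPU to flush that CPU's LRU pagevecs back to the zone lists */
static void lru_add_drain_per_cpu(void *dummy)
{
        lru_add_drain();
}
#endif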

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Authored by Christoph Lameter on 2006-01-08 01:00:49 -08:00; committed by Linus Torvalds
parent 49d2e9cc45
commit 7cbe34cf86
3 changed files with 20 additions and 9 deletions

include/linux/swap.h

@@ -178,7 +178,9 @@ extern int vm_swappiness;
extern int isolate_lru_page(struct page *p);
extern int putback_lru_pages(struct list_head *l);
#ifdef CONFIG_MIGRATION
extern int migrate_pages(struct list_head *l, struct list_head *t);
#endif
#ifdef CONFIG_MMU
/* linux/mm/shmem.c */
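Only the migrate_pages() declaration is placed under CONFIG_MIGRATION; isolate_lru_page() and putback_lru_pages() remain visible either way. A hypothetical caller (the helper name, list handling and error handling are assumptions, not part of this patch) would combine the three roughly like this:

/* Hypothetical helper: try to push one page out via swap-based migration. */
static void example_move_page(struct page *page)
{
        LIST_HEAD(pagelist);            /* pages we want to migrate */
        LIST_HEAD(newlist);             /* target list, unused by the swap-based scheme */

        if (!isolate_lru_page(page))    /* take the page off the LRU first */
                return;
        list_add_tail(&page->lru, &pagelist);

#ifdef CONFIG_MIGRATION
        migrate_pages(&pagelist, &newlist);     /* returns the number of pages not migrated */
#endif
        putback_lru_pages(&pagelist);   /* whatever is left goes back onto the LRU */
}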

mm/Kconfig

@@ -132,3 +132,10 @@ config SPLIT_PTLOCK_CPUS
default "4096" if ARM && !CPU_CACHE_VIPT
default "4096" if PARISC && !PA20
default "4"
#
# support for page migration
#
config MIGRATION
def_bool y if NUMA || SPARSEMEM || DISCONTIGMEM
depends on SWAP
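def_bool makes MIGRATION a non-interactive option: together with the depends line it comes out as y exactly when SWAP is enabled and the system is NUMA or uses SPARSEMEM or DISCONTIGMEM, and n otherwise. Code can then key off the generated CONFIG_MIGRATION symbol; a common pattern (a sketch only, not something this patch adds) is to pair the guarded declaration with a static inline stub so callers need no #ifdefs of their own:

#ifdef CONFIG_MIGRATION
extern int migrate_pages(struct list_head *l, struct list_head *t);
#else
/* Hypothetical stub: with migration compiled out, report every page as not migrated. */
static inline int migrate_pages(struct list_head *l, struct list_head *t)
{
        struct page *page;
        int nr_failed = 0;

        list_for_each_entry(page, l, lru)
                nr_failed++;
        return nr_failed;
}
#endif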

mm/vmscan.c

@@ -568,6 +568,7 @@ static int shrink_list(struct list_head *page_list, struct scan_control *sc)
return reclaimed;
}
#ifdef CONFIG_MIGRATION
/*
* swapout a single page
* page is locked upon entry, unlocked on exit
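This #ifdef pairs with the #endif added at the end of migrate_pages() in the last hunk, so the swap-out helper and migrate_pages() both compile away when CONFIG_MIGRATION is unset. In outline (bodies elided and the static qualifier on swap_page() assumed; a sketch only):

#ifdef CONFIG_MIGRATION
/* swapout a single page; page is locked on entry, unlocked on exit */
static int swap_page(struct page *page)
{
        return 0;               /* real body not shown: writes the page to swap */
}

/* multi-pass loop over the page list; see the remaining hunks */
int migrate_pages(struct list_head *l, struct list_head *t)
{
        return 0;               /* real body not shown */
}
#endif /* CONFIG_MIGRATION */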
@@ -656,8 +657,9 @@ int migrate_pages(struct list_head *l, struct list_head *t)
/*
* Skip locked pages during the first two passes to give the
* functions holding the lock time to release the page. Later we use
* lock_page to have a higher chance of acquiring the lock.
* functions holding the lock time to release the page. Later we
* use lock_page() to have a higher chance of acquiring the
* lock.
*/
if (pass > 2)
lock_page(page);
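The rewrapped comment describes a lock escalation policy: during the first passes the page is only trylocked, giving whoever holds the lock time to drop it, and only from pass 3 on does the loop block in lock_page(). Condensed, with the trylock fallback assumed from the surrounding function (it is not part of this hunk):

if (pass > 2)
        lock_page(page);                /* late pass: sleep until we own the page lock */
else if (TestSetPageLocked(page))       /* early pass: trylock only */
        goto retry_later;               /* still locked elsewhere, revisit on the next pass */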
@@ -669,15 +671,15 @@ int migrate_pages(struct list_head *l, struct list_head *t)
* Only wait on writeback if we have already done a pass where
* we we may have triggered writeouts for lots of pages.
*/
if (pass > 0)
if (pass > 0) {
wait_on_page_writeback(page);
else
} else {
if (PageWriteback(page)) {
unlock_page(page);
goto retry_later;
}
}
#ifdef CONFIG_SWAP
if (PageAnon(page) && !PageSwapCache(page)) {
if (!add_to_swap(page)) {
unlock_page(page);
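Reassembled from the added lines (the +/- markers were lost in this rendering, and the indentation is assumed), the patched sequence waits for writeback only after pass 0, and with CONFIG_SWAP allocates a swap entry for anonymous pages so swap-based migration has somewhere to move them:

if (pass > 0) {
        /* a previous pass may have started this writeout: wait for it */
        wait_on_page_writeback(page);
} else {
        if (PageWriteback(page)) {
                unlock_page(page);      /* pass 0: don't stall, retry on a later pass */
                goto retry_later;
        }
}

#ifdef CONFIG_SWAP
if (PageAnon(page) && !PageSwapCache(page)) {
        if (!add_to_swap(page)) {       /* out of swap space: this page cannot be migrated */
                unlock_page(page);
                /* ...count the failure and continue with the next page... */
        }
}
#endif /* CONFIG_SWAP */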
@@ -686,16 +688,15 @@ int migrate_pages(struct list_head *l, struct list_head *t)
continue;
}
}
#endif /* CONFIG_SWAP */
/*
* Page is properly locked and writeback is complete.
* Try to migrate the page.
*/
if (swap_page(page)) {
if (!swap_page(page))
continue;
retry_later:
retry++;
}
retry++;
}
if (retry && pass++ < 10)
goto redo;
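Similarly reassembled (indentation assumed), the tail of the loop body now takes the success path with a plain continue and funnels every failure through the shared retry_later label; the context lines above then retry the remaining pages for up to ten passes via goto redo:

/*
 * Page is properly locked and writeback is complete.
 * Try to migrate the page.
 */
if (!swap_page(page))
        continue;               /* migrated successfully, move on to the next page */
retry_later:
        retry++;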
@@ -708,6 +709,7 @@ int migrate_pages(struct list_head *l, struct list_head *t)
return nr_failed + retry;
}
#endif
/*
* zone->lru_lock is heavily contended. Some of the functions that