mm: Turn deactivate_file_page() into deactivate_file_folio()

This function has one caller which already has a reference to the
page, so we don't need to use get_page_unless_zero().  Also move the
prototype to mm/internal.h.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>
Author: Matthew Wilcox (Oracle)
Date:   2022-02-13 16:40:24 -05:00
Commit: 261b6840ed (parent b4545f4653)
4 changed files with 20 additions and 19 deletions
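
The reasoning in the commit message — a speculative get_page_unless_zero() is only needed when the caller might race with the final put, while a caller that already holds a reference can take another one unconditionally — can be sketched in plain userspace C. The names below (struct object, get_unless_zero(), get()) are invented stand-ins for illustration, not the kernel API:

/*
 * Userspace sketch only -- not kernel code.  "struct object",
 * get_unless_zero() and get() are invented analogues of the folio
 * refcount, get_page_unless_zero() and folio_get().
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	atomic_int refcount;		/* stands in for the folio refcount */
};

/* Analogue of get_page_unless_zero(): may fail if the object is dying. */
static bool get_unless_zero(struct object *obj)
{
	int old = atomic_load(&obj->refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(&obj->refcount, &old, old + 1))
			return true;
	}
	return false;
}

/* Analogue of folio_get(): only safe if the caller already holds a ref. */
static void get(struct object *obj)
{
	atomic_fetch_add(&obj->refcount, 1);
}

int main(void)
{
	struct object obj = { .refcount = 1 };	/* the caller's own reference */

	/* The existing reference pins the object, so a plain get suffices. */
	get(&obj);
	assert(atomic_load(&obj.refcount) == 2);

	/* The speculative get is for callers holding no reference at all. */
	atomic_store(&obj.refcount, 0);		/* object on its way to be freed */
	printf("speculative get on a dying object: %s\n",
	       get_unless_zero(&obj) ? "took a reference" : "failed safely");
	return 0;
}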

include/linux/swap.h

@@ -372,7 +372,6 @@ extern void lru_add_drain(void);
 extern void lru_add_drain_cpu(int cpu);
 extern void lru_add_drain_cpu_zone(struct zone *zone);
 extern void lru_add_drain_all(void);
-extern void deactivate_file_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
 extern void swap_setup(void);

mm/internal.h

@@ -66,6 +66,7 @@ static inline void wake_throttle_isolated(pg_data_t *pgdat)
 vm_fault_t do_swap_page(struct vm_fault *vmf);
 void folio_rotate_reclaimable(struct folio *folio);
 bool __folio_end_writeback(struct folio *folio);
+void deactivate_file_folio(struct folio *folio);
 
 void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
 		unsigned long floor, unsigned long ceiling);

mm/swap.c

@@ -630,32 +630,33 @@ void lru_add_drain_cpu(int cpu)
 }
 
 /**
- * deactivate_file_page - forcefully deactivate a file page
- * @page: page to deactivate
+ * deactivate_file_folio() - Forcefully deactivate a file folio.
+ * @folio: Folio to deactivate.
  *
- * This function hints the VM that @page is a good reclaim candidate,
- * for example if its invalidation fails due to the page being dirty
+ * This function hints to the VM that @folio is a good reclaim candidate,
+ * for example if its invalidation fails due to the folio being dirty
  * or under writeback.
+ *
+ * Context: Caller holds a reference on the page.
  */
-void deactivate_file_page(struct page *page)
+void deactivate_file_folio(struct folio *folio)
 {
+	struct pagevec *pvec;
+
 	/*
-	 * In a workload with many unevictable page such as mprotect,
-	 * unevictable page deactivation for accelerating reclaim is pointless.
+	 * In a workload with many unevictable pages such as mprotect,
+	 * unevictable folio deactivation for accelerating reclaim is pointless.
 	 */
-	if (PageUnevictable(page))
+	if (folio_test_unevictable(folio))
 		return;
 
-	if (likely(get_page_unless_zero(page))) {
-		struct pagevec *pvec;
+	folio_get(folio);
+	local_lock(&lru_pvecs.lock);
+	pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
 
-		local_lock(&lru_pvecs.lock);
-		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);
-
-		if (pagevec_add_and_need_flush(pvec, page))
-			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
-		local_unlock(&lru_pvecs.lock);
-	}
+	if (pagevec_add_and_need_flush(pvec, &folio->page))
+		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
+	local_unlock(&lru_pvecs.lock);
 }
 
 /*
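
The rewritten body above keeps the existing deferred-batching scheme: take a reference, stash the folio in a per-CPU pagevec under local_lock(), and only walk the LRU via pagevec_lru_move_fn() once the batch fills up. A rough userspace sketch of that pattern, with invented names and an ordinary global standing in for the per-CPU buffer:

/*
 * Userspace sketch only -- BATCH_SIZE, struct batch and the functions
 * below are invented stand-ins; in the kernel the buffer is a per-CPU
 * struct pagevec protected by local_lock(&lru_pvecs.lock).
 */
#include <stdio.h>

#define BATCH_SIZE 15			/* small, in the spirit of PAGEVEC_SIZE */

struct batch {
	int nr;
	void *items[BATCH_SIZE];
};

static struct batch deactivate_batch;	/* per-CPU data in the kernel */

/* Stand-in for pagevec_lru_move_fn(): do the real work, drop references. */
static void flush_batch(struct batch *b)
{
	printf("flushing %d deferred deactivations\n", b->nr);
	b->nr = 0;
}

/* Stand-in for the body of deactivate_file_folio() after this change. */
static void deactivate_deferred(void *item)
{
	struct batch *b = &deactivate_batch;	/* this_cpu_ptr() under local_lock() */

	b->items[b->nr++] = item;		/* reference was already taken */
	if (b->nr == BATCH_SIZE)		/* cf. pagevec_add_and_need_flush() */
		flush_batch(b);
}

int main(void)
{
	int dummy[40];

	for (int i = 0; i < 40; i++)
		deactivate_deferred(&dummy[i]);
	flush_batch(&deactivate_batch);		/* final drain, cf. lru_add_drain() */
	return 0;
}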

mm/truncate.c

@@ -527,7 +527,7 @@ static unsigned long __invalidate_mapping_pages(struct address_space *mapping,
 			 * of interest and try to speed up its reclaim.
 			 */
 			if (!ret) {
-				deactivate_file_page(&folio->page);
+				deactivate_file_folio(folio);
 				/* It is likely on the pagevec of a remote CPU */
 				if (nr_pagevec)
 					(*nr_pagevec)++;
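
The mm/truncate.c hunk above is the lone caller the commit message refers to: folios that cannot be invalidated right now are deactivated instead, and a counter tells the outer caller how many are likely parked on deferred (possibly remote-CPU) pagevecs. A heavily simplified userspace sketch of that caller-side pattern, with invented names rather than the kernel's API:

/* Userspace sketch only -- struct entry and the helpers are invented. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct entry {
	bool dirty;
	bool writeback;
};

/* Invalidation only succeeds for clean, idle entries. */
static bool try_invalidate(struct entry *e)
{
	return !e->dirty && !e->writeback;
}

/* Stand-in for deactivate_file_folio(): defer the reclaim hint. */
static void deactivate(struct entry *e)
{
	(void)e;
}

static size_t invalidate_entries(struct entry *entries, size_t n,
				 size_t *nr_deferred)
{
	size_t done = 0;

	for (size_t i = 0; i < n; i++) {
		if (try_invalidate(&entries[i])) {
			done++;
			continue;
		}
		/* Still in use: hint that it is a good reclaim candidate. */
		deactivate(&entries[i]);
		if (nr_deferred)
			(*nr_deferred)++;
	}
	return done;
}

int main(void)
{
	struct entry entries[4] = {
		{ 0 }, { .dirty = true }, { .writeback = true }, { 0 },
	};
	size_t deferred = 0;
	size_t done = invalidate_entries(entries, 4, &deferred);

	printf("invalidated %zu, deferred %zu\n", done, deferred);
	return 0;
}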