mm: mlock: update the interface to use folios

Update the mlock interface to accept folios rather than pages, bringing
the interface in line with the internal implementation.

munlock_vma_page() still requires a page_folio() conversion; however, this
is consistent with the existing mlock_vma_page() implementation and is a
product of rmap still dealing in pages rather than folios.
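
As an illustration of the pattern (the wrapper below is lifted from the
mm/internal.h change in this patch), the page-based entry point survives as
a thin shim over the folio implementation:

	/* rmap callers still pass pages; convert once, defer to folios */
	static inline void munlock_vma_page(struct page *page,
			struct vm_area_struct *vma, bool compound)
	{
		munlock_vma_folio(page_folio(page), vma, compound);
	}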

Link: https://lkml.kernel.org/r/cba12777c5544305014bc0cbec56bb4cc71477d8.1673526881.git.lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Christian Brauner <brauner@kernel.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Joel Fernandes (Google) <joel@joelfernandes.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Liam R. Howlett <Liam.Howlett@oracle.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mike Rapoport (IBM) <rppt@kernel.org>
Cc: William Kucharski <william.kucharski@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>

diff --git a/mm/internal.h b/mm/internal.h
@@ -533,10 +533,9 @@ extern int mlock_future_check(struct mm_struct *mm, unsigned long flags,
  * should be called with vma's mmap_lock held for read or write,
  * under page table lock for the pte/pmd being added or removed.
  *
- * mlock is usually called at the end of page_add_*_rmap(),
- * munlock at the end of page_remove_rmap(); but new anon
- * pages are managed by lru_cache_add_inactive_or_unevictable()
- * calling mlock_new_page().
+ * mlock is usually called at the end of page_add_*_rmap(), munlock at
+ * the end of page_remove_rmap(); but new anon folios are managed by
+ * folio_add_lru_vma() calling mlock_new_folio().
  *
  * @compound is used to include pmd mappings of THPs, but filter out
  * pte mappings of THPs, which cannot be consistently counted: a pte
@@ -565,18 +564,25 @@ static inline void mlock_vma_page(struct page *page,
 	mlock_vma_folio(page_folio(page), vma, compound);
 }
 
-void munlock_page(struct page *page);
-static inline void munlock_vma_page(struct page *page,
+void munlock_folio(struct folio *folio);
+
+static inline void munlock_vma_folio(struct folio *folio,
 			struct vm_area_struct *vma, bool compound)
 {
 	if (unlikely(vma->vm_flags & VM_LOCKED) &&
-	    (compound || !PageTransCompound(page)))
-		munlock_page(page);
+	    (compound || !folio_test_large(folio)))
+		munlock_folio(folio);
+}
+
+static inline void munlock_vma_page(struct page *page,
+			struct vm_area_struct *vma, bool compound)
+{
+	munlock_vma_folio(page_folio(page), vma, compound);
 }
-void mlock_new_page(struct page *page);
-bool need_mlock_page_drain(int cpu);
-void mlock_page_drain_local(void);
-void mlock_page_drain_remote(int cpu);
+void mlock_new_folio(struct folio *folio);
+bool need_mlock_drain(int cpu);
+void mlock_drain_local(void);
+void mlock_drain_remote(int cpu);
 
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
@@ -665,10 +671,10 @@ static inline void mlock_vma_page(struct page *page,
 			struct vm_area_struct *vma, bool compound) { }
 static inline void munlock_vma_page(struct page *page,
 			struct vm_area_struct *vma, bool compound) { }
-static inline void mlock_new_page(struct page *page) { }
-static inline bool need_mlock_page_drain(int cpu) { return false; }
-static inline void mlock_page_drain_local(void) { }
-static inline void mlock_page_drain_remote(int cpu) { }
+static inline void mlock_new_folio(struct folio *folio) { }
+static inline bool need_mlock_drain(int cpu) { return false; }
+static inline void mlock_drain_local(void) { }
+static inline void mlock_drain_remote(int cpu) { }
 static inline void vunmap_range_noflush(unsigned long start, unsigned long end)
 {
 }

diff --git a/mm/migrate.c b/mm/migrate.c
@@ -265,7 +265,7 @@ static bool remove_migration_pte(struct folio *folio,
 			set_pte_at(vma->vm_mm, pvmw.address, pvmw.pte, pte);
 	}
 	if (vma->vm_flags & VM_LOCKED)
-		mlock_page_drain_local();
+		mlock_drain_local();
 
 	trace_remove_migration_pte(pvmw.address, pte_val(pte),
 				   compound_order(new));

diff --git a/mm/mlock.c b/mm/mlock.c
@@ -210,7 +210,7 @@ static void mlock_folio_batch(struct folio_batch *fbatch)
 	folio_batch_reinit(fbatch);
 }
 
-void mlock_page_drain_local(void)
+void mlock_drain_local(void)
 {
 	struct folio_batch *fbatch;
 
@@ -221,7 +221,7 @@ void mlock_page_drain_local(void)
 	local_unlock(&mlock_fbatch.lock);
 }
 
-void mlock_page_drain_remote(int cpu)
+void mlock_drain_remote(int cpu)
 {
 	struct folio_batch *fbatch;
 
@@ -231,7 +231,7 @@ void mlock_page_drain_remote(int cpu)
 		mlock_folio_batch(fbatch);
 }
 
-bool need_mlock_page_drain(int cpu)
+bool need_mlock_drain(int cpu)
 {
 	return folio_batch_count(&per_cpu(mlock_fbatch.fbatch, cpu));
 }
@@ -262,13 +262,12 @@ void mlock_folio(struct folio *folio)
 }
 
 /**
- * mlock_new_page - mlock a newly allocated page not yet on LRU
- * @page: page to be mlocked, either a normal page or a THP head.
+ * mlock_new_folio - mlock a newly allocated folio not yet on LRU
+ * @folio: folio to be mlocked, either normal or a THP head.
  */
-void mlock_new_page(struct page *page)
+void mlock_new_folio(struct folio *folio)
 {
 	struct folio_batch *fbatch;
-	struct folio *folio = page_folio(page);
 	int nr_pages = folio_nr_pages(folio);
 
 	local_lock(&mlock_fbatch.lock);
@@ -286,13 +285,12 @@ void mlock_new_page(struct page *page)
 }
 
 /**
- * munlock_page - munlock a page
- * @page: page to be munlocked, either a normal page or a THP head.
+ * munlock_folio - munlock a folio
+ * @folio: folio to be munlocked, either normal or a THP head.
  */
-void munlock_page(struct page *page)
+void munlock_folio(struct folio *folio)
 {
 	struct folio_batch *fbatch;
-	struct folio *folio = page_folio(page);
 
 	local_lock(&mlock_fbatch.lock);
 	fbatch = this_cpu_ptr(&mlock_fbatch.fbatch);
@@ -314,7 +312,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 	struct vm_area_struct *vma = walk->vma;
 	spinlock_t *ptl;
 	pte_t *start_pte, *pte;
-	struct page *page;
+	struct folio *folio;
 
 	ptl = pmd_trans_huge_lock(pmd, vma);
 	if (ptl) {
@@ -322,11 +320,11 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 			goto out;
 		if (is_huge_zero_pmd(*pmd))
 			goto out;
-		page = pmd_page(*pmd);
+		folio = page_folio(pmd_page(*pmd));
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_folio(page_folio(page));
+			mlock_folio(folio);
 		else
-			munlock_page(page);
+			munlock_folio(folio);
 		goto out;
 	}
 
@@ -334,15 +332,15 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
 	for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
 		if (!pte_present(*pte))
 			continue;
-		page = vm_normal_page(vma, addr, *pte);
-		if (!page || is_zone_device_page(page))
+		folio = vm_normal_folio(vma, addr, *pte);
+		if (!folio || folio_is_zone_device(folio))
 			continue;
-		if (PageTransCompound(page))
+		if (folio_test_large(folio))
 			continue;
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_folio(page_folio(page));
+			mlock_folio(folio);
 		else
-			munlock_page(page);
+			munlock_folio(folio);
 	}
 	pte_unmap(start_pte);
 out:

diff --git a/mm/page_alloc.c b/mm/page_alloc.c
@@ -8587,7 +8587,7 @@ static int page_alloc_cpu_dead(unsigned int cpu)
 	struct zone *zone;
 
 	lru_add_drain_cpu(cpu);
-	mlock_page_drain_remote(cpu);
+	mlock_drain_remote(cpu);
 	drain_pages(cpu);
 
 	/*

diff --git a/mm/rmap.c b/mm/rmap.c
@@ -1764,7 +1764,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
 		 */
 		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_page_drain_local();
+			mlock_drain_local();
 		folio_put(folio);
 	}
 
@@ -2105,7 +2105,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		 */
 		page_remove_rmap(subpage, vma, folio_test_hugetlb(folio));
 		if (vma->vm_flags & VM_LOCKED)
-			mlock_page_drain_local();
+			mlock_drain_local();
 		folio_put(folio);
 	}

diff --git a/mm/swap.c b/mm/swap.c
@@ -562,7 +562,7 @@ void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
 
 	if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
-		mlock_new_page(&folio->page);
+		mlock_new_folio(folio);
 	else
 		folio_add_lru(folio);
 }
@@ -781,7 +781,7 @@ void lru_add_drain(void)
 	local_lock(&cpu_fbatches.lock);
 	lru_add_drain_cpu(smp_processor_id());
 	local_unlock(&cpu_fbatches.lock);
-	mlock_page_drain_local();
+	mlock_drain_local();
 }
 
 /*
@@ -796,7 +796,7 @@ static void lru_add_and_bh_lrus_drain(void)
 	lru_add_drain_cpu(smp_processor_id());
 	local_unlock(&cpu_fbatches.lock);
 	invalidate_bh_lrus_cpu();
-	mlock_page_drain_local();
+	mlock_drain_local();
 }
 
 void lru_add_drain_cpu_zone(struct zone *zone)
@@ -805,7 +805,7 @@ void lru_add_drain_cpu_zone(struct zone *zone)
 	lru_add_drain_cpu(smp_processor_id());
 	drain_local_pages(zone);
 	local_unlock(&cpu_fbatches.lock);
-	mlock_page_drain_local();
+	mlock_drain_local();
 }
 
 #ifdef CONFIG_SMP
@@ -828,7 +828,7 @@ static bool cpu_needs_drain(unsigned int cpu)
 		folio_batch_count(&fbatches->lru_deactivate) ||
 		folio_batch_count(&fbatches->lru_lazyfree) ||
 		folio_batch_count(&fbatches->activate) ||
-		need_mlock_page_drain(cpu) ||
+		need_mlock_drain(cpu) ||
 		has_bh_in_lru(cpu, NULL);
 }
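
As a usage sketch (illustrative only, not part of this patch; assumes a
caller holding a struct page *page within a vma), the conversion implied by
the new interface is mechanical: convert once at the boundary, then stay in
folio terms:

	struct folio *folio = page_folio(page);	/* page -> folio, once */

	if (vma->vm_flags & VM_LOCKED)
		mlock_folio(folio);
	else
		munlock_folio(folio);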