mm: ksm: use more folio api in ksm_might_need_to_copy()

Patch series "mm: cleanup and use more folio in page fault", v3.

Rename page_copy_prealloc() to folio_prealloc() so that it can be used by more
functions, and do more folio conversion in the page fault path.


This patch (of 5):

Since KSM only supports normal pages, and there is no swap-out/in of KSM large
folios either, add a large folio check in ksm_might_need_to_copy(), and
convert page->index to folio->index as page->index is going away.

Then convert ksm_might_need_to_copy() to use more of the folio API, which
saves nine compound_head() calls, and shorten 'address' to 'addr' to keep
within the maximum line length.
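
As a rough illustration of where those compound_head() calls come from (a
minimal sketch with hypothetical helper names, not the real
include/linux/page-flags.h code): each legacy PageXxx()/SetPageXxx() helper
resolves the head page on every call, while a folio-based caller resolves it
once and reuses the folio.

static inline bool PageUptodate_sketch(struct page *page)
{
	/* every legacy page-flag helper does its own head-page lookup */
	return folio_test_uptodate(page_folio(page));
}

static bool ksm_checks_sketch(struct page *page)
{
	struct folio *folio = page_folio(page);	/* one lookup, reused below */

	return folio_test_ksm(folio) || !folio_test_uptodate(folio);
}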

Link: https://lkml.kernel.org/r/20231118023232.1409103-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20231118023232.1409103-2-wangkefeng.wang@huawei.com
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
 2 files changed, 23 insertions(+), 20 deletions(-)

diff --git a/include/linux/ksm.h b/include/linux/ksm.h
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h

@@ -77,7 +77,7 @@ static inline void ksm_exit(struct mm_struct *mm)
  * but what if the vma was unmerged while the page was swapped out?
  */
 struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address);
+			struct vm_area_struct *vma, unsigned long addr);
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
 void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
@@ -130,7 +130,7 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
 }
 
 static inline struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address)
+			struct vm_area_struct *vma, unsigned long addr)
 {
 	return page;
 }

diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c

@@ -2876,48 +2876,51 @@ void __ksm_exit(struct mm_struct *mm)
 }
 
 struct page *ksm_might_need_to_copy(struct page *page,
-			struct vm_area_struct *vma, unsigned long address)
+			struct vm_area_struct *vma, unsigned long addr)
 {
 	struct folio *folio = page_folio(page);
 	struct anon_vma *anon_vma = folio_anon_vma(folio);
-	struct page *new_page;
+	struct folio *new_folio;
 
-	if (PageKsm(page)) {
-		if (page_stable_node(page) &&
+	if (folio_test_large(folio))
+		return page;
+
+	if (folio_test_ksm(folio)) {
+		if (folio_stable_node(folio) &&
 		    !(ksm_run & KSM_RUN_UNMERGE))
 			return page;	/* no need to copy it */
 	} else if (!anon_vma) {
 		return page;		/* no need to copy it */
-	} else if (page->index == linear_page_index(vma, address) &&
+	} else if (folio->index == linear_page_index(vma, addr) &&
 			anon_vma->root == vma->anon_vma->root) {
 		return page;		/* still no need to copy it */
 	}
 	if (PageHWPoison(page))
 		return ERR_PTR(-EHWPOISON);
-	if (!PageUptodate(page))
+	if (!folio_test_uptodate(folio))
 		return page;		/* let do_swap_page report the error */
 
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-	if (new_page &&
-	    mem_cgroup_charge(page_folio(new_page), vma->vm_mm, GFP_KERNEL)) {
-		put_page(new_page);
-		new_page = NULL;
+	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, addr, false);
+	if (new_folio &&
+	    mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL)) {
+		folio_put(new_folio);
+		new_folio = NULL;
 	}
-	if (new_page) {
-		if (copy_mc_user_highpage(new_page, page, address, vma)) {
-			put_page(new_page);
+	if (new_folio) {
+		if (copy_mc_user_highpage(&new_folio->page, page, addr, vma)) {
+			folio_put(new_folio);
 			memory_failure_queue(page_to_pfn(page), 0);
 			return ERR_PTR(-EHWPOISON);
 		}
-		SetPageDirty(new_page);
-		__SetPageUptodate(new_page);
-		__SetPageLocked(new_page);
+		folio_set_dirty(new_folio);
+		__folio_mark_uptodate(new_folio);
+		__folio_set_locked(new_folio);
 #ifdef CONFIG_SWAP
 		count_vm_event(KSM_SWPIN_COPY);
 #endif
 	}
 
-	return new_page;
+	return new_folio ? &new_folio->page : NULL;
 }
 
 void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
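
As a usage note, the return contract is unchanged by this patch: the original
page, a freshly copied page, NULL on allocation failure, or
ERR_PTR(-EHWPOISON) for a poisoned source page. A hypothetical caller-side
sketch (not the actual do_swap_page code) of how that result is consumed:

static vm_fault_t handle_ksm_copy_sketch(struct page **pagep,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page = ksm_might_need_to_copy(*pagep, vma, addr);

	if (!page)				/* allocation failure */
		return VM_FAULT_OOM;
	if (PTR_ERR(page) == -EHWPOISON)	/* poisoned source page */
		return VM_FAULT_HWPOISON;

	*pagep = page;				/* original or copied page */
	return 0;
}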