mm/ksm: use folio in stable_node_dup

Use ksm_get_folio() and save 2 compound_head calls.

Link: https://lkml.kernel.org/r/20240411061713.1847574-6-alexs@kernel.org
Signed-off-by: Alex Shi (tencent) <alexs@kernel.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Chris Wright <chrisw@sous-sol.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
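
The saved compound_head() calls come from the put_page() -> folio_put()
conversions: put_page() first has to map the struct page back to its owning
folio via page_folio() (a compound_head() lookup) before dropping the
reference, while folio_put() works on the folio the caller already holds.
A minimal sketch of that relationship (put_page_sketch() is a hypothetical
name for illustration, not the exact mm.h definition):

	#include <linux/mm.h>

	/* Roughly what put_page() does under the hood (simplified sketch). */
	static inline void put_page_sketch(struct page *page)
	{
		/* page_folio() performs a compound_head() lookup to find the owning folio. */
		struct folio *folio = page_folio(page);

		folio_put(folio);	/* drop one reference on the folio */
	}

Since stable_node_dup() now keeps folios directly, both put sites can call
folio_put() and skip that lookup.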

--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1638,7 +1638,7 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
 {
 	struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
 	struct hlist_node *hlist_safe;
-	struct page *_tree_page, *tree_page = NULL;
+	struct folio *folio, *tree_folio = NULL;
 	int nr = 0;
 	int found_rmap_hlist_len;
 
@@ -1657,24 +1657,24 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
 		 * We must walk all stable_node_dup to prune the stale
 		 * stable nodes during lookup.
 		 *
-		 * get_ksm_page can drop the nodes from the
+		 * ksm_get_folio can drop the nodes from the
 		 * stable_node->hlist if they point to freed pages
 		 * (that's why we do a _safe walk). The "dup"
 		 * stable_node parameter itself will be freed from
 		 * under us if it returns NULL.
 		 */
-		_tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
-		if (!_tree_page)
+		folio = ksm_get_folio(dup, GET_KSM_PAGE_NOLOCK);
+		if (!folio)
 			continue;
 		nr += 1;
 		if (is_page_sharing_candidate(dup)) {
 			if (!found ||
 			    dup->rmap_hlist_len > found_rmap_hlist_len) {
 				if (found)
-					put_page(tree_page);
+					folio_put(tree_folio);
 				found = dup;
 				found_rmap_hlist_len = found->rmap_hlist_len;
-				tree_page = _tree_page;
+				tree_folio = folio;
 
 				/* skip put_page for found dup */
 				if (!prune_stale_stable_nodes)
@@ -1682,7 +1682,7 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
 				continue;
 			}
 		}
-		put_page(_tree_page);
+		folio_put(folio);
 	}
 
 	if (found) {
@@ -1747,7 +1747,7 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
 	}
 
 	*_stable_node_dup = found;
-	return tree_page;
+	return &tree_folio->page;
 }
 
 static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,