linux/mm/page_vma_mapped.c
Punit Agrawal 7868a2087e mm/hugetlb: add size parameter to huge_pte_offset()
A poisoned or migrated hugepage is stored as a swap entry in the page
tables.  On architectures that support hugepages consisting of
contiguous page table entries (such as on arm64) this leads to ambiguity
in determining the page table entry to return in huge_pte_offset() when
a poisoned entry is encountered.

Let's remove the ambiguity by adding a size parameter to convey
additional information about the requested address.  Also fixup the
definition/usage of huge_pte_offset() throughout the tree.
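
For illustration, a minimal sketch of the resulting signature change as seen
at the call site in page_vma_mapped_walk() below ("pte" and "addr" are
illustrative locals standing in for the walker's state):

	/* before: only the mm and the address were available */
	pte = huge_pte_offset(mm, addr);

	/* after: the expected mapping size disambiguates contiguous entries */
	pte = huge_pte_offset(mm, addr, PAGE_SIZE << compound_order(page));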

Link: http://lkml.kernel.org/r/20170522133604.11392-4-punit.agrawal@arm.com
Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
Acked-by: Steve Capper <steve.capper@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: James Hogan <james.hogan@imgtec.com> (odd fixer:METAG ARCHITECTURE)
Cc: Ralf Baechle <ralf@linux-mips.org> (supporter:MIPS)
Cc: "James E.J. Bottomley" <jejb@parisc-linux.org>
Cc: Helge Deller <deller@gmx.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Cc: Rich Felker <dalias@libc.org>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: "Aneesh Kumar K.V" <aneesh.kumar@linux.vnet.ibm.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2017-07-06 16:24:34 -07:00

#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

static inline bool check_pmd(struct page_vma_mapped_walk *pvmw)
{
        pmd_t pmde;
        /*
         * Make sure we don't re-load pmd between present and !trans_huge check.
         * We need a consistent view.
         */
        pmde = READ_ONCE(*pvmw->pmd);
        return pmd_present(pmde) && !pmd_trans_huge(pmde);
}

static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
        page_vma_mapped_walk_done(pvmw);
        return false;
}

/*
 * Map the PTE for pvmw->address. Unless PVMW_SYNC is set, bail out without
 * locking when the entry cannot possibly match. Returns true with pvmw->ptl
 * held, false otherwise.
 */
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
        pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
        if (!(pvmw->flags & PVMW_SYNC)) {
                if (pvmw->flags & PVMW_MIGRATION) {
                        if (!is_swap_pte(*pvmw->pte))
                                return false;
                } else {
                        if (!pte_present(*pvmw->pte))
                                return false;
                }
        }
        pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
        spin_lock(pvmw->ptl);
        return true;
}

/*
 * Check that the PTE at pvmw->pte refers to pvmw->page: either a present
 * mapping of one of its subpages or, for PVMW_MIGRATION walks, a migration
 * entry pointing back to one of them.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
        if (pvmw->flags & PVMW_MIGRATION) {
#ifdef CONFIG_MIGRATION
                swp_entry_t entry;
                if (!is_swap_pte(*pvmw->pte))
                        return false;
                entry = pte_to_swp_entry(*pvmw->pte);
                if (!is_migration_entry(entry))
                        return false;
                if (migration_entry_to_page(entry) - pvmw->page >=
                                hpage_nr_pages(pvmw->page)) {
                        return false;
                }
                if (migration_entry_to_page(entry) < pvmw->page)
                        return false;
#else
                WARN_ON_ONCE(1);
#endif
        } else {
                if (!pte_present(*pvmw->pte))
                        return false;

                /* THP can be referenced by any subpage */
                if (pte_page(*pvmw->pte) - pvmw->page >=
                                hpage_nr_pages(pvmw->page)) {
                        return false;
                }
                if (pte_page(*pvmw->pte) < pvmw->page)
                        return false;
        }

        return true;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at. @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
        struct mm_struct *mm = pvmw->vma->vm_mm;
        struct page *page = pvmw->page;
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;

        /* The only possible pmd mapping has been handled on last iteration */
        if (pvmw->pmd && !pvmw->pte)
                return not_found(pvmw);

        if (pvmw->pte)
                goto next_pte;

        if (unlikely(PageHuge(pvmw->page))) {
                /* when pud is not present, pte will be NULL */
                pvmw->pte = huge_pte_offset(mm, pvmw->address,
                                            PAGE_SIZE << compound_order(page));
                if (!pvmw->pte)
                        return false;

                pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
                spin_lock(pvmw->ptl);
                if (!check_pte(pvmw))
                        return not_found(pvmw);
                return true;
        }
restart:
        pgd = pgd_offset(mm, pvmw->address);
        if (!pgd_present(*pgd))
                return false;
        p4d = p4d_offset(pgd, pvmw->address);
        if (!p4d_present(*p4d))
                return false;
        pud = pud_offset(p4d, pvmw->address);
        if (!pud_present(*pud))
                return false;
        pvmw->pmd = pmd_offset(pud, pvmw->address);
        if (pmd_trans_huge(*pvmw->pmd)) {
                pvmw->ptl = pmd_lock(mm, pvmw->pmd);
                if (!pmd_present(*pvmw->pmd))
                        return not_found(pvmw);
                if (likely(pmd_trans_huge(*pvmw->pmd))) {
                        if (pvmw->flags & PVMW_MIGRATION)
                                return not_found(pvmw);
                        if (pmd_page(*pvmw->pmd) != page)
                                return not_found(pvmw);
                        return true;
                } else {
                        /* THP pmd was split under us: handle on pte level */
                        spin_unlock(pvmw->ptl);
                        pvmw->ptl = NULL;
                }
        } else {
                if (!check_pmd(pvmw))
                        return false;
        }
        if (!map_pte(pvmw))
                goto next_pte;
        while (1) {
                if (check_pte(pvmw))
                        return true;
next_pte:
                /* Seek to next pte only makes sense for THP */
                if (!PageTransHuge(pvmw->page) || PageHuge(pvmw->page))
                        return not_found(pvmw);
                do {
                        pvmw->address += PAGE_SIZE;
                        if (pvmw->address >= pvmw->vma->vm_end ||
                            pvmw->address >=
                                        __vma_address(pvmw->page, pvmw->vma) +
                                        hpage_nr_pages(pvmw->page) * PAGE_SIZE)
                                return not_found(pvmw);
                        /* Did we cross page table boundary? */
                        if (pvmw->address % PMD_SIZE == 0) {
                                pte_unmap(pvmw->pte);
                                if (pvmw->ptl) {
                                        spin_unlock(pvmw->ptl);
                                        pvmw->ptl = NULL;
                                }
                                goto restart;
                        } else {
                                pvmw->pte++;
                        }
                } while (pte_none(*pvmw->pte));

                if (!pvmw->ptl) {
                        pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
                        spin_lock(pvmw->ptl);
                }
        }
}

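/*
 * Minimal usage sketch for page_vma_mapped_walk() (illustrative only; no
 * caller lives in this file): the walk is driven in a loop so that every PTE
 * mapping a PTE-mapped THP is visited. Inside the loop, pvmw.pte (or pvmw.pmd
 * for a PMD mapping) is valid and pvmw.ptl is held; "referenced" is a
 * hypothetical counter, not part of the interface.
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *	int referenced = 0;
 *
 *	while (page_vma_mapped_walk(&pvmw))
 *		referenced++;
 */
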
/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA. Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
        struct page_vma_mapped_walk pvmw = {
                .page = page,
                .vma = vma,
                .flags = PVMW_SYNC,
        };
        unsigned long start, end;

        start = __vma_address(page, vma);
        end = start + PAGE_SIZE * (hpage_nr_pages(page) - 1);
        if (unlikely(end < vma->vm_start || start >= vma->vm_end))
                return 0;
        pvmw.address = max(start, vma->vm_start);
        if (!page_vma_mapped_walk(&pvmw))
                return 0;
        page_vma_mapped_walk_done(&pvmw);
        return 1;
}
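
/*
 * Usage sketch for page_mapped_in_vma() (illustrative only): a caller that
 * already holds a (page, vma) pair, e.g. memory-failure handling when
 * deciding which tasks to notify, simply tests the return value.
 * handle_mapped_page() is a hypothetical helper, not a kernel API.
 *
 *	if (page_mapped_in_vma(page, vma))
 *		handle_mapped_page(page, vma);
 */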