Mirror of https://github.com/torvalds/linux
s390/hugetlb: remove dead code for sw emulated huge pages
We now support hugepages only on hardware with EDAT1 support, so remove the
prepare/release_hugepage hooks and simplify set_huge_pte_at and huge_ptep_get.

Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 08bd4fc156
commit ce415712cf
2 changed files with 3 additions and 60 deletions
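Background: EDAT1 (the Enhanced DAT facility, facility bit 8) is the hardware
feature that provides real 1 MiB segment-backed huge pages on s390, and
MACHINE_HAS_HPAGE reflects it at run time. Before this commit, machines
without the facility fell back to a software emulation that backed each
"huge" mapping with an ordinary PTE table; that is the dead code removed
below. As a hedged sketch of where the flag comes from, assuming the
early-setup code of this era (the function and flag names are recalled from
arch/s390/kernel/early.c and may differ in detail):

/*
 * Sketch only, not verbatim kernel code: facility bit 8 is EDAT1, and
 * the machine flag behind MACHINE_HAS_HPAGE is assumed to be set during
 * early boot roughly like this.
 */
static noinline __init void detect_machine_facilities(void)
{
        if (test_facility(8))   /* EDAT1: hardware 1 MiB pages */
                S390_lowcore.machine_flags |= MACHINE_FLAG_HPAGE;
}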
arch/s390/include/asm/hugetlb.h
@@ -37,9 +37,6 @@ static inline int prepare_hugepage_range(struct file *file,
 
 #define arch_clear_hugepage_flags(page)        do { } while (0)
 
-int arch_prepare_hugepage(struct page *page);
-void arch_release_hugepage(struct page *page);
-
 static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep)
 {
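The two declarations removed above were the arch opt-in hooks that the
generic hugetlb allocator called when a huge page was created or freed.
For context, a paraphrased sketch of the caller in mm/hugetlb.c of this
era (structure and names recalled from memory, abbreviated, and not
verbatim):

/* Paraphrased sketch of the generic call site; gfp flags abbreviated. */
static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
{
        struct page *page;

        page = alloc_pages_node(nid, GFP_HIGHUSER | __GFP_COMP,
                                huge_page_order(h));
        if (page && arch_prepare_hugepage(page)) {
                /* s390 without EDAT1 failed here when the shadow
                 * PTE table could not be allocated */
                __free_pages(page, huge_page_order(h));
                return NULL;
        }
        return page;
}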
arch/s390/mm/hugetlbpage.c
@@ -86,31 +86,16 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
 void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                      pte_t *ptep, pte_t pte)
 {
-        pmd_t pmd;
+        pmd_t pmd = __pte_to_pmd(pte);
 
-        pmd = __pte_to_pmd(pte);
-        if (!MACHINE_HAS_HPAGE) {
-                /* Emulated huge ptes loose the dirty and young bit */
-                pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
-                pmd_val(pmd) |= pte_page(pte)[1].index;
-        } else
-                pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
+        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
         *(pmd_t *) ptep = pmd;
 }
 
 pte_t huge_ptep_get(pte_t *ptep)
 {
-        unsigned long origin;
-        pmd_t pmd;
+        pmd_t pmd = *(pmd_t *) ptep;
 
-        pmd = *(pmd_t *) ptep;
-        if (!MACHINE_HAS_HPAGE && pmd_present(pmd)) {
-                origin = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN;
-                pmd_val(pmd) &= ~_SEGMENT_ENTRY_ORIGIN;
-                pmd_val(pmd) |= *(unsigned long *) origin;
-                /* Emulated huge ptes are young and dirty by definition */
-                pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG | _SEGMENT_ENTRY_DIRTY;
-        }
         return __pmd_to_pte(pmd);
 }
 
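Applying this hunk, the two helpers reduce to the following (reconstructed
directly from the diff above, with comments added):

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t pte)
{
        pmd_t pmd = __pte_to_pmd(pte);

        /* With EDAT1 guaranteed, every huge pte is a real large segment */
        pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
        *(pmd_t *) ptep = pmd;
}

pte_t huge_ptep_get(pte_t *ptep)
{
        pmd_t pmd = *(pmd_t *) ptep;

        /* No emulation: dirty and young live in the segment entry itself */
        return __pmd_to_pte(pmd);
}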
@@ -125,45 +110,6 @@ pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
         return pte;
 }
 
-int arch_prepare_hugepage(struct page *page)
-{
-        unsigned long addr = page_to_phys(page);
-        pte_t pte;
-        pte_t *ptep;
-        int i;
-
-        if (MACHINE_HAS_HPAGE)
-                return 0;
-
-        ptep = (pte_t *) pte_alloc_one(&init_mm, addr);
-        if (!ptep)
-                return -ENOMEM;
-
-        pte_val(pte) = addr;
-        for (i = 0; i < PTRS_PER_PTE; i++) {
-                set_pte_at(&init_mm, addr + i * PAGE_SIZE, ptep + i, pte);
-                pte_val(pte) += PAGE_SIZE;
-        }
-        page[1].index = (unsigned long) ptep;
-        return 0;
-}
-
-void arch_release_hugepage(struct page *page)
-{
-        pte_t *ptep;
-
-        if (MACHINE_HAS_HPAGE)
-                return;
-
-        ptep = (pte_t *) page[1].index;
-        if (!ptep)
-                return;
-        clear_table((unsigned long *) ptep, _PAGE_INVALID,
-                    PTRS_PER_PTE * sizeof(pte_t));
-        page_table_free(&init_mm, (unsigned long *) ptep);
-        page[1].index = 0;
-}
-
 pte_t *huge_pte_alloc(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
 {
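What the deleted functions did: on machines without EDAT1,
arch_prepare_hugepage backed each huge page with a real PTE table in
init_mm, one entry per 4 KiB base page of the 1 MiB segment (PTRS_PER_PTE
is 256 on s390), and stashed the table pointer in page[1].index;
set_huge_pte_at and huge_ptep_get then swapped that pointer in and out of
the pmd's origin field. A self-contained userspace model of that
bookkeeping, purely illustrative (the struct and constants below are
invented for the sketch):

#include <stdlib.h>

#define BASE_PAGES_PER_SEGMENT 256      /* 1 MiB segment / 4 KiB base pages */

/* Stand-in for struct page; "stash" plays the role of page[1].index. */
struct fake_huge_page {
        unsigned long phys;             /* start of the backing range */
        unsigned long stash;            /* pointer to the shadow table */
};

/* Mirrors arch_prepare_hugepage: build a table with one entry per base
 * page of the segment and remember where it lives. */
static int prepare_fake(struct fake_huge_page *hp)
{
        unsigned long *table = calloc(BASE_PAGES_PER_SEGMENT, sizeof(*table));
        int i;

        if (!table)
                return -1;
        for (i = 0; i < BASE_PAGES_PER_SEGMENT; i++)
                table[i] = hp->phys + i * 4096UL;
        hp->stash = (unsigned long) table;
        return 0;
}

/* Mirrors arch_release_hugepage: drop the table and clear the stash. */
static void release_fake(struct fake_huge_page *hp)
{
        free((void *) hp->stash);
        hp->stash = 0;
}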