Merge branch 'for-next/mm' into for-next/core

Lots of cleanup to our various page-table definitions, but also some
non-critical fixes and removal of some unnecessary memory types. The
most interesting change here is the reduction of ARCH_DMA_MINALIGN back
to 64 bytes, since we're not aware of any machines that need a higher
value with the way the code is structured (only needed for non-coherent
DMA).
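
As background for the ARCH_DMA_MINALIGN change, here is a minimal sketch of the non-coherent streaming-DMA pattern that the kmalloc() alignment guarantee exists to protect; alloc_rx_buf() and its parameters are illustrative, not part of this series:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/*
 * On a non-coherent machine, the streaming DMA API cleans/invalidates
 * the CPU cache lines covering the buffer. If the buffer shared a
 * cache line with unrelated data, that data could be corrupted, so
 * kmalloc() must return ARCH_DMA_MINALIGN-aligned memory. Lowering
 * the constant to L1_CACHE_BYTES (64) is safe provided no supported
 * CPU has a larger cache line.
 */
static void *alloc_rx_buf(struct device *dev, size_t len, dma_addr_t *dma)
{
	void *buf = kmalloc(len, GFP_KERNEL); /* ARCH_DMA_MINALIGN aligned */

	if (!buf)
		return NULL;

	*dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		kfree(buf);
		return NULL;
	}
	return buf;
}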

* for-next/mm:
  arm64: tlb: fix the TTL value of tlb_get_level
  arm64/mm: Rename ARM64_SWAPPER_USES_SECTION_MAPS
  arm64: head: fix code comments in set_cpu_boot_mode_flag
  arm64: mm: drop unused __pa(__idmap_text_start)
  arm64: mm: fix the count comments in compute_indices
  arm64/mm: Fix ttbr0 values stored in struct thread_info for software-pan
  arm64: mm: Pass original fault address to handle_mm_fault()
  arm64/mm: Drop SECTION_[SHIFT|SIZE|MASK]
  arm64/mm: Use CONT_PMD_SHIFT for ARM64_MEMSTART_SHIFT
  arm64/mm: Drop SWAPPER_INIT_MAP_SIZE
  arm64: mm: decode xFSC in mem_abort_decode()
  arm64: mm: Add is_el1_data_abort() helper
  arm64: cache: Lower ARCH_DMA_MINALIGN to 64 (L1_CACHE_BYTES)
  arm64: mm: Remove unused support for Normal-WT memory type
  arm64: acpi: Map EFI_MEMORY_WT memory as Normal-NC
  arm64: mm: Remove unused support for Device-GRE memory type
  arm64: mm: Use better bitmap_zalloc()
  arm64/mm: Make vmemmap_free() available only with CONFIG_MEMORY_HOTPLUG
  arm64/mm: Remove [PUD|PMD]_TABLE_BIT from [pud|pmd]_bad()
  arm64/mm: Validate CONFIG_PGTABLE_LEVELS
Will Deacon 2021-06-24 14:04:33 +01:00
commit 81ad4bb1fe
18 changed files with 66 additions and 62 deletions

arch/arm64/include/asm/cache.h

@@ -47,7 +47,7 @@
  * cache before the transfer is done, causing old data to be seen by
  * the CPU.
  */
-#define ARCH_DMA_MINALIGN (128)
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
 
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN (1ULL << KASAN_SHADOW_SCALE_SHIFT)

arch/arm64/include/asm/kernel-pgtable.h

@@ -18,9 +18,9 @@
  * 64K (section size = 512M).
  */
 #ifdef CONFIG_ARM64_4K_PAGES
-#define ARM64_SWAPPER_USES_SECTION_MAPS 1
+#define ARM64_KERNEL_USES_PMD_MAPS 1
 #else
-#define ARM64_SWAPPER_USES_SECTION_MAPS 0
+#define ARM64_KERNEL_USES_PMD_MAPS 0
 #endif
 
 /*
@@ -33,7 +33,7 @@
 * VA range, so pages required to map highest possible PA are reserved in all
 * cases.
 */
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
 #define SWAPPER_PGTABLE_LEVELS (CONFIG_PGTABLE_LEVELS - 1)
 #define IDMAP_PGTABLE_LEVELS (ARM64_HW_PGTABLE_LEVELS(PHYS_MASK_SHIFT) - 1)
 #else
@@ -90,9 +90,9 @@
 #define IDMAP_DIR_SIZE (IDMAP_PGTABLE_LEVELS * PAGE_SIZE)
 
 /* Initial memory map size */
-#if ARM64_SWAPPER_USES_SECTION_MAPS
-#define SWAPPER_BLOCK_SHIFT SECTION_SHIFT
-#define SWAPPER_BLOCK_SIZE SECTION_SIZE
+#if ARM64_KERNEL_USES_PMD_MAPS
+#define SWAPPER_BLOCK_SHIFT PMD_SHIFT
+#define SWAPPER_BLOCK_SIZE PMD_SIZE
 #define SWAPPER_TABLE_SHIFT PUD_SHIFT
 #else
 #define SWAPPER_BLOCK_SHIFT PAGE_SHIFT
@@ -100,16 +100,13 @@
 #define SWAPPER_TABLE_SHIFT PMD_SHIFT
 #endif
 
-/* The size of the initial kernel direct mapping */
-#define SWAPPER_INIT_MAP_SIZE (_AC(1, UL) << SWAPPER_TABLE_SHIFT)
-
 /*
  * Initial memory map attributes.
  */
 #define SWAPPER_PTE_FLAGS (PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
 #define SWAPPER_PMD_FLAGS (PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)
 
-#if ARM64_SWAPPER_USES_SECTION_MAPS
+#if ARM64_KERNEL_USES_PMD_MAPS
 #define SWAPPER_MM_MMUFLAGS (PMD_ATTRINDX(MT_NORMAL) | SWAPPER_PMD_FLAGS)
 #else
 #define SWAPPER_MM_MMUFLAGS (PTE_ATTRINDX(MT_NORMAL) | SWAPPER_PTE_FLAGS)
@@ -125,7 +122,7 @@
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ARM64_MEMSTART_SHIFT PUD_SHIFT
 #elif defined(CONFIG_ARM64_16K_PAGES)
-#define ARM64_MEMSTART_SHIFT (PMD_SHIFT + 5)
+#define ARM64_MEMSTART_SHIFT CONT_PMD_SHIFT
 #else
 #define ARM64_MEMSTART_SHIFT PMD_SHIFT
 #endif
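
The CONT_PMD_SHIFT substitution is value-preserving on the 16K granule; a quick check of the arithmetic, assuming the usual definitions from pgtable-hwdef.h:

/*
 * With 16K pages: PAGE_SHIFT = 14, each table level resolves
 * PAGE_SHIFT - 3 = 11 bits, so PMD_SHIFT = 14 + 11 = 25. A contiguous
 * PMD span on 16K covers 2^5 = 32 entries, so
 * CONT_PMD_SHIFT = PMD_SHIFT + 5 = 30, i.e. the same 1GiB granularity
 * that the old open-coded (PMD_SHIFT + 5) expressed.
 */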

arch/arm64/include/asm/memory.h

@@ -135,10 +135,8 @@
 #define MT_NORMAL 0
 #define MT_NORMAL_TAGGED 1
 #define MT_NORMAL_NC 2
-#define MT_NORMAL_WT 3
-#define MT_DEVICE_nGnRnE 4
-#define MT_DEVICE_nGnRE 5
-#define MT_DEVICE_GRE 6
+#define MT_DEVICE_nGnRnE 3
+#define MT_DEVICE_nGnRE 4
 
 /*
  * Memory types for Stage-2 translation

arch/arm64/include/asm/mmu_context.h

@@ -177,9 +177,9 @@ static inline void update_saved_ttbr0(struct task_struct *tsk,
 		return;
 
 	if (mm == &init_mm)
-		ttbr = __pa_symbol(reserved_pg_dir);
+		ttbr = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 	else
-		ttbr = virt_to_phys(mm->pgd) | ASID(mm) << 48;
+		ttbr = phys_to_ttbr(virt_to_phys(mm->pgd)) | ASID(mm) << 48;
 
 	WRITE_ONCE(task_thread_info(tsk)->ttbr0, ttbr);
 }
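
For context on why this only bites certain configurations: phys_to_ttbr() is an identity mapping unless 52-bit physical addressing is enabled. A sketch of the relevant definition, paraphrased from memory (check pgtable.h in the tree for the exact form):

#ifdef CONFIG_ARM64_PA_BITS_52
/* Fold PA bits [51:48] into the TTBR BADDR field at bits [5:2]. */
#define phys_to_ttbr(addr)	(((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52)
#else
#define phys_to_ttbr(addr)	(addr)
#endif

Storing the raw physical address in thread_info::ttbr0 therefore produced a malformed TTBR0_EL1 value for software PAN when 52-bit PAs are in use.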

arch/arm64/include/asm/pgtable-hwdef.h

@@ -71,13 +71,6 @@
 #define PGDIR_MASK (~(PGDIR_SIZE-1))
 #define PTRS_PER_PGD (1 << (VA_BITS - PGDIR_SHIFT))
 
-/*
- * Section address mask and size definitions.
- */
-#define SECTION_SHIFT PMD_SHIFT
-#define SECTION_SIZE (_AC(1, UL) << SECTION_SHIFT)
-#define SECTION_MASK (~(SECTION_SIZE-1))
-
 /*
  * Contiguous page definitions.
  */

arch/arm64/include/asm/pgtable-prot.h

@@ -55,7 +55,6 @@ extern bool arm64_use_ng_mappings;
 #define PROT_DEVICE_nGnRnE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
 #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
 #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
-#define PROT_NORMAL_WT (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_WT))
 #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
 #define PROT_NORMAL_TAGGED (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))

arch/arm64/include/asm/pgtable.h

@@ -511,13 +511,12 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 
 #define pmd_none(pmd) (!pmd_val(pmd))
 
-#define pmd_bad(pmd) (!(pmd_val(pmd) & PMD_TABLE_BIT))
-
 #define pmd_table(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
 			PMD_TYPE_TABLE)
 #define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
 			PMD_TYPE_SECT)
 #define pmd_leaf(pmd) pmd_sect(pmd)
+#define pmd_bad(pmd) (!pmd_table(pmd))
 
 #define pmd_leaf_size(pmd) (pmd_cont(pmd) ? CONT_PMD_SIZE : PMD_SIZE)
 #define pte_leaf_size(pte) (pte_cont(pte) ? CONT_PTE_SIZE : PAGE_SIZE)
@@ -604,7 +603,7 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
 	pr_err("%s:%d: bad pmd %016llx.\n", __FILE__, __LINE__, pmd_val(e))
 
 #define pud_none(pud) (!pud_val(pud))
-#define pud_bad(pud) (!(pud_val(pud) & PUD_TABLE_BIT))
+#define pud_bad(pud) (!pud_table(pud))
 #define pud_present(pud) pte_present(pud_pte(pud))
 #define pud_leaf(pud) pud_sect(pud)
 #define pud_valid(pud) pte_valid(pud_pte(pud))
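
Why the new pmd_bad() is stricter, in terms of the descriptor bits (values as defined in pgtable-hwdef.h):

/*
 * Descriptor bits [1:0] encode the entry type:
 *   0b11 = table (PMD_TYPE_TABLE), 0b01 = block/section (PMD_TYPE_SECT).
 * The old check, !(pmd_val(pmd) & PMD_TABLE_BIT), tested bit 1 alone;
 * the new !pmd_table(pmd) compares the full two-bit field against
 * PMD_TYPE_TABLE, so every non-table encoding is reported as bad.
 * The same reasoning applies to pud_bad().
 */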

arch/arm64/include/asm/sysreg.h

@@ -703,9 +703,7 @@
 /* MAIR_ELx memory attributes (used by Linux) */
 #define MAIR_ATTR_DEVICE_nGnRnE UL(0x00)
 #define MAIR_ATTR_DEVICE_nGnRE UL(0x04)
-#define MAIR_ATTR_DEVICE_GRE UL(0x0c)
 #define MAIR_ATTR_NORMAL_NC UL(0x44)
-#define MAIR_ATTR_NORMAL_WT UL(0xbb)
 #define MAIR_ATTR_NORMAL_TAGGED UL(0xf0)
 #define MAIR_ATTR_NORMAL UL(0xff)
 #define MAIR_ATTR_MASK UL(0xff)

arch/arm64/include/asm/tlb.h

@@ -28,6 +28,10 @@ static void tlb_flush(struct mmu_gather *tlb);
  */
 static inline int tlb_get_level(struct mmu_gather *tlb)
 {
+	/* The TTL field is only valid for the leaf entry. */
+	if (tlb->freed_tables)
+		return 0;
+
 	if (tlb->cleared_ptes && !(tlb->cleared_pmds ||
 				   tlb->cleared_puds ||
 				   tlb->cleared_p4ds))
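
How the return value is consumed, sketched for context (the caller is tlb_flush() in the same header):

/*
 * tlb_flush() forwards this value as the tlb_level argument of
 * __flush_tlb_range(), where it becomes the TTL hint of the TLBI
 * instruction. A non-zero TTL promises that only leaf entries at that
 * level need invalidating; 0 means "level unknown", forcing the
 * walk cache to be invalidated too. Hence the freed_tables case must
 * return 0: intermediate (table) entries were freed, and a leaf-only
 * hint would leave stale walk-cache entries behind.
 */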

arch/arm64/kernel/acpi.c

@@ -239,6 +239,18 @@ void __init acpi_boot_table_init(void)
 	}
 }
 
+static pgprot_t __acpi_get_writethrough_mem_attribute(void)
+{
+	/*
+	 * Although UEFI specifies the use of Normal Write-through for
+	 * EFI_MEMORY_WT, it is seldom used in practice and not implemented
+	 * by most (all?) CPUs. Rather than allocate a MAIR just for this
+	 * purpose, emit a warning and use Normal Non-cacheable instead.
+	 */
+	pr_warn_once("No MAIR allocation for EFI_MEMORY_WT; treating as Normal Non-cacheable\n");
+	return __pgprot(PROT_NORMAL_NC);
+}
+
 pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
 {
 	/*
@@ -246,7 +258,7 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
 	 * types" of UEFI 2.5 section 2.3.6.1, each EFI memory type is
 	 * mapped to a corresponding MAIR attribute encoding.
 	 * The EFI memory attribute advises all possible capabilities
-	 * of a memory region. We use the most efficient capability.
+	 * of a memory region.
 	 */
 
 	u64 attr;
@@ -254,10 +266,10 @@ pgprot_t __acpi_get_mem_attribute(phys_addr_t addr)
 	attr = efi_mem_attributes(addr);
 	if (attr & EFI_MEMORY_WB)
 		return PAGE_KERNEL;
-	if (attr & EFI_MEMORY_WT)
-		return __pgprot(PROT_NORMAL_WT);
 	if (attr & EFI_MEMORY_WC)
 		return __pgprot(PROT_NORMAL_NC);
+	if (attr & EFI_MEMORY_WT)
+		return __acpi_get_writethrough_mem_attribute();
 	return __pgprot(PROT_DEVICE_nGnRnE);
 }
 
@@ -340,10 +352,10 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
 		default:
 			if (region->attribute & EFI_MEMORY_WB)
 				prot = PAGE_KERNEL;
-			else if (region->attribute & EFI_MEMORY_WT)
-				prot = __pgprot(PROT_NORMAL_WT);
 			else if (region->attribute & EFI_MEMORY_WC)
 				prot = __pgprot(PROT_NORMAL_NC);
+			else if (region->attribute & EFI_MEMORY_WT)
+				prot = __acpi_get_writethrough_mem_attribute();
 		}
 	}
 
 	return __ioremap(phys, size, prot);

arch/arm64/kernel/head.S

@@ -196,7 +196,7 @@ SYM_CODE_END(preserve_boot_args)
 	and	\iend, \iend, \istart	// iend = (vend >> shift) & (ptrs - 1)
 	mov	\istart, \ptrs
 	mul	\istart, \istart, \count
-	add	\iend, \iend, \istart	// iend += (count - 1) * ptrs
+	add	\iend, \iend, \istart	// iend += count * ptrs
 					// our entries span multiple tables
 
 	lsr	\istart, \vstart, \shift
@@ -354,7 +354,6 @@ SYM_FUNC_START_LOCAL(__create_page_tables)
 #endif
 1:
 	ldr_l	x4, idmap_ptrs_per_pgd
-	mov	x5, x3			// __pa(__idmap_text_start)
 	adr_l	x6, __idmap_text_end	// __pa(__idmap_text_end)
 
 	map_memory x0, x1, x3, x6, x7, x3, x4, x10, x11, x12, x13, x14
@@ -568,7 +567,7 @@ SYM_FUNC_START_LOCAL(set_cpu_boot_mode_flag)
 	cmp	w0, #BOOT_CPU_MODE_EL2
 	b.ne	1f
 	add	x1, x1, #4
-1:	str	w0, [x1]		// This CPU has booted in EL1
+1:	str	w0, [x1]		// Save CPU boot mode
 	dmb	sy
 	dc	ivac, x1		// Invalidate potentially stale cache line
 	ret

arch/arm64/kernel/setup.c

@@ -375,7 +375,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	 * faults in case uaccess_enable() is inadvertently called by the init
 	 * thread.
 	 */
-	init_task.thread_info.ttbr0 = __pa_symbol(reserved_pg_dir);
+	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
 #endif
 
 	if (boot_args[1] || boot_args[2] || boot_args[3]) {

arch/arm64/mm/context.c

@@ -402,14 +402,12 @@ static int asids_init(void)
 {
 	asid_bits = get_cpu_asid_bits();
 	atomic64_set(&asid_generation, ASID_FIRST_VERSION);
-	asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
-			   GFP_KERNEL);
+	asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
 	if (!asid_map)
 		panic("Failed to allocate bitmap for %lu ASIDs\n",
 		      NUM_USER_ASIDS);
 
-	pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS),
-				  sizeof(*pinned_asid_map), GFP_KERNEL);
+	pinned_asid_map = bitmap_zalloc(NUM_USER_ASIDS, GFP_KERNEL);
 	nr_pinned_asids = 0;
 
 	/*
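
For readers unfamiliar with the helper, the equivalence being relied on here (implemented in lib/bitmap.c):

/*
 * bitmap_zalloc(nbits, flags)
 *     == kcalloc(BITS_TO_LONGS(nbits), sizeof(unsigned long), flags)
 *
 * The helper states the intent (a zeroed bitmap of nbits bits)
 * directly and pairs with bitmap_free() rather than a bare kfree().
 */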

arch/arm64/mm/fault.c

@@ -99,6 +99,8 @@ static void mem_abort_decode(unsigned int esr)
 	pr_alert("  EA = %lu, S1PTW = %lu\n",
 		 (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT,
 		 (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT);
+	pr_alert("  FSC = 0x%02x: %s\n", (esr & ESR_ELx_FSC),
+		 esr_to_fault_info(esr)->name);
 
 	if (esr_is_data_abort(esr))
 		data_abort_decode(esr);
@@ -232,13 +234,17 @@ static bool is_el1_instruction_abort(unsigned int esr)
 	return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR;
 }
 
+static bool is_el1_data_abort(unsigned int esr)
+{
+	return ESR_ELx_EC(esr) == ESR_ELx_EC_DABT_CUR;
+}
+
 static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
 					   struct pt_regs *regs)
 {
-	unsigned int ec = ESR_ELx_EC(esr);
 	unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE;
 
-	if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR)
+	if (!is_el1_data_abort(esr) && !is_el1_instruction_abort(esr))
 		return false;
 
 	if (fsc_type == ESR_ELx_FSC_PERM)
@@ -258,7 +264,7 @@ static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr,
 	unsigned long flags;
 	u64 par, dfsc;
 
-	if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR ||
+	if (!is_el1_data_abort(esr) ||
 	    (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
 		return false;
 
@@ -346,10 +352,9 @@ static void do_tag_recovery(unsigned long addr, unsigned int esr,
 static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
 {
-	unsigned int ec = ESR_ELx_EC(esr);
 	unsigned int fsc = esr & ESR_ELx_FSC;
 
-	if (ec != ESR_ELx_EC_DABT_CUR)
+	if (!is_el1_data_abort(esr))
 		return false;
 
 	if (fsc == ESR_ELx_FSC_MTE)
@@ -504,7 +509,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (!(vma->vm_flags & vm_flags))
 		return VM_FAULT_BADACCESS;
-	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
+	return handle_mm_fault(vma, addr, mm_flags, regs);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)

arch/arm64/mm/init.c

@@ -499,6 +499,13 @@ void __init mem_init(void)
 	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
 #endif
 
+	/*
+	 * Selected page table levels should match when derived from
+	 * scratch using the virtual address range and page size.
+	 */
+	BUILD_BUG_ON(ARM64_HW_PGTABLE_LEVELS(CONFIG_ARM64_VA_BITS) !=
+		     CONFIG_PGTABLE_LEVELS);
+
 	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
 		extern int sysctl_overcommit_memory;
 		/*
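
A quick sanity check of what the new BUILD_BUG_ON() asserts, using the ARM64_HW_PGTABLE_LEVELS() definition from pgtable-hwdef.h:

/*
 * ARM64_HW_PGTABLE_LEVELS(va_bits) = ((va_bits - 4) / (PAGE_SHIFT - 3))
 *
 * Each level resolves PAGE_SHIFT - 3 VA bits. With 4K pages and a
 * 48-bit VA: (48 - 4) / (12 - 3) = 4 levels; with 64K pages and a
 * 42-bit VA: (42 - 4) / (16 - 3) = 2 levels. Either value must equal
 * the Kconfig-selected CONFIG_PGTABLE_LEVELS or the build now fails.
 */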

arch/arm64/mm/mmu.c

@@ -228,7 +228,7 @@ static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end,
 		next = pmd_addr_end(addr, end);
 
 		/* try section mapping first */
-		if (((addr | next | phys) & ~SECTION_MASK) == 0 &&
+		if (((addr | next | phys) & ~PMD_MASK) == 0 &&
 		    (flags & NO_BLOCK_MAPPINGS) == 0) {
 			pmd_set_huge(pmdp, phys, prot);
@@ -1113,14 +1113,14 @@ static void free_empty_tables(unsigned long addr, unsigned long end,
 }
 #endif
 
-#if !ARM64_SWAPPER_USES_SECTION_MAPS
+#if !ARM64_KERNEL_USES_PMD_MAPS
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		struct vmem_altmap *altmap)
 {
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
 	return vmemmap_populate_basepages(start, end, node, altmap);
 }
-#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#else	/* !ARM64_KERNEL_USES_PMD_MAPS */
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 		struct vmem_altmap *altmap)
 {
@@ -1165,17 +1165,18 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return 0;
 }
-#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
+#endif	/* !ARM64_KERNEL_USES_PMD_MAPS */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
 void vmemmap_free(unsigned long start, unsigned long end,
 		struct vmem_altmap *altmap)
 {
-#ifdef CONFIG_MEMORY_HOTPLUG
 	WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END));
 
 	unmap_hotplug_range(start, end, true, altmap);
 	free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END);
-#endif
 }
+#endif	/* CONFIG_MEMORY_HOTPLUG */
 
 static inline pud_t *fixmap_pud(unsigned long addr)
 {

arch/arm64/mm/proc.S

@@ -58,10 +58,8 @@
 #define MAIR_EL1_SET \
 	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
 	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \
-	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \
 	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \
 	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \
-	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \
 	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
 
 #ifdef CONFIG_CPU_PM
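
To see how the trimmed index list composes into the register value, note that MAIR_ATTRIDX() is defined in sysreg.h as ((attr) << ((idx) * 8)):

/*
 * Each MAIR_ATTRIDX(attr, idx) places the 8-bit attribute at byte
 * 'idx' of MAIR_EL1. With the renumbered indices MT_NORMAL = 0 through
 * MT_DEVICE_nGnRE = 4, only five of the eight attribute slots remain
 * in use: e.g. MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) contributes
 * 0xff << 0, while MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE)
 * contributes 0x04 << 32. (MT_NORMAL_TAGGED starts out as plain
 * MAIR_ATTR_NORMAL here and is rewritten when MTE is enabled.)
 */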

arch/arm64/mm/ptdump.c

@@ -157,10 +157,6 @@ static const struct prot_bits pte_bits[] = {
 		.mask	= PTE_ATTRINDX_MASK,
 		.val	= PTE_ATTRINDX(MT_DEVICE_nGnRE),
 		.set	= "DEVICE/nGnRE",
-	}, {
-		.mask	= PTE_ATTRINDX_MASK,
-		.val	= PTE_ATTRINDX(MT_DEVICE_GRE),
-		.set	= "DEVICE/GRE",
 	}, {
 		.mask	= PTE_ATTRINDX_MASK,
 		.val	= PTE_ATTRINDX(MT_NORMAL_NC),