linux/arch/arm/mm/tlb-v7.S
Ard Biesheuvel 6b0ef2792c ARM: 9384/2: mm: Make tlbflush routines CFI safe
Instead of avoiding CFI entirely on the TLB flush helpers, reorganize
the code so that the CFI machinery can deal with it. The important
things to take into account are:
- functions in asm called indirectly from C need to be defined using
  SYM_TYPED_FUNC_START()
- a reference to the asm function needs to be visible to the compiler,
  in order to get it to emit the typeid symbol.

The latter means that defining the cpu_tlb_fns structs is best done from
C code, so that the references in the static initializers will be
visible to the compiler.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Tested-by: Kees Cook <keescook@chromium.org>
Reviewed-by: Sami Tolvanen <samitolvanen@google.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
Signed-off-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
2024-04-29 14:14:15 +01:00
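
The C-side arrangement the commit message describes might look roughly like the sketch below. This is an illustration, not the exact code added by the commit: the struct and field names follow struct cpu_tlb_fns in <asm/tlbflush.h>, while the prototypes and the tlb_flags expression are assumptions made for the example. The point is simply that once a C static initializer references the asm entry points, the compiler emits the matching CFI typeid symbols for them.

/*
 * Sketch only: defining the v7 TLB ops from C so that the references in
 * the initializer are visible to the compiler, which then emits the CFI
 * typeid symbols for the asm routines defined in tlb-v7.S.
 */
#include <linux/init.h>
#include <linux/kconfig.h>
#include <linux/mm_types.h>
#include <asm/tlbflush.h>

/* Prototypes assumed here for the sketch; the real declarations may differ. */
void v7wbi_flush_user_tlb_range(unsigned long start, unsigned long end,
                                struct vm_area_struct *vma);
void v7wbi_flush_kern_tlb_range(unsigned long start, unsigned long end);

struct cpu_tlb_fns v7wbi_tlb_fns __initconst = {
        .flush_user_range       = v7wbi_flush_user_tlb_range,
        .flush_kern_range       = v7wbi_flush_kern_tlb_range,
        /* flag selection shown here is an assumption for the example */
        .tlb_flags              = IS_ENABLED(CONFIG_SMP) ? v7wbi_tlb_flags_smp
                                                         : v7wbi_tlb_flags_up,
};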


/* SPDX-License-Identifier: GPL-2.0-only */
/*
 *  linux/arch/arm/mm/tlb-v7.S
 *
 *  Copyright (C) 1997-2002 Russell King
 *  Modified for ARMv7 by Catalin Marinas
 *
 *  ARM architecture version 7 TLB handling functions.
 *  These assume a split I/D TLB.
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#include <asm/tlbflush.h>
#include "proc-macros.S"
        .arch armv7-a
/*
 *      v7wbi_flush_user_tlb_range(start, end, vma)
 *
 *      Invalidate a range of TLB entries in the specified address space.
 *
 *      - start - start address (may not be aligned)
 *      - end   - end address (exclusive, may not be aligned)
 *      - vma   - vm_area_struct describing address range
 *
 *      It is assumed that:
 *      - the "Invalidate single entry" instruction will invalidate
 *        both the I and the D TLBs on Harvard-style TLBs
 */
SYM_TYPED_FUNC_START(v7wbi_flush_user_tlb_range)
        vma_vm_mm r3, r2                        @ get vma->vm_mm
        mmid    r3, r3                          @ get vm_mm->context.id
        dsb     ish
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        asid    r3, r3                          @ mask ASID
#ifdef CONFIG_ARM_ERRATA_720789
        ALT_SMP(W(mov)  r3, #0)
        ALT_UP(W(nop))
#endif
        orr     r0, r3, r0, lsl #PAGE_SHIFT     @ Create initial MVA
        mov     r1, r1, lsl #PAGE_SHIFT
1:
#ifdef CONFIG_ARM_ERRATA_720789
        ALT_SMP(mcr     p15, 0, r0, c8, c3, 3)  @ TLB invalidate U MVA all ASID (shareable)
#else
        ALT_SMP(mcr     p15, 0, r0, c8, c3, 1)  @ TLB invalidate U MVA (shareable)
#endif
        ALT_UP(mcr      p15, 0, r0, c8, c7, 1)  @ TLB invalidate U MVA
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
        dsb     ish
        ret     lr
SYM_FUNC_END(v7wbi_flush_user_tlb_range)
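
To make the address arithmetic above easier to follow, here is a rough C rendering of the same loop. It is an illustration only; the real implementation is the assembly above, and tlb_invalidate_mva_is() is a hypothetical stand-in for the "mcr p15, 0, rX, c8, c3, 1" (TLBIMVAIS) instruction.

/* Rough C equivalent of v7wbi_flush_user_tlb_range, illustration only. */
#include <asm/page.h>

/* Hypothetical stand-in for "mcr p15, 0, mva, c8, c3, 1" (TLBIMVAIS). */
extern void tlb_invalidate_mva_is(unsigned long mva);

static void flush_user_tlb_range_sketch(unsigned long start, unsigned long end,
                                        unsigned long asid)
{
        /* MVA = page-aligned address with the ASID in bits [7:0] */
        unsigned long mva  = (start & PAGE_MASK) | (asid & 0xff);
        unsigned long last = end & PAGE_MASK;

        /* the assembly issues "dsb ish" here, completing prior PTE updates */
        do {
                tlb_invalidate_mva_is(mva);     /* one TLB entry per page    */
                mva += PAGE_SIZE;               /* ASID bits stay in the MVA */
        } while (mva < last);
        /* and "dsb ish" again here, waiting for the invalidations to finish */
}

Note that under CONFIG_ARM_ERRATA_720789 the SMP path instead uses the all-ASID invalidate (c8, c3, 3) and forces the ASID to zero, trading per-ASID precision for correctness on the affected cores.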
/*
 *      v7wbi_flush_kern_tlb_range(start, end)
 *
 *      Invalidate a range of kernel TLB entries
 *
 *      - start - start address (may not be aligned)
 *      - end   - end address (exclusive, may not be aligned)
 */
SYM_TYPED_FUNC_START(v7wbi_flush_kern_tlb_range)
        dsb     ish
        mov     r0, r0, lsr #PAGE_SHIFT         @ align address
        mov     r1, r1, lsr #PAGE_SHIFT
        mov     r0, r0, lsl #PAGE_SHIFT
        mov     r1, r1, lsl #PAGE_SHIFT
1:
#ifdef CONFIG_ARM_ERRATA_720789
        ALT_SMP(mcr     p15, 0, r0, c8, c3, 3)  @ TLB invalidate U MVA all ASID (shareable)
#else
        ALT_SMP(mcr     p15, 0, r0, c8, c3, 1)  @ TLB invalidate U MVA (shareable)
#endif
        ALT_UP(mcr      p15, 0, r0, c8, c7, 1)  @ TLB invalidate U MVA
        add     r0, r0, #PAGE_SZ
        cmp     r0, r1
        blo     1b
        dsb     ish
        isb
        ret     lr
SYM_FUNC_END(v7wbi_flush_kern_tlb_range)
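
For completeness, a similar C sketch of the kernel-range variant, again illustrative and reusing the hypothetical helper from the previous sketch: the loop is the same, but there is no ASID because kernel mappings are global, and the routine ends with an ISB in addition to the DSB so the instruction stream cannot keep using a stale translation after it returns.

/* Rough C equivalent of v7wbi_flush_kern_tlb_range, illustration only. */
static void flush_kern_tlb_range_sketch(unsigned long start, unsigned long end)
{
        unsigned long mva  = start & PAGE_MASK; /* no ASID: global mappings */
        unsigned long last = end & PAGE_MASK;

        do {
                tlb_invalidate_mva_is(mva);     /* hypothetical helper, as above */
                mva += PAGE_SIZE;
        } while (mva < last);
        /* the assembly ends with "dsb ish" followed by "isb" */
}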