arm: Assume __ARM_ARCH == 7

The only supported 32-bit Arm architecture is Armv7. Remove old checks
for earlier architecture revisions.

Sponsored by:	Arm Ltd
Differential Revision:	https://reviews.freebsd.org/D45957
Andrew Turner 2024-07-12 11:44:50 +00:00
parent bf18be5ab7
commit d29771a722
16 changed files with 17 additions and 156 deletions
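
Every hunk below follows the same shape: an ARMv6 fallback guarded by the preprocessor collapses into the unconditional ARMv7 form. A hypothetical before/after sketch (not a literal hunk from this commit; the wrapper name is made up):

    /* Before: each barrier (and similar op) picked a form per revision. */
    #if __ARM_ARCH >= 7
            isb();                  /* ARMv7+: dedicated barrier instruction */
    #else
            armv6_cp15_isb();       /* ARMv6: CP15 c7 op (hypothetical wrapper) */
    #endif

    /* After: with __ARM_ARCH == 7 assumed, only the first form remains. */
            isb();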

View file

@@ -47,11 +47,6 @@
/* Invalidate D cache to PoC. (aka all cache levels)*/
ASENTRY_NP(dcache_inv_poc_all)
#if __ARM_ARCH == 6
mcr CP15_DCIALL
DSB
bx lr
#else
mrc CP15_CLIDR(r0)
ands r0, r0, #0x07000000
mov r0, r0, lsr #23 /* Get LoC 'naturally' aligned for */
@@ -97,16 +92,10 @@ ASENTRY_NP(dcache_inv_poc_all)
mcr CP15_CSSELR(r0)
isb
bx lr
#endif /* __ARM_ARCH == 6 */
END(dcache_inv_poc_all)
/* Invalidate D cache to PoU. (aka L1 cache only)*/
ASENTRY_NP(dcache_inv_pou_all)
#if __ARM_ARCH == 6
mcr CP15_DCIALL
DSB
bx lr
#else
mrc CP15_CLIDR(r0)
ands r0, r0, #0x38000000
mov r0, r0, lsr #26 /* Get LoUU (naturally aligned) */
@@ -151,16 +140,10 @@ ASENTRY_NP(dcache_inv_pou_all)
mov r0, #0
mcr CP15_CSSELR(r0)
bx lr
#endif
END(dcache_inv_pou_all)
/* Write back and Invalidate D cache to PoC. */
ASENTRY_NP(dcache_wbinv_poc_all)
#if __ARM_ARCH == 6
mcr CP15_DCCIALL
DSB
bx lr
#else
mrc CP15_CLIDR(r0)
ands r0, r0, #0x07000000
beq 4f
@@ -208,7 +191,6 @@ ASENTRY_NP(dcache_wbinv_poc_all)
mov r0, #0
mcr CP15_CSSELR(r0)
bx lr
#endif /* __ARM_ARCH == 6 */
END(dcache_wbinv_poc_all)
ASENTRY_NP(dcache_wb_pou_checked)
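
For reference, the CLIDR dance at the top of these routines, rendered in C (a sketch, assuming the assembly matches the architectural encoding of CLIDR at p15, 1, c0, c0, 1):

    uint32_t clidr, level;

    __asm __volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (clidr));
    /* CLIDR[26:24] is the Level of Coherency (LoC).  Shifting right by
     * 23 rather than 24 leaves LoC doubled, i.e. already positioned for
     * the CSSELR Level field in bits [3:1]: the "naturally aligned"
     * value the comments above refer to.  The PoU variants use LoUU,
     * CLIDR[29:27], shifted right by 26 for the same reason. */
    level = (clidr & 0x07000000) >> 23;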

View file

@@ -270,13 +270,9 @@ ASENTRY_NP(undefined_entry)
mov r0, sp /* exception exit routine. pass frame */
ldr r2, [sp, #(TF_PC)] /* load pc */
#if __ARM_ARCH >= 7
tst r4, #(PSR_T) /* test if PSR_T */
subne r2, r2, #(THUMB_INSN_SIZE)
subeq r2, r2, #(INSN_SIZE)
#else
sub r2, r2, #(INSN_SIZE) /* fix pc */
#endif
str r2, [sp, #TF_PC] /* store pc */
#ifdef KDTRACE_HOOKS
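
The surviving fixup, shown as C for clarity (a sketch; the real code is the assembly above, where r4 holds the saved PSR):

    /* The exception leaves a PC that points past the undefined
     * instruction; back it up by the width of one instruction in the
     * faulting state so the handler can decode it. */
    if (tf->tf_spsr & PSR_T)
            tf->tf_pc -= THUMB_INSN_SIZE;   /* Thumb state: 2-byte units */
    else
            tf->tf_pc -= INSN_SIZE;         /* ARM state: 4 bytes */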

View file

@@ -353,12 +353,10 @@ sendsig(sig_t catcher, ksiginfo_t *ksi, sigset_t *mask)
tf->tf_usr_lr = (register_t)(PROC_PS_STRINGS(p) -
*(sysent->sv_szsigcode));
/* Set the mode to enter in the signal handler */
#if __ARM_ARCH >= 7
if ((register_t)catcher & 1)
tf->tf_spsr |= PSR_T;
else
tf->tf_spsr &= ~PSR_T;
#endif
CTR3(KTR_SIG, "sendsig: return td=%p pc=%#x sp=%#x", td, tf->tf_usr_lr,
tf->tf_usr_sp);
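
Bit 0 of catcher encodes the handler's instruction set, per the usual interworking convention (the same rule bx applies when jumping through a function pointer). A small hypothetical illustration of the convention from the user side:

    #include <assert.h>
    #include <stdint.h>

    extern void thumb_handler(int);         /* hypothetically built -mthumb */

    void
    check(void)
    {
            /* The linker sets bit 0 on Thumb code addresses, so a plain
             * function pointer already carries the bit sendsig() copies
             * into PSR_T. */
            assert(((uintptr_t)thumb_handler & 1) == 1);
    }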

View file

@@ -30,7 +30,6 @@
#include <machine/asmacros.h>
#include <machine/armreg.h>
#include <machine/sysreg.h>
#if __ARM_ARCH >= 7
#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
.arch_extension virt
#endif
@@ -81,5 +80,4 @@ _C_LABEL(hypervisor_stub_vect):
b hypervisor_stub_trap /* HYP-Mode */
.word 0 /* FIQ */
.word 0 /* IRQ */
#endif /* __ARM_ARCH >= 7 */
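
The stub only matters when the boot loader enters the kernel in HYP mode; a C rendering of the mode check the locore HANDLE_HYP path below performs (a sketch, assuming the usual armreg.h mode names):

    uint32_t cpsr;

    __asm __volatile("mrs %0, cpsr" : "=r" (cpsr));
    if ((cpsr & PSR_MODE) == PSR_HYP32_MODE) {
            /* Entered in HYP: install the stub vectors, set up a return
             * frame, and ERET down to SVC before normal startup. */
    }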

View file

@@ -40,7 +40,6 @@
#define LOCORE_MAP_MB 64
#endif
#if __ARM_ARCH >= 7
#if defined(__ARM_ARCH_7VE__) || defined(__clang__)
/*
* HYP support is in binutils >= 2.21 and gcc >= 4.9 defines __ARM_ARCH_7VE__
@@ -48,7 +47,6 @@
*/
.arch_extension virt
#endif
#endif /* __ARM_ARCH >= 7 */
/* A small statically-allocated stack used only during initarm() and AP startup. */
#define INIT_ARM_STACK_SIZE 2048
@@ -59,7 +57,6 @@
.globl kernbase
.set kernbase,KERNVIRTADDR
#if __ARM_ARCH >= 7
#define HANDLE_HYP \
/* Leave HYP mode */ ;\
mrs r0, cpsr ;\
@@ -86,9 +83,6 @@
adr r1, hypmode_enabled ;\
str r0, [r1] ;\
2:
#else
#define HANDLE_HYP
#endif /* __ARM_ARCH >= 7 */
/*
* On entry for FreeBSD boot ABI:

View file

@@ -169,9 +169,7 @@ init_secondary(int cpu)
/* Spin until the BSP releases the APs */
while (!atomic_load_acq_int(&aps_ready)) {
#if __ARM_ARCH >= 7
__asm __volatile("wfe");
#endif
}
/* Initialize curthread */
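
The wfe is purely a power saver: the loop is already correct without it. A sketch of both sides of the rendezvous (the BSP side is a plausible counterpart, not quoted from this commit):

    /* AP side: park in a low-power state until an event arrives. */
    while (!atomic_load_acq_int(&aps_ready))
            __asm __volatile("wfe");

    /* BSP side (hypothetical sketch): publish the flag, then wake
     * every core waiting in wfe. */
    atomic_store_rel_int(&aps_ready, 1);
    dsb();
    __asm __volatile("sev");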

View file

@@ -55,7 +55,7 @@
#include "pmu.h"
/* CCNT */
#if defined(__arm__) && (__ARM_ARCH > 6)
#if defined(__arm__)
int pmu_attched = 0;
uint32_t ccnt_hi[MAXCPU];
#endif
@@ -67,7 +67,7 @@ static int
pmu_intr(void *arg)
{
uint32_t r;
#if defined(__arm__) && (__ARM_ARCH > 6)
#if defined(__arm__)
u_int cpu;
cpu = PCPU_GET(cpuid);
@@ -96,7 +96,7 @@ int
pmu_attach(device_t dev)
{
struct pmu_softc *sc;
#if defined(__arm__) && (__ARM_ARCH > 6)
#if defined(__arm__)
uint32_t iesr;
#endif
int err, i;
@@ -126,7 +126,7 @@ pmu_attach(device_t dev)
}
}
#if defined(__arm__) && (__ARM_ARCH > 6)
#if defined(__arm__)
/* Initialize to 0. */
for (i = 0; i < MAXCPU; i++)
ccnt_hi[i] = 0;
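
ccnt_hi carries the high 32 bits of a software-widened cycle counter; the overflow interrupt is what advances it. A sketch of the relevant part of pmu_intr(), assuming PMOVSR bit 31 flags a CCNT overflow:

    /* Inside pmu_intr() (sketch): r holds the overflow status read
     * from PMOVSR; bit 31 is the cycle counter's overflow flag. */
    if (r & (1U << 31)) {
            cpu = PCPU_GET(cpuid);
            ccnt_hi[cpu]++;         /* extend CCNT to 64 bits */
            /* write bit 31 back to PMOVSR to acknowledge it */
    }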

View file

@@ -291,11 +291,7 @@ abort_handler(struct trapframe *tf, int prefetch)
td = curthread;
fsr = (prefetch) ? cp15_ifsr_get(): cp15_dfsr_get();
#if __ARM_ARCH >= 7
far = (prefetch) ? cp15_ifar_get() : cp15_dfar_get();
#else
far = (prefetch) ? TRAPF_PC(tf) : cp15_dfar_get();
#endif
idx = FSR_TO_FAULT(fsr);
usermode = TRAPF_USERMODE(tf); /* Abort came from user mode? */

View file

@@ -279,7 +279,6 @@ undefinedinstruction(struct trapframe *frame)
coprocessor = COPROC_VFP; /* vfp / simd */
}
} else {
#if __ARM_ARCH >= 7
fault_instruction = *(uint16_t *)fault_pc;
if (THUMB_32BIT_INSN(fault_instruction)) {
fault_instruction <<= 16;
@@ -294,18 +293,6 @@ undefinedinstruction(struct trapframe *frame)
coprocessor = COPROC_VFP; /* SIMD */
}
}
#else
/*
* No support for Thumb-2 on this cpu
*/
ksiginfo_init_trap(&ksi);
ksi.ksi_signo = SIGILL;
ksi.ksi_code = ILL_ILLADR;
ksi.ksi_addr = (u_int32_t *)(intptr_t) fault_pc;
trapsignal(td, &ksi);
userret(td, frame);
return;
#endif
}
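
THUMB_32BIT_INSN keys off the Thumb-2 encoding rule: a leading halfword whose top five bits are 0b11101, 0b11110, or 0b11111 opens a 32-bit instruction; anything else is a complete 16-bit instruction. A sketch of that test and of the reassembly the handler performs:

    #include <stdbool.h>
    #include <stdint.h>

    static inline bool
    thumb_is_32bit(uint16_t hw)
    {
            /* Top five bits 0b111xx with xx != 00 start a 32-bit encoding. */
            return ((hw & 0xe000) == 0xe000 && (hw & 0x1800) != 0);
    }

    /* Reassembly, as done above: first halfword in the high 16 bits. */
    uint32_t
    thumb_fetch32(uint16_t hw1, uint16_t hw2)
    {
            return (((uint32_t)hw1 << 16) | hw2);
    }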
if ((frame->tf_spsr & PSR_MODE) == PSR_USR32_MODE) {

View file

@@ -162,11 +162,9 @@ cpu_set_syscall_retval(struct thread *td, int error)
/*
* Reconstruct the pc to point at the swi.
*/
#if __ARM_ARCH >= 7
if ((frame->tf_spsr & PSR_T) != 0)
frame->tf_pc -= THUMB_INSN_SIZE;
else
#endif
frame->tf_pc -= INSN_SIZE;
break;
case EJUSTRETURN:
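
The two rewind distances exist because the trapping instruction itself differs in width. For reference (a sketch; encoding widths from the architecture manual):

    /* ERESTART: move the PC back over the syscall instruction so it
     * re-executes.  The width depends on the state at the trap:
     *   ARM:   svc #imm24 -> 4 bytes (INSN_SIZE)
     *   Thumb: svc #imm8  -> 2 bytes (THUMB_INSN_SIZE) */
    if ((frame->tf_spsr & PSR_T) != 0)
            frame->tf_pc -= THUMB_INSN_SIZE;
    else
            frame->tf_pc -= INSN_SIZE;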

View file

@@ -174,7 +174,6 @@
# define RETne bxne lr
# define RETc(c) bx##c lr
#if __ARM_ARCH >= 7
#define ISB isb
#define DSB dsb
#define DMB dmb
@@ -188,12 +187,5 @@
#define ERET .word 0xe160006e
#endif
#elif __ARM_ARCH == 6
#include <machine/sysreg.h>
#define ISB mcr CP15_CP15ISB
#define DSB mcr CP15_CP15DSB
#define DMB mcr CP15_CP15DMB
#define WFI mcr CP15_CP15WFI
#endif
#endif /* !_MACHINE_ASM_H_ */

View file

@@ -41,15 +41,9 @@
#include <sys/atomic_common.h>
#if __ARM_ARCH >= 7
#define isb() __asm __volatile("isb" : : : "memory")
#define dsb() __asm __volatile("dsb" : : : "memory")
#define dmb() __asm __volatile("dmb" : : : "memory")
#else
#define isb() __asm __volatile("mcr p15, 0, %0, c7, c5, 4" : : "r" (0) : "memory")
#define dsb() __asm __volatile("mcr p15, 0, %0, c7, c10, 4" : : "r" (0) : "memory")
#define dmb() __asm __volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory")
#endif
#define mb() dmb()
#define wmb() dmb()
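
A typical consumer of the dmb() defined above, as a hedged sketch (shared_data, shared_ready, and use() are hypothetical): publishing data before its ready flag.

    /* Producer: make the payload globally visible before the flag. */
    shared_data = value;
    dmb();                  /* order payload store before flag store */
    shared_ready = 1;

    /* Consumer: observe the flag before reading the payload. */
    while (shared_ready == 0)
            ;
    dmb();                  /* order flag load before payload load */
    use(shared_data);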

View file

@@ -45,13 +45,7 @@ void cpu_halt(void);
* unconditionally with -DSMP. Although it looks like a bug,
* handle this case here and in #elif condition in ARM_SMP_UP macro.
*/
#if __ARM_ARCH <= 6 && defined(SMP) && !defined(KLD_MODULE)
#error SMP option is not supported on ARMv6
#endif
#if __ARM_ARCH <= 6 && defined(SMP_ON_UP)
#error SMP_ON_UP option is only supported on ARMv7+ CPUs
#endif
#if !defined(SMP) && defined(SMP_ON_UP)
#error SMP option must be defined for SMP_ON_UP option
@@ -68,7 +62,7 @@ do { \
up_code; \
} \
} while (0)
#elif defined(SMP) && __ARM_ARCH > 6
#elif defined(SMP)
#define ARM_SMP_UP(smp_code, up_code) \
do { \
smp_code; \
@@ -146,15 +140,15 @@ fname(uint64_t reg) \
/* TLB */
_WF0(_CP15_TLBIALL, CP15_TLBIALL) /* Invalidate entire unified TLB */
#if __ARM_ARCH >= 7 && defined(SMP)
#if defined(SMP)
_WF0(_CP15_TLBIALLIS, CP15_TLBIALLIS) /* Invalidate entire unified TLB IS */
#endif
_WF1(_CP15_TLBIASID, CP15_TLBIASID(%0)) /* Invalidate unified TLB by ASID */
#if __ARM_ARCH >= 7 && defined(SMP)
#if defined(SMP)
_WF1(_CP15_TLBIASIDIS, CP15_TLBIASIDIS(%0)) /* Invalidate unified TLB by ASID IS */
#endif
_WF1(_CP15_TLBIMVAA, CP15_TLBIMVAA(%0)) /* Invalidate unified TLB by MVA, all ASID */
#if __ARM_ARCH >= 7 && defined(SMP)
#if defined(SMP)
_WF1(_CP15_TLBIMVAAIS, CP15_TLBIMVAAIS(%0)) /* Invalidate unified TLB by MVA, all ASID IS */
#endif
_WF1(_CP15_TLBIMVA, CP15_TLBIMVA(%0)) /* Invalidate unified TLB by MVA */
@@ -164,21 +158,19 @@ _WF1(_CP15_TTB_SET, CP15_TTBR0(%0))
/* Cache and Branch predictor */
_WF0(_CP15_BPIALL, CP15_BPIALL) /* Branch predictor invalidate all */
#if __ARM_ARCH >= 7 && defined(SMP)
#if defined(SMP)
_WF0(_CP15_BPIALLIS, CP15_BPIALLIS) /* Branch predictor invalidate all IS */
#endif
_WF1(_CP15_BPIMVA, CP15_BPIMVA(%0)) /* Branch predictor invalidate by MVA */
_WF1(_CP15_DCCIMVAC, CP15_DCCIMVAC(%0)) /* Data cache clean and invalidate by MVA PoC */
_WF1(_CP15_DCCISW, CP15_DCCISW(%0)) /* Data cache clean and invalidate by set/way */
_WF1(_CP15_DCCMVAC, CP15_DCCMVAC(%0)) /* Data cache clean by MVA PoC */
#if __ARM_ARCH >= 7
_WF1(_CP15_DCCMVAU, CP15_DCCMVAU(%0)) /* Data cache clean by MVA PoU */
#endif
_WF1(_CP15_DCCSW, CP15_DCCSW(%0)) /* Data cache clean by set/way */
_WF1(_CP15_DCIMVAC, CP15_DCIMVAC(%0)) /* Data cache invalidate by MVA PoC */
_WF1(_CP15_DCISW, CP15_DCISW(%0)) /* Data cache invalidate by set/way */
_WF0(_CP15_ICIALLU, CP15_ICIALLU) /* Instruction cache invalidate all PoU */
#if __ARM_ARCH >= 7 && defined(SMP)
#if defined(SMP)
_WF0(_CP15_ICIALLUIS, CP15_ICIALLUIS) /* Instruction cache invalidate all PoU IS */
#endif
_WF1(_CP15_ICIMVAU, CP15_ICIMVAU(%0)) /* Instruction cache invalidate */
@@ -209,10 +201,8 @@ _WF1(cp15_prrr_set, CP15_PRRR(%0))
_WF1(cp15_nmrr_set, CP15_NMRR(%0))
_RF0(cp15_ttbr_get, CP15_TTBR0(%0))
_RF0(cp15_dfar_get, CP15_DFAR(%0))
#if __ARM_ARCH >= 7
_RF0(cp15_ifar_get, CP15_IFAR(%0))
_RF0(cp15_l2ctlr_get, CP15_L2CTLR(%0))
#endif
_RF0(cp15_actlr_get, CP15_ACTLR(%0))
_WF1(cp15_actlr_set, CP15_ACTLR(%0))
_WF1(cp15_ats1cpr_set, CP15_ATS1CPR(%0))
@@ -251,14 +241,6 @@ _RF0(cp15_cbar_get, CP15_CBAR(%0))
/* Performance Monitor registers */
#if __ARM_ARCH == 6 && defined(CPU_ARM1176)
_RF0(cp15_pmuserenr_get, CP15_PMUSERENR(%0))
_WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmccntr_get, CP15_PMCCNTR(%0))
_WF1(cp15_pmccntr_set, CP15_PMCCNTR(%0))
#elif __ARM_ARCH > 6
_RF0(cp15_pmcr_get, CP15_PMCR(%0))
_WF1(cp15_pmcr_set, CP15_PMCR(%0))
_RF0(cp15_pmcnten_get, CP15_PMCNTENSET(%0))
@@ -280,7 +262,6 @@ _WF1(cp15_pmuserenr_set, CP15_PMUSERENR(%0))
_RF0(cp15_pminten_get, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_set, CP15_PMINTENSET(%0))
_WF1(cp15_pminten_clr, CP15_PMINTENCLR(%0))
#endif
_RF0(cp15_tpidrurw_get, CP15_TPIDRURW(%0))
_WF1(cp15_tpidrurw_set, CP15_TPIDRURW(%0))
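
For readers without the full header: the _RF0/_WF0/_WF1 generators are assumed to expand each line above into a one-instruction inline accessor, roughly as follows (a sketch, not the verbatim macro body):

    /* Assumed expansion of _RF0(cp15_dfar_get, CP15_DFAR(%0)): */
    static __inline uint32_t
    cp15_dfar_get(void)
    {
            uint32_t reg;

            /* CP15_DFAR(%0) pastes in "p15, 0, %0, c6, c0, 0". */
            __asm __volatile("mrc p15, 0, %0, c6, c0, 0" : "=r" (reg));
            return (reg);
    }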
@@ -380,7 +361,7 @@ tlb_flush_range_local(vm_offset_t va, vm_size_t size)
}
/* Broadcasting operations. */
#if __ARM_ARCH >= 7 && defined(SMP)
#if defined(SMP)
static __inline void
tlb_flush_all(void)
@@ -442,14 +423,14 @@ tlb_flush_range(vm_offset_t va, vm_size_t size)
);
dsb();
}
#else /* __ARM_ARCH < 7 */
#else /* !SMP */
#define tlb_flush_all() tlb_flush_all_local()
#define tlb_flush_all_ng() tlb_flush_all_ng_local()
#define tlb_flush(va) tlb_flush_local(va)
#define tlb_flush_range(va, size) tlb_flush_range_local(va, size)
#endif /* __ARM_ARCH < 7 */
#endif /* !SMP */
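
How the SMP-only broadcast ops pair with ARM_SMP_UP inside the functions this hunk guards (a sketch; the real bodies are elided by the diff):

    static __inline void
    tlb_flush_all(void)
    {
            dsb();
            ARM_SMP_UP(
                _CP15_TLBIALLIS(),  /* SMP: invalidate in the IS domain */
                _CP15_TLBIALL()     /* UP: local invalidate only */
            );
            dsb();
    }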
/*
* Cache maintenance operations.
@@ -465,11 +446,7 @@ icache_sync(vm_offset_t va, vm_size_t size)
va &= ~cpuinfo.dcache_line_mask;
for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7
_CP15_DCCMVAU(va);
#else
_CP15_DCCMVAC(va);
#endif
}
dsb();
ARM_SMP_UP(
@@ -515,11 +492,7 @@ dcache_wb_pou(vm_offset_t va, vm_size_t size)
dsb();
va &= ~cpuinfo.dcache_line_mask;
for ( ; va < eva; va += cpuinfo.dcache_line_size) {
#if __ARM_ARCH >= 7
_CP15_DCCMVAU(va);
#else
_CP15_DCCMVAC(va);
#endif
}
dsb();
}
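
Both loops implement the standard sequence for making newly written instructions visible: clean the D-side to the point of unification, then invalidate the I-side. On ARMv7, DCCMVAU cleans only as far as the PoU, which is all the I-cache can see, so the cheaper op now suffices unconditionally. A condensed sketch:

    for (va = start & ~cpuinfo.dcache_line_mask; va < eva;
        va += cpuinfo.dcache_line_size)
            _CP15_DCCMVAU(va);      /* clean D-cache line to PoU */
    dsb();
    _CP15_ICIALLU();                /* invalidate I-cache (plus BPIALL) */
    dsb();
    isb();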
@@ -686,8 +659,7 @@ cp15_ats1cuw_check(vm_offset_t addr)
static __inline uint64_t
get_cyclecount(void)
{
#if __ARM_ARCH > 6 || (__ARM_ARCH == 6 && defined(CPU_ARM1176))
#if (__ARM_ARCH > 6) && defined(DEV_PMU)
#if defined(DEV_PMU)
if (pmu_attched) {
u_int cpu;
uint64_t h, h2;
@@ -711,12 +683,6 @@ get_cyclecount(void)
} else
#endif
return cp15_pmccntr_get();
#else /* No performance counters, so use nanotime(9). */
struct timespec tv;
nanotime(&tv);
return (tv.tv_sec * (uint64_t)1000000000ull + tv.tv_nsec);
#endif
}
#endif
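
The widening read that survives in get_cyclecount(), shown in isolation (a sketch; it relies on the pmu_intr() overflow handling noted earlier):

    /* 64-bit cycle count from a 32-bit CCNT: sample the software high
     * word on both sides of the hardware read and retry if an overflow
     * interrupt advanced it in between. */
    uint64_t h, h2;
    uint32_t l;
    u_int cpu = PCPU_GET(cpuid);

    do {
            h = ccnt_hi[cpu];
            l = cp15_pmccntr_get();
            h2 = ccnt_hi[cpu];
    } while (h != h2);
    return ((h2 << 32) | l);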

View file

@@ -55,11 +55,7 @@
#define MACHINE "arm"
#endif
#ifndef MACHINE_ARCH
#if __ARM_ARCH >= 7
#define MACHINE_ARCH "armv7"
#else
#define MACHINE_ARCH "armv6"
#endif
#endif
#ifdef SMP

View file

@@ -127,11 +127,9 @@
/* From ARMv6: */
#define CP15_IFSR(rr) p15, 0, rr, c5, c0, 1 /* Instruction Fault Status Register */
#if __ARM_ARCH >= 7
/* From ARMv7: */
#define CP15_ADFSR(rr) p15, 0, rr, c5, c1, 0 /* Auxiliary Data Fault Status Register */
#define CP15_AIFSR(rr) p15, 0, rr, c5, c1, 1 /* Auxiliary Instruction Fault Status Register */
#endif
/*
* CP15 C6 registers
@@ -147,7 +145,7 @@
/*
* CP15 C7 registers
*/
#if __ARM_ARCH >= 7 && defined(SMP)
#if defined(SMP)
/* From ARMv7: */
#define CP15_ICIALLUIS p15, 0, r0, c7, c1, 0 /* Instruction cache invalidate all PoU, IS */
#define CP15_BPIALLIS p15, 0, r0, c7, c1, 6 /* Branch predictor invalidate all IS */
@@ -157,17 +155,9 @@
#define CP15_ICIALLU p15, 0, r0, c7, c5, 0 /* Instruction cache invalidate all PoU */
#define CP15_ICIMVAU(rr) p15, 0, rr, c7, c5, 1 /* Instruction cache invalidate */
#if __ARM_ARCH == 6
/* Deprecated in ARMv7 */
#define CP15_CP15ISB p15, 0, r0, c7, c5, 4 /* ISB */
#endif
#define CP15_BPIALL p15, 0, r0, c7, c5, 6 /* Branch predictor invalidate all */
#define CP15_BPIMVA p15, 0, rr, c7, c5, 7 /* Branch predictor invalidate by MVA */
#if __ARM_ARCH == 6
/* Only ARMv6: */
#define CP15_DCIALL p15, 0, r0, c7, c6, 0 /* Data cache invalidate all */
#endif
#define CP15_DCIMVAC(rr) p15, 0, rr, c7, c6, 1 /* Data cache invalidate by MVA PoC */
#define CP15_DCISW(rr) p15, 0, rr, c7, c6, 2 /* Data cache invalidate by set/way */
@@ -176,43 +166,25 @@
#define CP15_ATS1CUR(rr) p15, 0, rr, c7, c8, 2 /* Stage 1 Current state unprivileged read */
#define CP15_ATS1CUW(rr) p15, 0, rr, c7, c8, 3 /* Stage 1 Current state unprivileged write */
#if __ARM_ARCH >= 7
/* From ARMv7: */
#define CP15_ATS12NSOPR(rr) p15, 0, rr, c7, c8, 4 /* Stages 1 and 2 Non-secure only PL1 read */
#define CP15_ATS12NSOPW(rr) p15, 0, rr, c7, c8, 5 /* Stages 1 and 2 Non-secure only PL1 write */
#define CP15_ATS12NSOUR(rr) p15, 0, rr, c7, c8, 6 /* Stages 1 and 2 Non-secure only unprivileged read */
#define CP15_ATS12NSOUW(rr) p15, 0, rr, c7, c8, 7 /* Stages 1 and 2 Non-secure only unprivileged write */
#endif
#if __ARM_ARCH == 6
/* Only ARMv6: */
#define CP15_DCCALL p15, 0, r0, c7, c10, 0 /* Data cache clean all */
#endif
#define CP15_DCCMVAC(rr) p15, 0, rr, c7, c10, 1 /* Data cache clean by MVA PoC */
#define CP15_DCCSW(rr) p15, 0, rr, c7, c10, 2 /* Data cache clean by set/way */
#if __ARM_ARCH == 6
/* Only ARMv6: */
#define CP15_CP15DSB p15, 0, r0, c7, c10, 4 /* DSB */
#define CP15_CP15DMB p15, 0, r0, c7, c10, 5 /* DMB */
#define CP15_CP15WFI p15, 0, r0, c7, c0, 4 /* WFI */
#endif
#if __ARM_ARCH >= 7
/* From ARMv7: */
#define CP15_DCCMVAU(rr) p15, 0, rr, c7, c11, 1 /* Data cache clean by MVA PoU */
#endif
#if __ARM_ARCH == 6
/* Only ARMv6: */
#define CP15_DCCIALL p15, 0, r0, c7, c14, 0 /* Data cache clean and invalidate all */
#endif
#define CP15_DCCIMVAC(rr) p15, 0, rr, c7, c14, 1 /* Data cache clean and invalidate by MVA PoC */
#define CP15_DCCISW(rr) p15, 0, rr, c7, c14, 2 /* Data cache clean and invalidate by set/way */
/*
* CP15 C8 registers
*/
#if __ARM_ARCH >= 7 && defined(SMP)
#if defined(SMP)
/* From ARMv7: */
#define CP15_TLBIALLIS p15, 0, r0, c8, c3, 0 /* Invalidate entire unified TLB IS */
#define CP15_TLBIMVAIS(rr) p15, 0, rr, c8, c3, 1 /* Invalidate unified TLB by MVA IS */
@@ -232,11 +204,6 @@
/*
* CP15 C9 registers
*/
#if __ARM_ARCH == 6 && defined(CPU_ARM1176)
#define CP15_PMUSERENR(rr) p15, 0, rr, c15, c9, 0 /* Access Validation Control Register */
#define CP15_PMCR(rr) p15, 0, rr, c15, c12, 0 /* Performance Monitor Control Register */
#define CP15_PMCCNTR(rr) p15, 0, rr, c15, c12, 1 /* PM Cycle Count Register */
#else
#define CP15_L2CTLR(rr) p15, 1, rr, c9, c0, 2 /* L2 Control Register */
#define CP15_PMCR(rr) p15, 0, rr, c9, c12, 0 /* Performance Monitor Control Register */
#define CP15_PMCNTENSET(rr) p15, 0, rr, c9, c12, 1 /* PM Count Enable Set Register */
@@ -250,7 +217,6 @@
#define CP15_PMUSERENR(rr) p15, 0, rr, c9, c14, 0 /* PM User Enable Register */
#define CP15_PMINTENSET(rr) p15, 0, rr, c9, c14, 1 /* PM Interrupt Enable Set Register */
#define CP15_PMINTENCLR(rr) p15, 0, rr, c9, c14, 2 /* PM Interrupt Enable Clear Register */
#endif
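
These CP15_xxx macros are plain operand lists, meant to be pasted into an mrc/mcr either in assembly or through the generators in cpu-v6.h. One concrete use, as a sketch:

    /* Reading the cycle counter via CP15_PMCCNTR(rr), which expands
     * to "p15, 0, rr, c9, c13, 0": */
    uint32_t ccnt;

    __asm __volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (ccnt));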
/*
* CP15 C10 registers

View file

@@ -119,7 +119,7 @@ SYSCTL_INT(_debug, OID_AUTO, __elfN(legacy_coredump), CTLFLAG_RW,
int __elfN(nxstack) =
#if defined(__amd64__) || defined(__powerpc64__) /* both 64 and 32 bit */ || \
(defined(__arm__) && __ARM_ARCH >= 7) || defined(__aarch64__) || \
defined(__arm__) || defined(__aarch64__) || \
defined(__riscv)
1;
#else