mirror of
https://github.com/torvalds/linux
synced 2024-11-05 18:23:50 +00:00
54c8818aa2
In system_supports_cnp() we use cpus_have_const_cap() to check for ARM64_HAS_CNP, but this is only necessary so that the cpu_enable_cnp() callback can run prior to alternatives being patched, and otherwise this is not necessary and alternative_has_cap_*() would be preferable. For historical reasons, cpus_have_const_cap() is more complicated than it needs to be. Before cpucaps are finalized, it will perform a bitmap test of the system_cpucaps bitmap, and once cpucaps are finalized it will use an alternative branch. This used to be necessary to handle some race conditions in the window between cpucap detection and the subsequent patching of alternatives and static branches, where different branches could be out-of-sync with one another (or w.r.t. alternative sequences). Now that we use alternative branches instead of static branches, these are all patched atomically w.r.t. one another, and there are only a handful of cases that need special care in the window between cpucap detection and alternative patching. Due to the above, it would be nice to remove cpus_have_const_cap(), and migrate callers over to alternative_has_cap_*(), cpus_have_final_cap(), or cpus_have_cap() depending on their requirements. This will remove redundant instructions and improve code generation, and will make it easier to determine how each callsite will behave before, during, and after alternative patching. The cpu_enable_cnp() callback is run immediately after the ARM64_HAS_CNP cpucap is detected system-wide under setup_system_capabilities(), prior to alternatives being patched. During this window cpu_enable_cnp() uses cpu_replace_ttbr1() to set the CNP bit for the swapper_pg_dir in TTBR1. No other users of the ARM64_HAS_CNP cpucap need the up-to-date value during this window: * As KVM isn't initialized yet, kvm_get_vttbr() isn't reachable. * As cpuidle isn't initialized yet, __cpu_suspend_exit() isn't reachable. 
* At this point all CPUs are using the swapper_pg_dir with a reserved ASID in TTBR1, and the idmap_pg_dir in TTBR0, so neither check_and_switch_context() nor cpu_do_switch_mm() need to do anything special. This patch replaces the use of cpus_have_const_cap() with alternative_has_cap_unlikely(), which will avoid generating code to test the system_cpucaps bitmap and should be better for all subsequent calls at runtime. To allow cpu_enable_cnp() to function prior to alternatives being patched, cpu_replace_ttbr1() is split into cpu_replace_ttbr1() and cpu_enable_swapper_cnp(), with the former only used for early TTBR1 replacement, and the latter used by both cpu_enable_cnp() and __cpu_suspend_exit(). Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Ard Biesheuvel <ardb@kernel.org> Cc: Suzuki K Poulose <suzuki.poulose@arm.com> Cc: Vladimir Murzin <vladimir.murzin@arm.com> Cc: Will Deacon <will@kernel.org> Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
175 lines
4.6 KiB
C
175 lines
4.6 KiB
C
// SPDX-License-Identifier: GPL-2.0
|
|
#include <linux/ftrace.h>
|
|
#include <linux/percpu.h>
|
|
#include <linux/slab.h>
|
|
#include <linux/uaccess.h>
|
|
#include <linux/pgtable.h>
|
|
#include <linux/cpuidle.h>
|
|
#include <asm/alternative.h>
|
|
#include <asm/cacheflush.h>
|
|
#include <asm/cpufeature.h>
|
|
#include <asm/cpuidle.h>
|
|
#include <asm/daifflags.h>
|
|
#include <asm/debug-monitors.h>
|
|
#include <asm/exec.h>
|
|
#include <asm/mte.h>
|
|
#include <asm/memory.h>
|
|
#include <asm/mmu_context.h>
|
|
#include <asm/smp_plat.h>
|
|
#include <asm/suspend.h>
|
|
|
|
/*
|
|
* This is allocated by cpu_suspend_init(), and used to store a pointer to
|
|
* the 'struct sleep_stack_data' the contains a particular CPUs state.
|
|
*/
|
|
unsigned long *sleep_save_stash;
|
|
|
|
/*
|
|
* This hook is provided so that cpu_suspend code can restore HW
|
|
* breakpoints as early as possible in the resume path, before reenabling
|
|
* debug exceptions. Code cannot be run from a CPU PM notifier since by the
|
|
* time the notifier runs debug exceptions might have been enabled already,
|
|
* with HW breakpoints registers content still in an unknown state.
|
|
*/
|
|
static int (*hw_breakpoint_restore)(unsigned int);
|
|
void __init cpu_suspend_set_dbg_restorer(int (*hw_bp_restore)(unsigned int))
|
|
{
|
|
/* Prevent multiple restore hook initializations */
|
|
if (WARN_ON(hw_breakpoint_restore))
|
|
return;
|
|
hw_breakpoint_restore = hw_bp_restore;
|
|
}
|
|
|
|
/*
 * Bring this CPU back to a fully-functional state after waking from a
 * low-power state: reinstall the kernel's TTBR1 mapping, re-apply PSTATE
 * and feature configuration that was lost across the power-down, and
 * restore HW breakpoints before debug exceptions can be re-enabled.
 */
void notrace __cpu_suspend_exit(void)
{
	unsigned int cpu = smp_processor_id();

	mte_suspend_exit();

	/*
	 * We are resuming from reset with the idmap active in TTBR0_EL1.
	 * We must uninstall the idmap and restore the expected MMU
	 * state before we can possibly return to userspace.
	 */
	cpu_uninstall_idmap();

	/* Restore CnP bit in TTBR1_EL1 */
	if (system_supports_cnp())
		cpu_enable_swapper_cnp();

	/*
	 * PSTATE was not saved over suspend/resume, re-enable any detected
	 * features that might not have been set correctly.
	 *
	 * Cpucaps are finalized (and alternatives patched) long before
	 * anything can suspend, so use the patched alternative directly
	 * rather than the deprecated cpus_have_const_cap(), matching the
	 * CnP check above.
	 */
	if (alternative_has_cap_unlikely(ARM64_HAS_DIT))
		set_pstate_dit(1);
	__uaccess_enable_hw_pan();

	/*
	 * Restore HW breakpoint registers to sane values
	 * before debug exceptions are possibly reenabled
	 * by cpu_suspend()s local_daif_restore() call.
	 */
	if (hw_breakpoint_restore)
		hw_breakpoint_restore(cpu);

	/*
	 * On resume, firmware implementing dynamic mitigation will
	 * have turned the mitigation on. If the user has forcefully
	 * disabled it, make sure their wishes are obeyed.
	 */
	spectre_v4_enable_mitigation(NULL);

	/* Restore additional feature-specific configuration */
	ptrauth_suspend_exit();
}
|
/*
|
|
* cpu_suspend
|
|
*
|
|
* arg: argument to pass to the finisher function
|
|
* fn: finisher function pointer
|
|
*
|
|
*/
|
|
int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
|
|
{
|
|
int ret = 0;
|
|
unsigned long flags;
|
|
struct sleep_stack_data state;
|
|
struct arm_cpuidle_irq_context context;
|
|
|
|
/* Report any MTE async fault before going to suspend */
|
|
mte_suspend_enter();
|
|
|
|
/*
|
|
* From this point debug exceptions are disabled to prevent
|
|
* updates to mdscr register (saved and restored along with
|
|
* general purpose registers) from kernel debuggers.
|
|
*
|
|
* Strictly speaking the trace_hardirqs_off() here is superfluous,
|
|
* hardirqs should be firmly off by now. This really ought to use
|
|
* something like raw_local_daif_save().
|
|
*/
|
|
flags = local_daif_save();
|
|
|
|
/*
|
|
* Function graph tracer state gets inconsistent when the kernel
|
|
* calls functions that never return (aka suspend finishers) hence
|
|
* disable graph tracing during their execution.
|
|
*/
|
|
pause_graph_tracing();
|
|
|
|
/*
|
|
* Switch to using DAIF.IF instead of PMR in order to reliably
|
|
* resume if we're using pseudo-NMIs.
|
|
*/
|
|
arm_cpuidle_save_irq_context(&context);
|
|
|
|
ct_cpuidle_enter();
|
|
|
|
if (__cpu_suspend_enter(&state)) {
|
|
/* Call the suspend finisher */
|
|
ret = fn(arg);
|
|
|
|
/*
|
|
* Never gets here, unless the suspend finisher fails.
|
|
* Successful cpu_suspend() should return from cpu_resume(),
|
|
* returning through this code path is considered an error
|
|
* If the return value is set to 0 force ret = -EOPNOTSUPP
|
|
* to make sure a proper error condition is propagated
|
|
*/
|
|
if (!ret)
|
|
ret = -EOPNOTSUPP;
|
|
|
|
ct_cpuidle_exit();
|
|
} else {
|
|
ct_cpuidle_exit();
|
|
__cpu_suspend_exit();
|
|
}
|
|
|
|
arm_cpuidle_restore_irq_context(&context);
|
|
|
|
unpause_graph_tracing();
|
|
|
|
/*
|
|
* Restore pstate flags. OS lock and mdscr have been already
|
|
* restored, so from this point onwards, debugging is fully
|
|
* reenabled if it was enabled when core started shutdown.
|
|
*/
|
|
local_daif_restore(flags);
|
|
|
|
return ret;
|
|
}
|
|
|
|
/*
 * Early allocation of the per-CPU save-pointer stash used across suspend.
 * Runs once at boot; returns 0 on success, -ENOMEM on allocation failure.
 */
static int __init cpu_suspend_init(void)
{
	unsigned int nr_slots = mpidr_hash_size();

	/*
	 * ctx_ptr is an array of physical addresses: one slot per
	 * possible MPIDR hash bucket.
	 */
	sleep_save_stash = kcalloc(nr_slots, sizeof(*sleep_save_stash),
				   GFP_KERNEL);
	if (WARN_ON(!sleep_save_stash))
		return -ENOMEM;

	return 0;
}
early_initcall(cpu_suspend_init);