ca708599ca
Currently we strip the PAC from pointers using C code, which requires generating bitmasks, and conditionally clearing/setting bits depending on bit 55. We can do better by using XPACLRI directly.

When the logic was originally written to strip PACs from user pointers, contemporary toolchains used for the kernel had assemblers which were unaware of the PAC instructions. As stripping the PAC from userspace pointers required unconditional clearing of a fixed set of bits (which could be performed with a single instruction), it was simpler to implement the masking in C than it was to make use of XPACI or XPACLRI.

When support for in-kernel pointer authentication was added, the stripping logic was extended to cover TTBR1 pointers, requiring several instructions to handle whether to clear/set bits dependent on bit 55 of the pointer.

This patch simplifies the stripping of PACs by using XPACLRI directly, as contemporary toolchains do within __builtin_return_address(). This saves a number of instructions, especially where __builtin_return_address() does not implicitly strip the PAC but is heavily used (e.g. with tracepoints). As the kernel might be compiled with an assembler without knowledge of XPACLRI, it is assembled using the 'HINT #7' alias, which results in an identical opcode.

At the same time, I've split ptrauth_strip_insn_pac() into ptrauth_strip_user_insn_pac() and ptrauth_strip_kernel_insn_pac() helpers so that we can avoid unnecessary PAC stripping when pointer authentication is not in use in userspace or kernel respectively.

The underlying xpaclri() macro uses inline assembly which clobbers x30. The clobber causes the compiler to save/restore the original x30 value in a frame record (protected with PACIASP and AUTIASP when in-kernel authentication is enabled), so this does not provide a gadget to alter the return address. Similarly this does not adversely affect unwinding due to the presence of the frame record.

The ptrauth_user_pac_mask() and ptrauth_kernel_pac_mask() are exported from the kernel in ptrace and core dumps, so these are retained. A subsequent patch will move them out of <asm/compiler.h>.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Amit Daniel Kachhap <amit.kachhap@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kristina Martsenko <kristina.martsenko@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230412160134.306148-3-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
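
Below is a minimal sketch of the mechanism the message describes. The helper names (xpaclri(), ptrauth_strip_user_insn_pac(), ptrauth_strip_kernel_insn_pac()) are taken from the message itself, but the macro bodies and the CONFIG_ARM64_PTR_AUTH / CONFIG_ARM64_PTR_AUTH_KERNEL guards are illustrative assumptions rather than the literal hunks of this commit:

/*
 * Sketch: strip a PAC via XPACLRI, assembled as its "HINT #7" alias so
 * that assemblers without PAC support still accept it (the opcode is
 * identical). XPACLRI operates only on x30 (LR), so the operand is
 * pinned there; the "+r" constraint makes the compiler save/restore
 * any live x30 value in a frame record around the asm.
 */
#define xpaclri(ptr)                                                    \
({                                                                      \
        register unsigned long __xpaclri_ptr asm("x30") = (ptr);       \
                                                                        \
        asm("hint #7" : "+r" (__xpaclri_ptr));                          \
                                                                        \
        __xpaclri_ptr;                                                  \
})

/* Strip only when the relevant flavour of authentication can be in use. */
#ifdef CONFIG_ARM64_PTR_AUTH
#define ptrauth_strip_user_insn_pac(ptr)        xpaclri(ptr)
#else
#define ptrauth_strip_user_insn_pac(ptr)        (ptr)
#endif

#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
#define ptrauth_strip_kernel_insn_pac(ptr)      xpaclri(ptr)
#else
#define ptrauth_strip_kernel_insn_pac(ptr)      (ptr)
#endif

Since XPACLRI sits in the HINT instruction space, cores without pointer authentication execute it as a NOP, so the same kernel image behaves correctly on both PAC and non-PAC hardware.
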
242 lines
5.5 KiB
C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stack tracing support
 *
 * Copyright (C) 2012 ARM Ltd.
 */
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/ftrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>

#include <asm/efi.h>
#include <asm/irq.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>

/*
 * Start an unwind from a pt_regs.
 *
 * The unwind will begin at the PC within the regs.
 *
 * The regs must be on a stack currently owned by the calling task.
 */
static __always_inline void
unwind_init_from_regs(struct unwind_state *state,
                      struct pt_regs *regs)
{
        unwind_init_common(state, current);

        state->fp = regs->regs[29];
        state->pc = regs->pc;
}

/*
 * Start an unwind from a caller.
 *
 * The unwind will begin at the caller of whichever function this is inlined
 * into.
 *
 * The function which invokes this must be noinline; otherwise the builtins
 * below would describe an inlined context rather than the intended caller.
 */
static __always_inline void
unwind_init_from_caller(struct unwind_state *state)
{
        unwind_init_common(state, current);

        state->fp = (unsigned long)__builtin_frame_address(1);
        state->pc = (unsigned long)__builtin_return_address(0);
}

/*
 * Start an unwind from a blocked task.
 *
 * The unwind will begin at the blocked task's saved PC (i.e. the caller of
 * cpu_switch_to()).
 *
 * The caller should ensure the task is blocked in cpu_switch_to() for the
 * duration of the unwind, or the unwind will be bogus. It is never valid to
 * call this for the current task.
 */
static __always_inline void
unwind_init_from_task(struct unwind_state *state,
                      struct task_struct *task)
{
        unwind_init_common(state, task);

        state->fp = thread_saved_fp(task);
        state->pc = thread_saved_pc(task);
}
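
/*
 * The function graph tracer and kretprobes replace saved return addresses
 * with their trampolines (return_to_handler and the kretprobe trampoline
 * respectively) in order to hook function returns; translate such a PC
 * back to the original return address before it is reported.
 */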
static __always_inline int
unwind_recover_return_address(struct unwind_state *state)
{
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        if (state->task->ret_stack &&
            (state->pc == (unsigned long)return_to_handler)) {
                unsigned long orig_pc;
                orig_pc = ftrace_graph_ret_addr(state->task, NULL, state->pc,
                                                (void *)state->fp);
                if (WARN_ON_ONCE(state->pc == orig_pc))
                        return -EINVAL;
                state->pc = orig_pc;
        }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_KRETPROBES
        if (is_kretprobe_trampoline(state->pc)) {
                state->pc = kretprobe_find_ret_addr(state->task,
                                                    (void *)state->fp,
                                                    &state->kr_cur);
        }
#endif /* CONFIG_KRETPROBES */

        return 0;
}

/*
 * Unwind from one frame record (A) to the next frame record (B).
 *
 * We terminate early if the location of B indicates a malformed chain of frame
 * records (e.g. a cycle), determined based on the location and fp value of A
 * and the location (but not the fp value) of B.
 */
static __always_inline int
unwind_next(struct unwind_state *state)
{
        struct task_struct *tsk = state->task;
        unsigned long fp = state->fp;
        int err;

        /* Final frame; nothing to unwind */
        if (fp == (unsigned long)task_pt_regs(tsk)->stackframe)
                return -ENOENT;

        err = unwind_next_frame_record(state);
        if (err)
                return err;

        /* The LR saved in a frame record may carry a PAC; strip it. */
        state->pc = ptrauth_strip_kernel_insn_pac(state->pc);

        return unwind_recover_return_address(state);
}
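
/*
 * Walk the stack from the initial state, feeding each recovered PC to
 * consume_entry(), until consume_entry() declines or no further frames
 * can be unwound.
 */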
static __always_inline void
unwind(struct unwind_state *state, stack_trace_consume_fn consume_entry,
       void *cookie)
{
        if (unwind_recover_return_address(state))
                return;

        while (1) {
                int ret;

                if (!consume_entry(cookie, state->pc))
                        break;
                ret = unwind_next(state);
                if (ret < 0)
                        break;
        }
}

/*
 * Per-cpu stacks are only accessible when unwinding the current task in a
 * non-preemptible context.
 */
#define STACKINFO_CPU(name)                                     \
        ({                                                      \
                ((task == current) && !preemptible())           \
                        ? stackinfo_get_##name()                \
                        : stackinfo_get_unknown();              \
        })

/*
 * SDEI stacks are only accessible when unwinding the current task in an NMI
 * context.
 */
#define STACKINFO_SDEI(name)                                    \
        ({                                                      \
                ((task == current) && in_nmi())                 \
                        ? stackinfo_get_sdei_##name()           \
                        : stackinfo_get_unknown();              \
        })
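
/*
 * The EFI runtime stack is only accessible when unwinding the current task
 * while it is executing an EFI runtime service (current_in_efi()).
 */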
#define STACKINFO_EFI                                           \
        ({                                                      \
                ((task == current) && current_in_efi())         \
                        ? stackinfo_get_efi()                   \
                        : stackinfo_get_unknown();              \
        })
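
/*
 * arch_stack_walk() is the arm64 backend for the generic stacktrace code:
 * it builds the table of stacks the unwinder may transition across,
 * initialises an unwind from regs, from the caller, or from a blocked
 * task, and feeds each unwound PC to consume_entry().
 */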
noinline noinstr void arch_stack_walk(stack_trace_consume_fn consume_entry,
                                      void *cookie, struct task_struct *task,
                                      struct pt_regs *regs)
{
        struct stack_info stacks[] = {
                stackinfo_get_task(task),
                STACKINFO_CPU(irq),
#if defined(CONFIG_VMAP_STACK)
                STACKINFO_CPU(overflow),
#endif
#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_ARM_SDE_INTERFACE)
                STACKINFO_SDEI(normal),
                STACKINFO_SDEI(critical),
#endif
#ifdef CONFIG_EFI
                STACKINFO_EFI,
#endif
        };
        struct unwind_state state = {
                .stacks = stacks,
                .nr_stacks = ARRAY_SIZE(stacks),
        };

        if (regs) {
                if (task != current)
                        return;
                unwind_init_from_regs(&state, regs);
        } else if (task == current) {
                unwind_init_from_caller(&state);
        } else {
                unwind_init_from_task(&state, task);
        }

        unwind(&state, consume_entry, cookie);
}

static bool dump_backtrace_entry(void *arg, unsigned long where)
{
        char *loglvl = arg;
        printk("%s %pSb\n", loglvl, (void *)where);
        return true;
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
                    const char *loglvl)
{
        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (regs && user_mode(regs))
                return;

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        printk("%sCall trace:\n", loglvl);
        arch_stack_walk(dump_backtrace_entry, (void *)loglvl, tsk, regs);

        put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
        dump_backtrace(NULL, tsk, loglvl);
        /* The barrier prevents a tail call to dump_backtrace(). */
        barrier();
}