commit ca708599ca
Currently we strip the PAC from pointers using C code, which requires
generating bitmasks, and conditionally clearing/setting bits depending
on bit 55. We can do better by using XPACLRI directly.

When the logic was originally written to strip PACs from user pointers,
contemporary toolchains used for the kernel had assemblers which were
unaware of the PAC instructions. As stripping the PAC from userspace
pointers required unconditional clearing of a fixed set of bits (which
could be performed with a single instruction), it was simpler to
implement the masking in C than it was to make use of XPACI or XPACLRI.

When support for in-kernel pointer authentication was added, the
stripping logic was extended to cover TTBR1 pointers, requiring several
instructions to handle whether to clear/set bits dependent on bit 55 of
the pointer.

This patch simplifies the stripping of PACs by using XPACLRI directly,
as contemporary toolchains do within __builtin_return_address(). This
saves a number of instructions, especially where
__builtin_return_address() does not implicitly strip the PAC but is
heavily used (e.g. with tracepoints). As the kernel might be compiled
with an assembler without knowledge of XPACLRI, it is assembled using
the 'HINT #7' alias, which results in an identical opcode.

At the same time, I've split ptrauth_strip_insn_pac() into
ptrauth_strip_user_insn_pac() and ptrauth_strip_kernel_insn_pac()
helpers so that we can avoid unnecessary PAC stripping when pointer
authentication is not in use in userspace or kernel respectively.

The underlying xpaclri() macro uses inline assembly which clobbers x30.
The clobber causes the compiler to save/restore the original x30 value
in a frame record (protected with PACIASP and AUTIASP when in-kernel
authentication is enabled), so this does not provide a gadget to alter
the return address. Similarly this does not adversely affect unwinding
due to the presence of the frame record.

The ptrauth_user_pac_mask() and ptrauth_kernel_pac_mask() are exported
from the kernel in ptrace and core dumps, so these are retained. A
subsequent patch will move them out of <asm/compiler.h>.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Amit Daniel Kachhap <amit.kachhap@arm.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Kristina Martsenko <kristina.martsenko@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20230412160134.306148-3-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
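To make the change concrete, below is a minimal sketch of the
XPACLRI-based stripping the commit describes. The helper names
(xpaclri(), ptrauth_strip_user_insn_pac(), ptrauth_strip_kernel_insn_pac())
come from the commit text; the macro bodies and the pairing of config
guards (CONFIG_ARM64_PTR_AUTH, CONFIG_ARM64_PTR_AUTH_KERNEL) are an
illustrative assumption, not the verbatim <asm/compiler.h> code:

/*
 * Sketch, not verbatim upstream code: XPACLRI strips the PAC from the
 * pointer held in x30 (the link register), so the value is round-tripped
 * through an x30-bound register variable. "hint #7" is the XPACLRI
 * alias, so this assembles even with assemblers that predate the PAC
 * instructions. Because x30 is an input/output of the asm, the compiler
 * saves/restores the original x30 in a frame record.
 */
#define xpaclri(ptr)						\
({								\
	register unsigned long __xpaclri_ptr asm("x30") =	\
		(unsigned long)(ptr);				\
								\
	asm("hint #7" : "+r" (__xpaclri_ptr)); /* XPACLRI */	\
								\
	__xpaclri_ptr;						\
})

/*
 * The split helpers compile to a no-op when the corresponding
 * authentication support is not configured, avoiding redundant
 * stripping (assumed guard/helper pairing).
 */
#ifdef CONFIG_ARM64_PTR_AUTH
#define ptrauth_strip_user_insn_pac(ptr)	xpaclri(ptr)
#else
#define ptrauth_strip_user_insn_pac(ptr)	(ptr)
#endif

#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
#define ptrauth_strip_kernel_insn_pac(ptr)	xpaclri(ptr)
#else
#define ptrauth_strip_kernel_insn_pac(ptr)	(ptr)
#endif

In the callchain code below, user_backtrace() calls
ptrauth_strip_user_insn_pac() on the saved LR before recording it;
under a sketch like the above, that is a single HINT instruction, or
nothing at all when userspace pointer authentication is not configured.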
178 lines · 4.1 KiB · C
// SPDX-License-Identifier: GPL-2.0-only
/*
 * arm64 callchain support
 *
 * Copyright (C) 2015 ARM Limited
 */
#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>

#include <asm/pointer_auth.h>

struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry_ctx *entry)
{
	struct frame_tail buftail;
	unsigned long err;
	unsigned long lr;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	lr = ptrauth_strip_user_insn_pac(buftail.lr);

	perf_callchain_store(entry, lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail >= buftail.fp)
		return NULL;

	return buftail.fp;
}

#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
	compat_uptr_t	fp; /* a (struct compat_frame_tail *) in compat mode */
	u32		sp;
	u32		lr;
} __attribute__((packed));

static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
		      struct perf_callchain_entry_ctx *entry)
{
	struct compat_frame_tail buftail;
	unsigned long err;

	/* Also check accessibility of one struct frame_tail beyond */
	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= (struct compat_frame_tail __user *)
			compat_ptr(buftail.fp))
		return NULL;

	return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	if (perf_guest_state()) {
		/* We don't support guest os callchain now */
		return;
	}

	perf_callchain_store(entry, regs->pc);

	if (!compat_user_mode(regs)) {
		/* AARCH64 mode */
		struct frame_tail __user *tail;

		tail = (struct frame_tail __user *)regs->regs[29];

		while (entry->nr < entry->max_stack &&
		       tail && !((unsigned long)tail & 0x7))
			tail = user_backtrace(tail, entry);
	} else {
#ifdef CONFIG_COMPAT
		/* AARCH32 compat mode */
		struct compat_frame_tail __user *tail;

		tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

		while ((entry->nr < entry->max_stack) &&
			tail && !((unsigned long)tail & 0x3))
			tail = compat_user_backtrace(tail, entry);
#endif
	}
}

static bool callchain_trace(void *data, unsigned long pc)
{
	struct perf_callchain_entry_ctx *entry = data;
	return perf_callchain_store(entry, pc) == 0;
}

void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
			   struct pt_regs *regs)
{
	if (perf_guest_state()) {
		/* We don't support guest os callchain now */
		return;
	}

	arch_stack_walk(callchain_trace, entry, current, regs);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_state())
		return perf_guest_get_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	unsigned int guest_state = perf_guest_state();
	int misc = 0;

	if (guest_state) {
		if (guest_state & PERF_GUEST_USER)
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}