linux/arch/x86/kernel/perf_regs.c
Linus Torvalds 405f868f13

Merge tag 'x86_cleanups_for_v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 cleanups from Borislav Petkov:
 "Another branch with a nicely negative diffstat, just the way I
  like 'em:

   - Remove all uses of TIF_IA32 and TIF_X32 and reclaim the two bits in
     the end (Gabriel Krisman Bertazi)

   - All kinds of minor cleanups all over the tree"

* tag 'x86_cleanups_for_v5.11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (24 commits)
  x86/ia32_signal: Propagate __user annotation properly
  x86/alternative: Update text_poke_bp() kernel-doc comment
  x86/PCI: Make a kernel-doc comment a normal one
  x86/asm: Drop unused RDPID macro
  x86/boot/compressed/64: Use TEST %reg,%reg instead of CMP $0,%reg
  x86/head64: Remove duplicate include
  x86/mm: Declare 'start' variable where it is used
  x86/head/64: Remove unused GET_CR2_INTO() macro
  x86/boot: Remove unused finalize_identity_maps()
  x86/uaccess: Document copy_from_user_nmi()
  x86/dumpstack: Make show_trace_log_lvl() static
  x86/mtrr: Fix a kernel-doc markup
  x86/setup: Remove unused MCA variables
  x86, libnvdimm/test: Remove COPY_MC_TEST
  x86: Reclaim TIF_IA32 and TIF_X32
  x86/mm: Convert mmu context ia32_compat into a proper flags field
  x86/elf: Use e_machine to check for x32/ia32 in setup_additional_pages()
  elf: Expose ELF header on arch_setup_additional_pages()
  x86/elf: Use e_machine to select start_thread for x32
  elf: Expose ELF header in compat_start_thread()
  ...
2020-12-14 13:45:26 -08:00


// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/perf_event.h>
#include <linux/bug.h>
#include <linux/stddef.h>

#include <asm/perf_regs.h>
#include <asm/ptrace.h>

#ifdef CONFIG_X86_32
#define PERF_REG_X86_MAX PERF_REG_X86_32_MAX
#else
#define PERF_REG_X86_MAX PERF_REG_X86_64_MAX
#endif

#define PT_REGS_OFFSET(id, r) [id] = offsetof(struct pt_regs, r)

static unsigned int pt_regs_offset[PERF_REG_X86_MAX] = {
	PT_REGS_OFFSET(PERF_REG_X86_AX, ax),
	PT_REGS_OFFSET(PERF_REG_X86_BX, bx),
	PT_REGS_OFFSET(PERF_REG_X86_CX, cx),
	PT_REGS_OFFSET(PERF_REG_X86_DX, dx),
	PT_REGS_OFFSET(PERF_REG_X86_SI, si),
	PT_REGS_OFFSET(PERF_REG_X86_DI, di),
	PT_REGS_OFFSET(PERF_REG_X86_BP, bp),
	PT_REGS_OFFSET(PERF_REG_X86_SP, sp),
	PT_REGS_OFFSET(PERF_REG_X86_IP, ip),
	PT_REGS_OFFSET(PERF_REG_X86_FLAGS, flags),
	PT_REGS_OFFSET(PERF_REG_X86_CS, cs),
	PT_REGS_OFFSET(PERF_REG_X86_SS, ss),
#ifdef CONFIG_X86_32
	PT_REGS_OFFSET(PERF_REG_X86_DS, ds),
	PT_REGS_OFFSET(PERF_REG_X86_ES, es),
	PT_REGS_OFFSET(PERF_REG_X86_FS, fs),
	PT_REGS_OFFSET(PERF_REG_X86_GS, gs),
#else
	/*
	 * The pt_regs struct does not store
	 * ds, es, fs, gs in 64 bit mode.
	 */
	(unsigned int) -1,
	(unsigned int) -1,
	(unsigned int) -1,
	(unsigned int) -1,
#endif
#ifdef CONFIG_X86_64
	PT_REGS_OFFSET(PERF_REG_X86_R8, r8),
	PT_REGS_OFFSET(PERF_REG_X86_R9, r9),
	PT_REGS_OFFSET(PERF_REG_X86_R10, r10),
	PT_REGS_OFFSET(PERF_REG_X86_R11, r11),
	PT_REGS_OFFSET(PERF_REG_X86_R12, r12),
	PT_REGS_OFFSET(PERF_REG_X86_R13, r13),
	PT_REGS_OFFSET(PERF_REG_X86_R14, r14),
	PT_REGS_OFFSET(PERF_REG_X86_R15, r15),
#endif
};
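/*
 * perf_reg_value() below resolves a PERF_REG_X86_* sample index to the
 * corresponding register value.  General-purpose registers are read out of
 * struct pt_regs via the offset table above; XMM registers come from the
 * xmm_regs array the PMU code can attach through struct x86_perf_regs,
 * where each 128-bit XMM register spans two consecutive u64 slots (which is
 * why the uapi enum spaces the PERF_REG_X86_XMM* indices two apart).
 */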
u64 perf_reg_value(struct pt_regs *regs, int idx)
{
	struct x86_perf_regs *perf_regs;

	if (idx >= PERF_REG_X86_XMM0 && idx < PERF_REG_X86_XMM_MAX) {
		perf_regs = container_of(regs, struct x86_perf_regs, regs);
		if (!perf_regs->xmm_regs)
			return 0;
		return perf_regs->xmm_regs[idx - PERF_REG_X86_XMM0];
	}

	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(pt_regs_offset)))
		return 0;

	return regs_get_register(regs, pt_regs_offset[idx]);
}

#define PERF_REG_X86_RESERVED	(((1ULL << PERF_REG_X86_XMM0) - 1) & \
				 ~((1ULL << PERF_REG_X86_MAX) - 1))
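/*
 * PERF_REG_X86_RESERVED covers the hole between the last supported
 * general-purpose register index and the first XMM index, i.e. bits
 * PERF_REG_X86_MAX..PERF_REG_X86_XMM0-1 (with the uapi enum values that is
 * roughly bits 24-31 on 64-bit and 16-31 on 32-bit).  perf_reg_validate()
 * rejects any user-supplied mask that touches these bits.
 */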
#ifdef CONFIG_X86_32
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
		       (1ULL << PERF_REG_X86_R9) | \
		       (1ULL << PERF_REG_X86_R10) | \
		       (1ULL << PERF_REG_X86_R11) | \
		       (1ULL << PERF_REG_X86_R12) | \
		       (1ULL << PERF_REG_X86_R13) | \
		       (1ULL << PERF_REG_X86_R14) | \
		       (1ULL << PERF_REG_X86_R15))

int perf_reg_validate(u64 mask)
{
	if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
		return -EINVAL;

	return 0;
}

u64 perf_reg_abi(struct task_struct *task)
{
	return PERF_SAMPLE_REGS_ABI_32;
}

void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs)
{
	regs_user->regs = task_pt_regs(current);
	regs_user->abi = perf_reg_abi(current);
}
#else /* CONFIG_X86_64 */
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
		       (1ULL << PERF_REG_X86_ES) | \
		       (1ULL << PERF_REG_X86_FS) | \
		       (1ULL << PERF_REG_X86_GS))

int perf_reg_validate(u64 mask)
{
	if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
		return -EINVAL;

	return 0;
}
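/*
 * For instance, a sample_regs_user mask of
 *	(1ULL << PERF_REG_X86_IP) | (1ULL << PERF_REG_X86_SP) |
 *	(1ULL << PERF_REG_X86_BP)
 * is accepted here, while a mask setting any of the segment-register bits
 * (PERF_REG_X86_DS..PERF_REG_X86_GS) or a reserved bit gets -EINVAL.
 */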
u64 perf_reg_abi(struct task_struct *task)
{
	if (!user_64bit_mode(task_pt_regs(task)))
		return PERF_SAMPLE_REGS_ABI_32;
	else
		return PERF_SAMPLE_REGS_ABI_64;
}

static DEFINE_PER_CPU(struct pt_regs, nmi_user_regs);

void perf_get_regs_user(struct perf_regs *regs_user,
			struct pt_regs *regs)
{
	struct pt_regs *regs_user_copy = this_cpu_ptr(&nmi_user_regs);
	struct pt_regs *user_regs = task_pt_regs(current);

	if (!in_nmi()) {
		regs_user->regs = user_regs;
		regs_user->abi = perf_reg_abi(current);
		return;
	}

	/*
	 * If we're in an NMI that interrupted task_pt_regs setup, then
	 * we can't sample user regs at all.  This check isn't really
	 * sufficient, though, as we could be in an NMI inside an interrupt
	 * that happened during task_pt_regs setup.
	 */
	if (regs->sp > (unsigned long)&user_regs->r11 &&
	    regs->sp <= (unsigned long)(user_regs + 1)) {
		regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
		regs_user->regs = NULL;
		return;
	}

	/*
	 * These registers are always saved on 64-bit syscall entry.
	 * On 32-bit entry points, they are saved too except r8..r11.
	 */
	regs_user_copy->ip = user_regs->ip;
	regs_user_copy->ax = user_regs->ax;
	regs_user_copy->cx = user_regs->cx;
	regs_user_copy->dx = user_regs->dx;
	regs_user_copy->si = user_regs->si;
	regs_user_copy->di = user_regs->di;
	regs_user_copy->r8 = user_regs->r8;
	regs_user_copy->r9 = user_regs->r9;
	regs_user_copy->r10 = user_regs->r10;
	regs_user_copy->r11 = user_regs->r11;
	regs_user_copy->orig_ax = user_regs->orig_ax;
	regs_user_copy->flags = user_regs->flags;
	regs_user_copy->sp = user_regs->sp;
	regs_user_copy->cs = user_regs->cs;
	regs_user_copy->ss = user_regs->ss;

	/*
	 * Store user space frame-pointer value on sample
	 * to facilitate stack unwinding for cases when
	 * user space executable code has such support
	 * enabled at compile time:
	 */
	regs_user_copy->bp = user_regs->bp;

	regs_user_copy->bx = -1;
	regs_user_copy->r12 = -1;
	regs_user_copy->r13 = -1;
	regs_user_copy->r14 = -1;
	regs_user_copy->r15 = -1;

	/*
	 * For this to be at all useful, we need a reasonable guess for
	 * the ABI.  Be careful: we're in NMI context, and we're
	 * considering current to be the current task, so we should
	 * be careful not to look at any other percpu variables that might
	 * change during context switches.
	 */
	regs_user->abi = user_64bit_mode(user_regs) ?
		PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;

	regs_user->regs = regs_user_copy;
}
#endif /* CONFIG_X86_32 */
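/*
 * Rough user-space sketch (ABI names from perf_event_open(2)): to make the
 * kernel invoke perf_get_regs_user() and emit the sampled registers, an
 * event could be opened along these lines:
 *
 *	struct perf_event_attr attr = {
 *		.type             = PERF_TYPE_HARDWARE,
 *		.config           = PERF_COUNT_HW_CPU_CYCLES,
 *		.size             = sizeof(attr),
 *		.sample_period    = 100000,
 *		.sample_type      = PERF_SAMPLE_IP | PERF_SAMPLE_REGS_USER,
 *		.sample_regs_user = (1ULL << PERF_REG_X86_IP) |
 *				    (1ULL << PERF_REG_X86_SP) |
 *				    (1ULL << PERF_REG_X86_BP),
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 * The requested mask is checked by perf_reg_validate() at event creation,
 * and the register values then appear in the PERF_SAMPLE_REGS_USER portion
 * of each sample record in the ring buffer.
 */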