linux/arch/parisc/kernel/ftrace.c
Linus Torvalds 79ef0c0014 Tracing updates for 5.16

Merge tag 'trace-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:

 - kprobes: Restructured stack unwinder to show properly on x86 when a
   stack dump happens from a kretprobe callback.

 - Fix to bootconfig parsing.

 - Have tracefs allow owner and group permissions by default (only
   denying others). There has been pressure to allow non-root access to
   tracefs in a controlled fashion, and using groups is probably the
   safest approach.

 - Bootconfig memory management updates.

 - Bootconfig cleanup to make the tools directory less dependent on
   changes in the kernel tree.

 - Allow perf to be traced by function tracer.

 - Rewrite of function graph tracer to be a callback from the function
   tracer instead of having its own trampoline (this change will happen
   on an arch by arch basis, and currently only x86_64 implements it).

 - Allow multiple direct trampolines (bpf hooks to functions) to be
   batched together in one synchronization.

 - Allow histogram triggers to add variables that can perform
   calculations against the event's fields.

 - Use the linker to determine architecture callbacks from the ftrace
   trampoline to allow for proper parameter prototypes and prevent
   warnings from the compiler.

 - Extend histogram triggers to key off of variables.

 - Have trace recursion use bit magic to determine preempt context
   instead of if branches (see the sketch after this list).

 - Have trace recursion disable preemption as all use cases do anyway.

 - Added testing for verification of tracing utilities.

 - Various small cleanups and fixes.
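
 As an illustration of the recursion change above: the preempt context
 (normal vs softirq vs hardirq vs NMI) can be read out of preempt_count()
 with three branch-free bit tests whose sum is the context level. This is
 only a sketch of the trick, not the exact in-tree code; NMI_MASK,
 HARDIRQ_MASK and SOFTIRQ_OFFSET are the preempt_count bits defined in
 <linux/preempt.h>:

	static inline unsigned char context_level(unsigned long pc)
	{
		unsigned char level = 0;

		/* each term is 0 or 1, so no if branches are needed */
		level += !!(pc & NMI_MASK);
		level += !!(pc & (NMI_MASK | HARDIRQ_MASK));
		level += !!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET));

		return level;	/* 0=normal, 1=softirq, 2=hardirq, 3=NMI */
	}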

* tag 'trace-v5.16' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (101 commits)
  tracing/histogram: Fix semicolon.cocci warnings
  tracing/histogram: Fix documentation inline emphasis warning
  tracing: Increase PERF_MAX_TRACE_SIZE to handle Sentinel1 and docker together
  tracing: Show size of requested perf buffer
  bootconfig: Initialize ret in xbc_parse_tree()
  ftrace: do CPU checking after preemption disabled
  ftrace: disable preemption when recursion locked
  tracing/histogram: Document expression arithmetic and constants
  tracing/histogram: Optimize division by a power of 2
  tracing/histogram: Covert expr to const if both operands are constants
  tracing/histogram: Simplify handling of .sym-offset in expressions
  tracing: Fix operator precedence for hist triggers expression
  tracing: Add division and multiplication support for hist triggers
  tracing: Add support for creating hist trigger variables from literal
  selftests/ftrace: Stop tracing while reading the trace file by default
  MAINTAINERS: Update KPROBES and TRACING entries
  test_kprobes: Move it from kernel/ to lib/
  docs, kprobes: Remove invalid URL and add new reference
  samples/kretprobes: Fix return value if register_kretprobe() failed
  lib/bootconfig: Fix the xbc_get_info kerneldoc
  ...
2021-11-01 20:05:19 -07:00

// SPDX-License-Identifier: GPL-2.0
/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009-2016 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 * - add CONFIG_STACK_TRACER
 */

#include <linux/init.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/jump_label.h>

#include <asm/assembly.h>
#include <asm/sections.h>
#include <asm/ftrace.h>
#include <asm/patch.h>

#define __hot __section(".text.hot")
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static DEFINE_STATIC_KEY_FALSE(ftrace_graph_enable);

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 */
static void __hot prepare_ftrace_return(unsigned long *parent,
					unsigned long self_addr)
{
	unsigned long old;
	extern int parisc_return_to_handler;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;

	if (!function_graph_enter(old, self_addr, 0, NULL))
		/* activate parisc_return_to_handler() as return point */
		*parent = (unsigned long) &parisc_return_to_handler;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
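
/*
 * ftrace_func holds the tracer callback installed through
 * ftrace_update_ftrace_func() below. Every traced function ends up in
 * ftrace_function_trampoline(), which forwards to that callback and,
 * when the graph tracer is active, additionally hooks the caller's
 * return address found on the stack.
 */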
static ftrace_func_t ftrace_func;

void notrace __hot ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3,
				struct ftrace_regs *fregs)
{
	extern struct ftrace_ops *function_trace_op;

	ftrace_func(self_addr, parent, function_trace_op, fregs);

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (static_branch_unlikely(&ftrace_graph_enable)) {
		unsigned long *parent_rp;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) (org_sp_gr3 - RP_OFFSET);
		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}
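
/*
 * Graph tracing is toggled with a static branch, so the trampoline
 * above pays no conditional-branch cost while it is disabled.
 */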
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
int ftrace_enable_ftrace_graph_caller(void)
{
	static_key_enable(&ftrace_graph_enable.key);
	return 0;
}

int ftrace_disable_ftrace_graph_caller(void)
{
	static_key_disable(&ftrace_graph_enable.key);
	return 0;
}
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	ftrace_func = func;
	return 0;
}

int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
		       unsigned long addr)
{
	return 0;
}
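
/*
 * The compiler emits FTRACE_PATCHABLE_FUNCTION_SIZE NOP slots at each
 * function entry (-fpatchable-function-entry); move the recorded
 * address to the last of those slots, which is the instruction that
 * actually gets patched with the live branch.
 */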
unsigned long ftrace_call_adjust(unsigned long addr)
{
	return addr + (FTRACE_PATCHABLE_FUNCTION_SIZE - 1) * 4;
}
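
/*
 * Turn the NOP area in front of the function into a mini-trampoline:
 * the live instruction at rec->ip becomes a backward branch-and-link
 * into the preceding slots, which save %r1, load the ftrace caller
 * address from a literal embedded in the instruction stream and branch
 * to it. Refuse with -EINVAL if the area no longer contains only NOPs.
 */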
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	u32 *tramp;
	int size, ret, i;
	void *ip;

#ifdef CONFIG_64BIT
	unsigned long addr2 =
		(unsigned long)dereference_function_descriptor((void *)addr);

	u32 ftrace_trampoline[] = {
		0x73c10208, /* std,ma r1,100(sp) */
		0x0c2110c1, /* ldd -10(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		addr2 >> 32,
		addr2 & 0xffffffff,
		0xe83f1fd7, /* b,l,n .-14,r1 */
	};

	u32 ftrace_trampoline_unaligned[] = {
		addr2 >> 32,
		addr2 & 0xffffffff,
		0x37de0200, /* ldo 100(sp),sp */
		0x73c13e01, /* std r1,-100(sp) */
		0x34213ff9, /* ldo -4(r1),r1 */
		0x50213fc1, /* ldd -20(r1),r1 */
		0xe820d002, /* bve,n (r1) */
		0xe83f1fcf, /* b,l,n .-20,r1 */
	};

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline_unaligned) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);
#else
	u32 ftrace_trampoline[] = {
		(u32)addr,
		0x6fc10080, /* stw,ma r1,40(sp) */
		0x48213fd1, /* ldw -18(r1),r1 */
		0xe820c002, /* bv,n r0(r1) */
		0xe83f1fdf, /* b,l,n .-c,r1 */
	};
#endif

	BUILD_BUG_ON(ARRAY_SIZE(ftrace_trampoline) >
				FTRACE_PATCHABLE_FUNCTION_SIZE);

	size = sizeof(ftrace_trampoline);
	tramp = ftrace_trampoline;

#ifdef CONFIG_64BIT
	if (rec->ip & 0x4) {
		size = sizeof(ftrace_trampoline_unaligned);
		tramp = ftrace_trampoline_unaligned;
	}
#endif

	ip = (void *)(rec->ip + 4 - size);

	ret = copy_from_kernel_nofault(insn, ip, size);
	if (ret)
		return ret;

	for (i = 0; i < size / 4; i++) {
		if (insn[i] != INSN_NOP)
			return -EINVAL;
	}

	__patch_text_multiple(ip, tramp, size);
	return 0;
}
int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
		    unsigned long addr)
{
	u32 insn[FTRACE_PATCHABLE_FUNCTION_SIZE];
	int i;

	for (i = 0; i < ARRAY_SIZE(insn); i++)
		insn[i] = INSN_NOP;

	__patch_text((void *)rec->ip, INSN_NOP);
	__patch_text_multiple((void *)rec->ip + 4 - sizeof(insn),
			      insn, sizeof(insn) - 4);
	return 0;
}
#endif
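
/*
 * With KPROBES_ON_FTRACE, a kprobe placed on a function entry is
 * serviced from this ftrace callback instead of a break trap: the
 * instruction address queue (iaoq) is rewound to the probed address
 * for the pre_handler and then advanced past the patched entry so
 * execution resumes in the function body.
 */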
#ifdef CONFIG_KPROBES_ON_FTRACE
void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
			   struct ftrace_ops *ops, struct ftrace_regs *fregs)
{
	struct kprobe_ctlblk *kcb;
	struct pt_regs *regs;
	struct kprobe *p;
	int bit;

	bit = ftrace_test_recursion_trylock(ip, parent_ip);
	if (bit < 0)
		return;

	regs = ftrace_get_regs(fregs);
	p = get_kprobe((kprobe_opcode_t *)ip);
	if (unlikely(!p) || kprobe_disabled(p))
		goto out;

	if (kprobe_running()) {
		kprobes_inc_nmissed_count(p);
		goto out;
	}

	__this_cpu_write(current_kprobe, p);

	kcb = get_kprobe_ctlblk();
	kcb->kprobe_status = KPROBE_HIT_ACTIVE;

	regs->iaoq[0] = ip;
	regs->iaoq[1] = ip + 4;

	if (!p->pre_handler || !p->pre_handler(p, regs)) {
		regs->iaoq[0] = ip + 4;
		regs->iaoq[1] = ip + 8;

		if (unlikely(p->post_handler)) {
			kcb->kprobe_status = KPROBE_HIT_SSDONE;
			p->post_handler(p, regs, 0);
		}
	}
	__this_cpu_write(current_kprobe, NULL);
out:
	ftrace_test_recursion_unlock(bit);
}
NOKPROBE_SYMBOL(kprobe_ftrace_handler);

int arch_prepare_kprobe_ftrace(struct kprobe *p)
{
	p->ainsn.insn = NULL;
	return 0;
}
#endif