tracing/function-graph-tracer: append the tracing_graph_flag

Impact: Provide a way to pause the function graph tracer

As suggested by Steven Rostedt, the previous patch, which prevented spinlock
functions from being traced, shouldn't have used a raw_spinlock as the fix.
It's much better to stay visible to lockdep with a normal spinlock, so this
patch adds a per-task flag that lets the function graph tracer be paused. We
can also emit an ftrace_printk without worrying about the irrelevant traced
spinlock being inserted into the trace.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
commit 380c4b1411 (parent 8e1b82e086)
Author:    Frederic Weisbecker, 2008-12-06 03:43:41 +01:00
Committer: Ingo Molnar

5 changed files with 26 additions and 14 deletions
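In outline, the patch gives each task an atomic pause count that the graph
tracer's entry hook checks before recording anything. A minimal sketch of the
usage pattern this enables (the pause/unpause helpers match the diff below;
the calling function is hypothetical, for illustration only):

        #include <linux/ftrace.h>

        /* Hypothetical call site, not part of this patch. */
        static void emit_message_quietly(void)
        {
                pause_graph_tracing();   /* atomic_inc(&current->tracing_graph_pause) */

                /*
                 * Locks taken and helpers called here are not recorded
                 * as irrelevant entries in this task's graph trace.
                 */

                unpause_graph_tracing(); /* atomic_dec(): tracing resumes at zero */
        }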

arch/x86/kernel/ftrace.c

@@ -476,7 +476,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
                                 &return_to_handler;
 
         /* Nmi's are currently unsupported */
-        if (atomic_read(&in_nmi))
+        if (unlikely(atomic_read(&in_nmi)))
+                return;
+
+        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                 return;
 
         /*
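(prepare_ftrace_return() runs at every traced function entry, so both checks
sit in a hot path; the unlikely() hints keep the common case, neither in an
NMI nor paused, cheap.)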

include/linux/ftrace.h

@@ -401,6 +401,16 @@ static inline int task_curr_ret_stack(struct task_struct *t)
 {
         return t->curr_ret_stack;
 }
+
+static inline void pause_graph_tracing(void)
+{
+        atomic_inc(&current->tracing_graph_pause);
+}
+
+static inline void unpause_graph_tracing(void)
+{
+        atomic_dec(&current->tracing_graph_pause);
+}
 #else
 
 #define __notrace_funcgraph
@@ -412,6 +422,9 @@ static inline int task_curr_ret_stack(struct task_struct *tsk)
 {
         return -1;
 }
+
+static inline void pause_graph_tracing(void) { }
+static inline void unpause_graph_tracing(void) { }
 #endif
 
 #ifdef CONFIG_TRACING
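Because the helpers increment and decrement a counter instead of toggling a
boolean, pauses nest; a sketch with hypothetical nested call sites:

        pause_graph_tracing();          /* count 0 -> 1: graph tracing paused */
        pause_graph_tracing();          /* count 1 -> 2: nested pause */
        unpause_graph_tracing();        /* count 2 -> 1: still paused */
        unpause_graph_tracing();        /* count 1 -> 0: tracing resumes */

When CONFIG_FUNCTION_GRAPH_TRACER is off, the empty stubs compile away.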

include/linux/sched.h

@@ -1379,6 +1379,8 @@ struct task_struct {
          * because of depth overrun.
          */
         atomic_t trace_overrun;
+        /* Pause for the tracing */
+        atomic_t tracing_graph_pause;
 #endif
 #ifdef CONFIG_TRACING
         /* state flags for use by tracers */

kernel/trace/ftrace.c

@@ -1998,6 +1998,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
                         /* Make sure IRQs see the -1 first: */
                         barrier();
                         t->ret_stack = ret_stack_list[start++];
+                        atomic_set(&t->tracing_graph_pause, 0);
                         atomic_set(&t->trace_overrun, 0);
                 }
         } while_each_thread(g, t);
@@ -2077,6 +2078,7 @@ void ftrace_graph_init_task(struct task_struct *t)
                 if (!t->ret_stack)
                         return;
                 t->curr_ret_stack = -1;
+                atomic_set(&t->tracing_graph_pause, 0);
                 atomic_set(&t->trace_overrun, 0);
         } else
                 t->ret_stack = NULL;
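Both initialization paths zero the new flag: alloc_retstack_tasklist() covers
tasks that already exist when the tracer is enabled, and
ftrace_graph_init_task() covers tasks forked afterwards, so every task starts
out unpaused.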

kernel/trace/trace.c

@@ -3590,14 +3590,7 @@ static __init int tracer_init_debugfs(void)
 int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
 {
-        /*
-         * Raw Spinlock because a normal spinlock would be traced here
-         * and append an irrelevant couple spin_lock_irqsave/
-         * spin_unlock_irqrestore traced by ftrace around this
-         * TRACE_PRINTK trace.
-         */
-        static raw_spinlock_t trace_buf_lock =
-                (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
+        static DEFINE_SPINLOCK(trace_buf_lock);
         static char trace_buf[TRACE_BUF_SIZE];
 
         struct ring_buffer_event *event;
@@ -3618,8 +3611,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
         if (unlikely(atomic_read(&data->disabled)))
                 goto out;
 
-        local_irq_save(flags);
-        __raw_spin_lock(&trace_buf_lock);
+        pause_graph_tracing();
+        spin_lock_irqsave(&trace_buf_lock, irq_flags);
         len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
         len = min(len, TRACE_BUF_SIZE-1);
 
@@ -3640,9 +3633,8 @@ int trace_vprintk(unsigned long ip, int depth, const char *fmt, va_list args)
         ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
 
  out_unlock:
-        __raw_spin_unlock(&trace_buf_lock);
-        local_irq_restore(flags);
+        spin_unlock_irqrestore(&trace_buf_lock, irq_flags);
+        unpause_graph_tracing();
  out:
         preempt_enable_notrace();
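Note the ordering in trace_vprintk(): graph tracing is paused before
trace_buf_lock is taken and resumed only after it is released, so the
spin_lock_irqsave()/spin_unlock_irqrestore() pair never shows up as noise in
the caller's graph trace. That is what allows the raw_spinlock workaround,
which bypassed lockdep, to be replaced with a normal spinlock.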