tracing: Add alloc_snapshot kernel command line parameter
If debugging the kernel, and the developer wants to use tracing_snapshot() in places where tracing_snapshot_alloc() may be difficult (or more likely, the developer is lazy and doesn't want to bother with tracing_snapshot_alloc() at all), then adding alloc_snapshot to the kernel command line will tell ftrace to allocate the snapshot buffer (if configured) when it allocates the main tracing buffer.

I also noticed that ring_buffer_expanded and tracing_selftest_disabled had inconsistent use of boolean "true" and "false" with "0" and "1". I cleaned that up too.

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
commit 55034cd6e6
parent f4e781c0a8
4 changed files with 60 additions and 38 deletions
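A minimal sketch of the use case from the commit message (not part of this commit; the initcall below is hypothetical): with alloc_snapshot on the boot command line, early boot code can call tracing_snapshot() directly, without a prior tracing_snapshot_alloc() call in a GFP_KERNEL-safe context:

    /* Hypothetical test code; assumes a kernel booted with "alloc_snapshot". */
    #include <linux/kernel.h>	/* tracing_snapshot() when CONFIG_TRACING is set */
    #include <linux/init.h>

    static int __init snapshot_boot_test(void)
    {
    	/*
    	 * The snapshot buffer was already allocated at boot, so there is
    	 * no need to call tracing_snapshot_alloc() first, which must run
    	 * where GFP_KERNEL allocations are allowed.
    	 */
    	tracing_snapshot();
    	return 0;
    }
    early_initcall(snapshot_boot_test);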
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -320,6 +320,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			on: enable for both 32- and 64-bit processes
 			off: disable for both 32- and 64-bit processes
 
+	alloc_snapshot	[FTRACE]
+			Allocate the ftrace snapshot buffer on boot up when the
+			main buffer is allocated. This is handy if debugging
+			and you need to use tracing_snapshot() on boot up, and
+			do not want to use tracing_snapshot_alloc() as it needs
+			to be done where GFP_KERNEL allocations are allowed.
+
 	amd_iommu=	[HW,X86-64]
 			Pass parameters to the AMD IOMMU driver in the system.
 			Possible values are:
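As the new documentation entry above implies, enabling this is just a matter of appending the parameter to the kernel boot line; a purely illustrative example (the image name and the other options are placeholders):

    linux /boot/vmlinuz ro quiet ftrace=function alloc_snapshot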
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -47,7 +47,7 @@
  * On boot up, the ring buffer is set to the minimum size, so that
  * we do not waste memory on systems that are not using tracing.
  */
-int ring_buffer_expanded;
+bool ring_buffer_expanded;
 
 /*
  * We need to change this state when a selftest is running.
@@ -121,12 +121,14 @@ static int tracing_set_tracer(const char *buf);
 static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
 static char *default_bootup_tracer;
 
+static bool allocate_snapshot;
+
 static int __init set_cmdline_ftrace(char *str)
 {
 	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
 	default_bootup_tracer = bootup_tracer_buf;
 	/* We are using ftrace early, expand it */
-	ring_buffer_expanded = 1;
+	ring_buffer_expanded = true;
 	return 1;
 }
 __setup("ftrace=", set_cmdline_ftrace);
@@ -147,6 +149,15 @@ static int __init set_ftrace_dump_on_oops(char *str)
 }
 __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
 
+static int __init alloc_snapshot(char *str)
+{
+	allocate_snapshot = true;
+	/* We also need the main ring buffer expanded */
+	ring_buffer_expanded = true;
+	return 1;
+}
+__setup("alloc_snapshot", alloc_snapshot);
+
 static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
 static char *trace_boot_options __initdata;
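With the alloc_snapshot handler above registered via __setup(), the snapshot is usable as soon as tracing comes up; a rough usage sketch, assuming debugfs is mounted at the conventional /sys/kernel/debug:

    echo 1 > /sys/kernel/debug/tracing/snapshot    # take a snapshot of the main buffer
    cat /sys/kernel/debug/tracing/snapshot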
@@ -951,7 +962,7 @@ int register_tracer(struct tracer *type)
 		tracing_set_tracer(type->name);
 		default_bootup_tracer = NULL;
 		/* disable other selftests, since this will break it. */
-		tracing_selftest_disabled = 1;
+		tracing_selftest_disabled = true;
 #ifdef CONFIG_FTRACE_STARTUP_TEST
 		printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
 		       type->name);
@@ -3318,7 +3329,7 @@ static int __tracing_resize_ring_buffer(struct trace_array *tr,
 	 * we use the size that was given, and we can forget about
 	 * expanding it later.
 	 */
-	ring_buffer_expanded = 1;
+	ring_buffer_expanded = true;
 
 	/* May be called before buffers are initialized */
 	if (!tr->trace_buffer.buffer)
@@ -5396,53 +5407,57 @@ static void init_trace_buffers(struct trace_array *tr, struct trace_buffer *buf)
 	}
 }
 
-static int allocate_trace_buffers(struct trace_array *tr, int size)
+static int
+allocate_trace_buffer(struct trace_array *tr, struct trace_buffer *buf, int size)
 {
 	enum ring_buffer_flags rb_flags;
 
 	rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;
 
-	tr->trace_buffer.buffer = ring_buffer_alloc(size, rb_flags);
-	if (!tr->trace_buffer.buffer)
-		goto out_free;
+	buf->buffer = ring_buffer_alloc(size, rb_flags);
+	if (!buf->buffer)
+		return -ENOMEM;
 
-	tr->trace_buffer.data = alloc_percpu(struct trace_array_cpu);
-	if (!tr->trace_buffer.data)
-		goto out_free;
+	buf->data = alloc_percpu(struct trace_array_cpu);
+	if (!buf->data) {
+		ring_buffer_free(buf->buffer);
+		return -ENOMEM;
+	}
 
-	init_trace_buffers(tr, &tr->trace_buffer);
+	init_trace_buffers(tr, buf);
 
 	/* Allocate the first page for all buffers */
 	set_buffer_entries(&tr->trace_buffer,
 			   ring_buffer_size(tr->trace_buffer.buffer, 0));
 
+	return 0;
+}
+
+static int allocate_trace_buffers(struct trace_array *tr, int size)
+{
+	int ret;
+
+	ret = allocate_trace_buffer(tr, &tr->trace_buffer, size);
+	if (ret)
+		return ret;
+
 #ifdef CONFIG_TRACER_MAX_TRACE
-	tr->max_buffer.buffer = ring_buffer_alloc(1, rb_flags);
-	if (!tr->max_buffer.buffer)
-		goto out_free;
-	tr->max_buffer.data = alloc_percpu(struct trace_array_cpu);
-	if (!tr->max_buffer.data)
-		goto out_free;
-	init_trace_buffers(tr, &tr->max_buffer);
-	set_buffer_entries(&tr->max_buffer, 1);
+	ret = allocate_trace_buffer(tr, &tr->max_buffer,
+				    allocate_snapshot ? size : 1);
+	if (WARN_ON(ret)) {
+		ring_buffer_free(tr->trace_buffer.buffer);
+		free_percpu(tr->trace_buffer.data);
+		return -ENOMEM;
+	}
+	tr->allocated_snapshot = allocate_snapshot;
+
+	/*
+	 * Only the top level trace array gets its snapshot allocated
+	 * from the kernel command line.
+	 */
+	allocate_snapshot = false;
 #endif
 	return 0;
-
- out_free:
-	if (tr->trace_buffer.buffer)
-		ring_buffer_free(tr->trace_buffer.buffer);
-	free_percpu(tr->trace_buffer.data);
-
-#ifdef CONFIG_TRACER_MAX_TRACE
-	if (tr->max_buffer.buffer)
-		ring_buffer_free(tr->max_buffer.buffer);
-	free_percpu(tr->max_buffer.data);
-#endif
-	return -ENOMEM;
 }
 
 static int new_instance_create(const char *name)
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -660,7 +660,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
 extern int DYN_FTRACE_TEST_NAME2(void);
 
-extern int ring_buffer_expanded;
+extern bool ring_buffer_expanded;
 extern bool tracing_selftest_disabled;
 DECLARE_PER_CPU(int, ftrace_cpu_disabled);
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -1844,8 +1844,8 @@ static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
 static __init int setup_trace_event(char *str)
 {
 	strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
-	ring_buffer_expanded = 1;
-	tracing_selftest_disabled = 1;
+	ring_buffer_expanded = true;
+	tracing_selftest_disabled = true;
 
 	return 1;
 }