field | value | date
---|---|---
author | Anton Vorontsov <anton.vorontsov@linaro.org> | 2012-07-18 01:26:15 +0400
committer | Anton Vorontsov <anton.vorontsov@linaro.org> | 2012-09-07 09:16:58 +0400
commit | 65f8c95e46a1827ae8bbc52a817ea308dd7d65ae (patch) |
tree | adc856e8b50441b055350d8f1d83e3f641c77456 /kernel/trace/trace_functions.c |
parent | b4a871bce619dc5ca03cc6c78e1c467ceacb8e7e (diff) |
download | linux-65f8c95e46a1827ae8bbc52a817ea308dd7d65ae.tar.xz |
pstore/ftrace: Convert to its own enable/disable debugfs knob
With this patch we no longer reuse the function tracer infrastructure; instead,
we register our own tracer back-end via a debugfs knob (a sketch of what such a
registration looks like follows the sign-off block below).
It's a bit more code, but that is the only downside. On the bright side we
have:
- The ability to make the persistent_ram module removable (when needed, we can
move the ftrace_ops struct into a module). Note that persistent_ram is still
not removable for other reasons, but with this patch that is one less thing
to worry about;
- The pstore part is more isolated from the generic function tracer. We already
tried registering our own tracer in available_tracers, but that way we lose
the ability to see the traces while recording them to pstore. This solution
sits somewhere in the middle: we register only the "internal ftracer"
back-end, not the "front-end";
- When only pstore tracing is enabled, the kernel writes solely to the pstore
buffer, bypassing the function tracer buffer (which, of course, can still
be enabled via 'echo function > current_tracer').
Suggested-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Anton Vorontsov <anton.vorontsov@linaro.org>
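For context on what "registering our own tracer back-end via a debugfs knob" looks like in practice, here is a minimal sketch of the pattern: a private struct ftrace_ops whose callback hands every ip/parent_ip pair to pstore, registered and unregistered from a boolean debugfs file. This is an illustrative reconstruction, not the actual fs/pstore/ftrace.c added by this series; the names pstore_ftrace_ops, record_ftrace and the stubbed helper pstore_write_record() are assumptions, and the two-argument callback signature reflects the ftrace API of roughly this kernel era (later kernels pass additional arguments).

```c
#include <linux/debugfs.h>
#include <linux/ftrace.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

/*
 * Illustrative stub: the real back-end hands the record to pstore
 * (e.g. via the platform's write callback); details omitted here.
 */
static void notrace pstore_write_record(unsigned long ip,
					unsigned long parent_ip)
{
}

/* Tracer callback; notrace keeps it from being traced itself. */
static void notrace pstore_ftrace_callback(unsigned long ip,
					   unsigned long parent_ip)
{
	unsigned long flags;

	local_irq_save(flags);
	pstore_write_record(ip, parent_ip);
	local_irq_restore(flags);
}

/* The "internal ftracer" back-end: never listed in available_tracers. */
static struct ftrace_ops pstore_ftrace_ops __read_mostly = {
	.func = pstore_ftrace_callback,
};

static bool pstore_ftrace_enabled;

/*
 * debugfs knob: writing 1/0 starts/stops recording into pstore.
 * The real code would also serialize writers with a mutex.
 */
static ssize_t record_ftrace_write(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	u8 on;
	int ret;

	ret = kstrtou8_from_user(buf, count, 10, &on);
	if (ret)
		return ret;

	if (!!on == pstore_ftrace_enabled)
		return count;

	if (on)
		ret = register_ftrace_function(&pstore_ftrace_ops);
	else
		ret = unregister_ftrace_function(&pstore_ftrace_ops);
	if (ret)
		return ret;

	pstore_ftrace_enabled = on;
	return count;
}

static const struct file_operations record_ftrace_fops = {
	.write = record_ftrace_write,
};

static int __init pstore_ftrace_knob_init(void)
{
	struct dentry *dir = debugfs_create_dir("pstore", NULL);

	debugfs_create_file("record_ftrace", 0600, dir, NULL,
			    &record_ftrace_fops);
	return 0;
}
```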
Diffstat (limited to 'kernel/trace/trace_functions.c')
-rw-r--r-- | kernel/trace/trace_functions.c | 15 |
1 file changed, 1 insertion, 14 deletions
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index a426f410c060..0ad83e3929d1 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -13,7 +13,6 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/ftrace.h>
-#include <linux/pstore.h>
 #include <linux/fs.h>
 
 #include "trace.h"
@@ -75,10 +74,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
         preempt_enable_notrace();
 }
 
-/* Our two options */
+/* Our option */
 enum {
         TRACE_FUNC_OPT_STACK = 0x1,
-        TRACE_FUNC_OPT_PSTORE = 0x2,
 };
 
 static struct tracer_flags func_flags;
@@ -106,12 +104,6 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
         disabled = atomic_inc_return(&data->disabled);
 
         if (likely(disabled == 1)) {
-                /*
-                 * So far tracing doesn't support multiple buffers, so
-                 * we make an explicit call for now.
-                 */
-                if (unlikely(func_flags.val & TRACE_FUNC_OPT_PSTORE))
-                        pstore_ftrace_call(ip, parent_ip);
                 pc = preempt_count();
                 trace_function(tr, ip, parent_ip, flags, pc);
         }
@@ -177,9 +169,6 @@ static struct tracer_opt func_opts[] = {
 #ifdef CONFIG_STACKTRACE
         { TRACER_OPT(func_stack_trace, TRACE_FUNC_OPT_STACK) },
 #endif
-#ifdef CONFIG_PSTORE_FTRACE
-        { TRACER_OPT(func_pstore, TRACE_FUNC_OPT_PSTORE) },
-#endif
         { } /* Always set a last empty entry */
 };
 
@@ -232,8 +221,6 @@ static int func_set_flag(u32 old_flags, u32 bit, int set)
                 }
                 break;
 
-        case TRACE_FUNC_OPT_PSTORE:
-                break;
         default:
                 return -EINVAL;
         }
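Taken together with the pstore-side changes in this series, the two tracing paths are now controlled independently: the regular function tracer through 'echo function > current_tracer' as before, and persistent tracing through the new pstore debugfs knob (the file is created by the pstore code rather than by this diff; it is commonly seen as record_ftrace under the pstore debugfs directory, but the exact path is an assumption here). The removed TRACE_FUNC_OPT_PSTORE branch is exactly the coupling the commit message describes: after this patch the function tracer no longer needs to know about pstore at all.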