author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-21 09:39:03 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-21 09:39:03 +0300
commit     56f410cf45a1c1f68f77741990e0435b06a07676 (patch)
tree       b52f55cd45228474818b5409b722255a0a24ed24 /kernel/trace/trace.c
parent     894e21642dde19184f059c485c49abd7ecdd6ec9 (diff)
parent     a33d7d94eed92b23fbbc7b0de06a41b2bbaa49e3 (diff)
download   linux-56f410cf45a1c1f68f77741990e0435b06a07676.tar.xz
Merge tag 'trace-v4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace
Pull tracing fixes from Steven Rostedt:

 - Fix a bug caused by not cleaning up an instance's unique triggers
   when the instance is deleted, and add a selftest that exercises that
   bug.

 - Fix the delayed kprobe optimization, which could run after the
   kprobes used by the boot-up self tests had already been removed by
   the freeing of init memory.

 - Add a comment to kprobes explaining why the delayed optimization is
   not a problem for module removal, to keep other developers from
   having to puzzle that out again.

 - Fix another case of RCU not watching in stack trace tracing.

* tag 'trace-v4.12-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Make sure RCU is watching before calling a stack trace
  kprobes: Document how optimized kprobes are removed from module unload
  selftests/ftrace: Add test to remove instance with active event triggers
  selftests/ftrace: Fix bashisms
  ftrace: Remove #ifdef from code and add clear_ftrace_function_probes() stub
  ftrace/instances: Clear function triggers when removing instances
  ftrace: Simplify glob handling in unregister_ftrace_function_probe_func()
  tracing/kprobes: Enforce kprobes teardown after testing
  tracing: Move postponed selftests to core from early_initcall
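The last item above corresponds to the __trace_stack() change in the diff below. As a minimal sketch of that guard pattern only (not the kernel's implementation; guarded_stack_trace() is a hypothetical wrapper name, while the RCU helpers and __ftrace_trace_stack() are the ones appearing in the hunk):

	static void guarded_stack_trace(struct ring_buffer *buffer,
					unsigned long flags, int skip, int pc)
	{
		/* Fast path: RCU is already watching this CPU. */
		if (rcu_is_watching()) {
			__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
			return;
		}

		/* If RCU is not watching in an NMI, the NMI hit somewhere
		 * critical; rcu_irq_enter() must not be called from there. */
		if (unlikely(in_nmi()))
			return;

		/* A few internal RCU functions cannot tolerate rcu_irq_enter();
		 * when tracing one of those, silently drop the stack trace. */
		if (unlikely(rcu_irq_enter_disabled()))
			return;

		/* Briefly tell RCU we are in an interrupt-like region so the
		 * ring buffer access is safe, then undo it. */
		rcu_irq_enter_irqson();
		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
		rcu_irq_exit_irqson();
	}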
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  34
1 file changed, 32 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c4536c449021..1122f151466f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1558,7 +1558,7 @@ static __init int init_trace_selftests(void)
 
 	return 0;
 }
-early_initcall(init_trace_selftests);
+core_initcall(init_trace_selftests);
 #else
 static inline int run_tracer_selftest(struct tracer *type)
 {
@@ -2568,7 +2568,36 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc)
 {
-	__ftrace_trace_stack(tr->trace_buffer.buffer, flags, skip, pc, NULL);
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
+
+	if (rcu_is_watching()) {
+		__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+		return;
+	}
+
+	/*
+	 * When an NMI triggers, RCU is enabled via rcu_nmi_enter(),
+	 * but if the above rcu_is_watching() failed, then the NMI
+	 * triggered someplace critical, and rcu_irq_enter() should
+	 * not be called from NMI.
+	 */
+	if (unlikely(in_nmi()))
+		return;
+
+	/*
+	 * It is possible that a function is being traced in a
+	 * location that RCU is not watching. A call to
+	 * rcu_irq_enter() will make sure that it is, but there's
+	 * a few internal rcu functions that could be traced
+	 * where that won't work either. In those cases, we just
+	 * do nothing.
+	 */
+	if (unlikely(rcu_irq_enter_disabled()))
+		return;
+
+	rcu_irq_enter_irqson();
+	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
+	rcu_irq_exit_irqson();
 }
 
 /**
/**
@@ -7550,6 +7579,7 @@ static int instance_rmdir(const char *name)
 	}
 
 	tracing_set_nop(tr);
+	clear_ftrace_function_probes(tr);
 	event_trace_del_tracer(tr);
 	ftrace_clear_pids(tr);
 	ftrace_destroy_function_files(tr);