| author | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2018-08-06 22:50:58 +0300 |
|---|---|---|
| committer | Steven Rostedt (VMware) <rostedt@goodmis.org> | 2018-08-10 22:11:25 +0300 |
| commit | bff1b208a5d1dbb2355822ef859edcb9be0379e4 | |
| tree | 14fad0cf56447502af1f714e22de707ad19f9a9d /kernel/trace/trace_preemptirq.c | |
| parent | da5b3ebb4527733299661229a8d035d64a4f0b1a | |
| download | linux-bff1b208a5d1dbb2355822ef859edcb9be0379e4.tar.xz | |
tracing: Partial revert of "tracing: Centralize preemptirq tracepoints and unify their usage"
Joel Fernandes created a nice patch that cleaned up the duplicate hooks used
by lockdep and the irqsoff latency tracer. It made both use tracepoints. But it
caused lockdep to trigger several false positives. We have not figured out
why yet, but removing lockdep from the trace event hooks and just calling
its helper functions directly (like it used to) makes the problem go away.
This is a partial revert of the cleanup patch c3bc8fd637a9 ("tracing:
Centralize preemptirq tracepoints and unify their usage"): it adds direct
calls for lockdep, but keeps most of the cleanup done to get rid of
the horrible preprocessor if statements.
Link: http://lkml.kernel.org/r/20180806155058.5ee875f4@gandalf.local.home
Cc: Peter Zijlstra <peterz@infradead.org>
Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
Fixes: c3bc8fd637a9 ("tracing: Centralize preemptirq tracepoints and unify their usage")
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
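
The shape of the change is the same in all four hooks: the tracepoint stays
gated by the per-CPU tracing_irq_cpu flag, while the lockdep helper is now
called directly and unconditionally on the way out. Taking trace_hardirqs_on()
as it reads after this patch (verbatim from the diff below):

```c
void trace_hardirqs_on(void)
{
	if (this_cpu_read(tracing_irq_cpu)) {
		trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
		this_cpu_write(tracing_irq_cpu, 0);
	}

	lockdep_hardirqs_on(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);
```

Before this patch, lockdep reached lockdep_hardirqs_on() only via the
irq_enable tracepoint; now it is invoked directly, regardless of whether the
tracepoint fires.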
Diffstat (limited to 'kernel/trace/trace_preemptirq.c')
| -rw-r--r-- | kernel/trace/trace_preemptirq.c | 36 |
1 file changed, 20 insertions(+), 16 deletions(-)
```diff
diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
index e76b78bf258e..fa656b25f427 100644
--- a/kernel/trace/trace_preemptirq.c
+++ b/kernel/trace/trace_preemptirq.c
@@ -19,41 +19,45 @@ static DEFINE_PER_CPU(int, tracing_irq_cpu);
 
 void trace_hardirqs_on(void)
 {
-	if (!this_cpu_read(tracing_irq_cpu))
-		return;
+	if (this_cpu_read(tracing_irq_cpu)) {
+		trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+		this_cpu_write(tracing_irq_cpu, 0);
+	}
 
-	trace_irq_enable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
-	this_cpu_write(tracing_irq_cpu, 0);
+	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on);
 
 void trace_hardirqs_off(void)
 {
-	if (this_cpu_read(tracing_irq_cpu))
-		return;
+	if (!this_cpu_read(tracing_irq_cpu)) {
+		this_cpu_write(tracing_irq_cpu, 1);
+		trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+	}
 
-	this_cpu_write(tracing_irq_cpu, 1);
-	trace_irq_disable_rcuidle(CALLER_ADDR0, CALLER_ADDR1);
+	lockdep_hardirqs_off(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_off);
 
 __visible void trace_hardirqs_on_caller(unsigned long caller_addr)
 {
-	if (!this_cpu_read(tracing_irq_cpu))
-		return;
+	if (this_cpu_read(tracing_irq_cpu)) {
+		trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
+		this_cpu_write(tracing_irq_cpu, 0);
+	}
 
-	trace_irq_enable_rcuidle(CALLER_ADDR0, caller_addr);
-	this_cpu_write(tracing_irq_cpu, 0);
+	lockdep_hardirqs_on(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_on_caller);
 
 __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
 {
-	if (this_cpu_read(tracing_irq_cpu))
-		return;
+	if (!this_cpu_read(tracing_irq_cpu)) {
+		this_cpu_write(tracing_irq_cpu, 1);
+		trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+	}
 
-	this_cpu_write(tracing_irq_cpu, 1);
-	trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+	lockdep_hardirqs_off(CALLER_ADDR0);
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
 #endif /* CONFIG_TRACE_IRQFLAGS */
```
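
For readers unfamiliar with the guard pattern kept by this patch:
tracing_irq_cpu is a per-CPU flag that suppresses duplicate events, so only
the first trace_hardirqs_off() after an enable, and the first
trace_hardirqs_on() after a disable, actually fires a tracepoint. Below is a
minimal user-space sketch of the same idea, with hypothetical names
(irqs_marked_off, emit_event, mark_irqs_on/off) and a single thread standing
in for one CPU; the kernel applies the identical logic per CPU via
this_cpu_read()/this_cpu_write().

```c
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the per-CPU tracing_irq_cpu flag. */
static bool irqs_marked_off;

/* Stands in for the irq_enable/irq_disable tracepoints. */
static void emit_event(const char *what)
{
	printf("event: %s\n", what);
}

static void mark_irqs_off(void)
{
	if (!irqs_marked_off) {
		/* Set the flag first, then emit: only the first "off" counts. */
		irqs_marked_off = true;
		emit_event("irq_disable");
	}
}

static void mark_irqs_on(void)
{
	if (irqs_marked_off) {
		/* Emit first, then clear: only an "on" matching an "off" counts. */
		emit_event("irq_enable");
		irqs_marked_off = false;
	}
}

int main(void)
{
	mark_irqs_off();	/* event: irq_disable */
	mark_irqs_off();	/* suppressed: already marked off */
	mark_irqs_on();		/* event: irq_enable */
	mark_irqs_on();		/* suppressed: no matching disable */
	return 0;
}
```

Note that the diff preserves this set-before/clear-after ordering inside the
new if blocks; only the unconditional lockdep_hardirqs_on()/off() calls sit
outside the guard.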