author		Heiko Carstens <heiko.carstens@de.ibm.com>	2008-10-28 13:14:58 +0300
committer	Ingo Molnar <mingo@elte.hu>			2008-10-28 13:19:07 +0300
commit		6afe40b4dace385d7ba2faf24b352f066f3b71bf
tree		017e672e83a1257b084036c2e79adef95a81b9c3
parent		46fec7ac40e452a2ea5e63648d98b6bb2b5898f9
lockdep: fix irqs on/off ip tracing
Impact: fix lockdep lock-api-caller output when irqsoff tracing is enabled
81d68a96 "ftrace: trace irq disabled critical timings" added wrappers around
trace_hardirqs_on/off_caller. However these functions use
__builtin_return_address(0) to figure out which function actually disabled
or enabled irqs. The result is that we save the ips of trace_hardirqs_on/off
instead of the real caller. Not very helpful.
However since the patch from Steven the ip already gets passed. So use that
and get rid of __builtin_return_address(0) in these two functions.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	kernel/lockdep.c | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 11832acdde77..06e157119d2b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2169,12 +2169,11 @@ void early_boot_irqs_on(void)
 /*
  * Hardirqs will be enabled:
  */
-void trace_hardirqs_on_caller(unsigned long a0)
+void trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
-	unsigned long ip;
 
-	time_hardirqs_on(CALLER_ADDR0, a0);
+	time_hardirqs_on(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2188,7 +2187,6 @@ void trace_hardirqs_on_caller(unsigned long a0)
 	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
-	ip = (unsigned long) __builtin_return_address(0);
 
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
@@ -2224,11 +2222,11 @@ EXPORT_SYMBOL(trace_hardirqs_on);
 /*
  * Hardirqs were disabled:
  */
-void trace_hardirqs_off_caller(unsigned long a0)
+void trace_hardirqs_off_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	time_hardirqs_off(CALLER_ADDR0, a0);
+	time_hardirqs_off(CALLER_ADDR0, ip);
 
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
@@ -2241,7 +2239,7 @@ void trace_hardirqs_off_caller(unsigned long a0)
 		 * We have done an ON -> OFF transition:
 		 */
 		curr->hardirqs_enabled = 0;
-		curr->hardirq_disable_ip = _RET_IP_;
+		curr->hardirq_disable_ip = ip;
 		curr->hardirq_disable_event = ++curr->irq_events;
 		debug_atomic_inc(&hardirqs_off_events);
 	} else
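
To illustrate the problem the commit message describes, here is a minimal userspace sketch
(not kernel code; all names are hypothetical: record_irqs_off() stands in for
trace_hardirqs_off_caller(), wrapper() for the ftrace wrapper added by 81d68a96, and
caller() for the function that actually disables irqs). With a wrapper in the call chain,
__builtin_return_address(0) inside the traced function resolves to the wrapper, while an
ip captured once in the wrapper and passed down still identifies the real caller:

/* sketch, build with gcc; illustrates the problem only, assumptions noted above */
#include <stdio.h>

/* Stand-in for trace_hardirqs_off_caller(): __builtin_return_address(0) can only
 * see who called *it*, which with the wrapper in place is wrapper(), not the
 * function that actually disabled interrupts. */
static __attribute__((noinline)) void record_irqs_off(unsigned long ip)
{
	unsigned long builtin_ip = (unsigned long)__builtin_return_address(0);

	printf("__builtin_return_address(0) = %#lx  (points into wrapper)\n", builtin_ip);
	printf("explicitly passed ip        = %#lx  (points into the real caller)\n", ip);
}

/* Stand-in for the trace_hardirqs_off() wrapper: it captures the real caller's
 * address once and passes it down, which is what the patch relies on instead of
 * calling __builtin_return_address(0) a second time further down. */
static __attribute__((noinline)) void wrapper(void)
{
	record_irqs_off((unsigned long)__builtin_return_address(0));
	__asm__ volatile("" ::: "memory");	/* keep this from becoming a tail call */
}

/* Stand-in for a function that actually disables interrupts. */
static __attribute__((noinline)) void caller(void)
{
	wrapper();
}

int main(void)
{
	caller();
	return 0;
}

The two printed addresses differ: the first lands in wrapper(), the second in caller().
That is why the patch simply reuses the ip argument the ftrace wrappers already pass in
rather than recomputing it with __builtin_return_address(0).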