author:    Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-04-13 07:35:13 +0400
committer: David S. Miller <davem@davemloft.net>          2012-04-13 22:58:38 +0400
commit:    3d3eeb2ef26112a200785e5fca58ec58dd33bf1e
tree:      9a7ba8c313355f9b9832bff425517c0457f0cf69 /arch/sparc
parent:    9e0daff30fd7ecf698e5d20b0fa7f851e427cca5
download:  linux-3d3eeb2ef26112a200785e5fca58ec58dd33bf1e.tar.xz
sparc64: Eliminate obsolete __handle_softirq() function
The invocation of softirq is now handled by irq_exit(), so there is no need
for sparc64 to invoke it on the trap-return path. In fact, doing so is a bug
because if the trap occurred in the idle loop, this invocation can result in
lockdep-RCU failures. The problem is that RCU ignores idle CPUs, and the
sparc64 trap-return path to the softirq handlers fails to tell RCU that the
CPU must be considered non-idle while those handlers are executing. This
means that RCU is ignoring any RCU read-side critical sections in those
handlers, which in turn means that RCU-protected data can be yanked out from
under those read-side critical sections.

The shiny new lockdep-RCU ability to detect RCU read-side critical sections
that RCU is ignoring located this problem. The fix is straightforward: make
sparc64 stop manually invoking the softirq handlers.

Reported-by: Meelis Roos <mroos@linux.ee>
Suggested-by: David Miller <davem@davemloft.net>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Meelis Roos <mroos@linux.ee>
Cc: stable@vger.kernel.org
Signed-off-by: David S. Miller <davem@davemloft.net>
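For illustration only (not part of the patch): below is a minimal userspace
sketch of the failure mode the commit message describes. All names here
(cpu_is_rcu_idle, toy_rcu_read_lock, trap_return_old, irq_exit_new) are
hypothetical stand-ins rather than kernel APIs; the sketch only mirrors the
idea that a softirq handler entering a read-side critical section on a CPU
that RCU believes to be idle trips a lockdep-RCU-style check, while running
softirqs through the irq_exit() accounting marks the CPU non-idle first.

/*
 * Toy model only -- not kernel code.  Build with: cc -Wall toy.c
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

static bool cpu_is_rcu_idle;            /* RCU's view of this CPU */

/* Stand-in for a lockdep-RCU check: readers on an "idle" CPU are invisible to RCU. */
static void toy_rcu_read_lock(void)
{
        assert(!cpu_is_rcu_idle);
}

static void softirq_handler(void)
{
        toy_rcu_read_lock();            /* RCU-protected accesses would go here */
}

/* Old sparc64 trap-return path: ran the handler directly, CPU may still look idle. */
static void trap_return_old(void)
{
        softirq_handler();              /* trips the check when entered from the idle loop */
}

/* Generic path: irq_exit()-style accounting marks the CPU non-idle before softirqs run. */
static void irq_exit_new(void)
{
        cpu_is_rcu_idle = false;        /* RCU is now watching this CPU */
        softirq_handler();
        cpu_is_rcu_idle = true;
}

int main(int argc, char **argv)
{
        cpu_is_rcu_idle = true;         /* CPU sitting in the idle loop */
        irq_exit_new();                 /* safe: RCU sees the read-side critical section */
        if (argc > 1)
                trap_return_old();      /* fires the assertion, like the reported splat */
        puts("ok");
        return 0;
}

Run without arguments it exercises only the irq_exit()-style path; run with
any argument it takes the old direct-invocation path and fires the assertion.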
Diffstat (limited to 'arch/sparc')
-rw-r--r--  arch/sparc/kernel/rtrap_64.S  7
1 file changed, 0 insertions(+), 7 deletions(-)
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 77f1b95e0806..9171fc238def 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -20,11 +20,6 @@
 
 		.text
 		.align		32
-__handle_softirq:
-		call		do_softirq
-		 nop
-		ba,a,pt		%xcc, __handle_softirq_continue
-		 nop
 __handle_preemption:
 		call		schedule
 		 wrpr		%g0, RTRAP_PSTATE, %pstate
@@ -89,9 +84,7 @@ rtrap:
 		cmp		%l1, 0
 
 		/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
-		bne,pn		%icc, __handle_softirq
 		 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
-__handle_softirq_continue:
 rtrap_xcall:
 		sethi		%hi(0xf << 20), %l4
 		and		%l1, %l4, %l4