author      Valentin Schneider <valentin.schneider@arm.com>   2019-03-12 01:47:46 +0300
committer   Michael Ellerman <mpe@ellerman.id.au>             2019-05-02 19:54:57 +0300
commit      90437bffa5f9b1440ba03e023f4875d1814b9360 (patch)
tree        b4fc340e85de21c34d58554c8e2065414689336b /arch/powerpc/kernel
parent      32eebf96669568014b79b83a03f7895f3ec8c95f (diff)
download    linux-90437bffa5f9b1440ba03e023f4875d1814b9360.tar.xz
powerpc/entry: Remove unneeded need_resched() loop
Since the enabling and disabling of IRQs within preempt_schedule_irq()
is contained in a need_resched() loop, we don't need the outer arch
code loop.
Signed-off-by: Valentin Schneider <valentin.schneider@arm.com>
[mpe: Rebase since CURRENT_THREAD_INFO() removal]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
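For context, the retry already happens inside the generic scheduler entry point, so the arch code only needs a single call. A simplified C sketch of preempt_schedule_irq() from kernel/sched/core.c of that era (not the verbatim source; context-tracking and debug checks omitted) shows the internal loop the commit message refers to:

```c
/*
 * Simplified sketch of the generic preempt_schedule_irq()
 * (kernel/sched/core.c, ~v5.1), trimmed for illustration.
 * The do/while re-tests need_resched() itself, toggling IRQs
 * around each __schedule() call, so a caller-side loop like the
 * one removed from entry_32.S/entry_64.S is redundant.
 */
asmlinkage __visible void __sched preempt_schedule_irq(void)
{
	do {
		preempt_disable();
		local_irq_enable();
		__schedule(true);		/* preemption path */
		local_irq_disable();
		sched_preempt_enable_no_resched();
	} while (need_resched());
}
```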
Diffstat (limited to 'arch/powerpc/kernel')

 -rw-r--r--  arch/powerpc/kernel/entry_32.S | 5
 -rw-r--r--  arch/powerpc/kernel/entry_64.S | 8

 2 files changed, 2 insertions, 11 deletions
```diff
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 235a01d34b6d..c18f3490a77e 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -906,10 +906,7 @@ resume_kernel:
 	 */
 	bl	trace_hardirqs_off
 #endif
-1:	bl	preempt_schedule_irq
-	lwz	r3,TI_FLAGS(r2)
-	andi.	r0,r3,_TIF_NEED_RESCHED
-	bne-	1b
+	bl	preempt_schedule_irq
 #ifdef CONFIG_TRACE_IRQFLAGS
 	/* And now, to properly rebalance the above, we tell lockdep they
 	 * are being turned back on, which will happen when we return
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 7cc25389c6bd..d978af78bf2a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -865,13 +865,7 @@ resume_kernel:
 	 * sure we are soft-disabled first and reconcile irq state.
 	 */
 	RECONCILE_IRQ_STATE(r3,r4)
-1:	bl	preempt_schedule_irq
-
-	/* Re-test flags and eventually loop */
-	ld	r9, PACA_THREAD_INFO(r13)
-	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_NEED_RESCHED
-	bne	1b
+	bl	preempt_schedule_irq
 
 	/*
 	 * arch_local_irq_restore() from preempt_schedule_irq above may
```