author	Sebastian Andrzej Siewior <bigeasy@linutronix.de>	2021-10-06 14:18:52 +0300
committer	Minda Chen <minda.chen@starfivetech.com>	2023-11-06 14:24:41 +0300
commit	d705d190922314ae340cb70c5f96eb17aa0d2149 (patch)
tree	23a746b79ef39cdcc94567515db8ddda916ad449
parent	11c9294fee478f683e02aa83ee61648fad7ccdbf (diff)
download	linux-d705d190922314ae340cb70c5f96eb17aa0d2149.tar.xz
irq_work: Also rcuwait for !IRQ_WORK_HARD_IRQ on PREEMPT_RT
On PREEMPT_RT most irq_work items are processed as LAZY via softirq context. Avoid spin-waiting for them, because irq_work_sync() could run at a higher priority and prevent the irq-work from being completed.

Wait additionally for !IRQ_WORK_HARD_IRQ irq_work items on PREEMPT_RT.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Link: https://lore.kernel.org/r/20211006111852.1514359-5-bigeasy@linutronix.de
-rw-r--r--	include/linux/irq_work.h	5
-rw-r--r--	kernel/irq_work.c	6
2 files changed, 9 insertions(+), 2 deletions(-)
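For context, a minimal sketch (not part of this patch) of the two kinds of irq_work items the change distinguishes on PREEMPT_RT; the callback and function names here are hypothetical, and it assumes the IRQ_WORK_INIT()/IRQ_WORK_INIT_HARD() initializers from <linux/irq_work.h>:

#include <linux/irq_work.h>

/* Hypothetical callback; runs in hard irq context, or in softirq on PREEMPT_RT. */
static void example_cb(struct irq_work *work)
{
}

/* Regular item: on PREEMPT_RT this is processed as LAZY via softirq. */
static struct irq_work normal_work = IRQ_WORK_INIT(example_cb);

/* Hard item: carries IRQ_WORK_HARD_IRQ and runs in hard irq context even on PREEMPT_RT. */
static struct irq_work hard_work = IRQ_WORK_INIT_HARD(example_cb);

static void example_flush(void)
{
	irq_work_queue(&normal_work);
	irq_work_queue(&hard_work);

	/*
	 * With this patch, syncing the non-hard item on PREEMPT_RT sleeps
	 * on work->irqwait instead of spin-waiting, so a higher-priority
	 * caller cannot starve the softirq that has to complete the item.
	 */
	irq_work_sync(&normal_work);
	irq_work_sync(&hard_work);
}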
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index b48955e9c920..8cd11a223260 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -49,6 +49,11 @@ static inline bool irq_work_is_busy(struct irq_work *work)
return atomic_read(&work->node.a_flags) & IRQ_WORK_BUSY;
}
+static inline bool irq_work_is_hard(struct irq_work *work)
+{
+ return atomic_read(&work->node.a_flags) & IRQ_WORK_HARD_IRQ;
+}
+
bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 90b6b56f92e9..f7df715ec28e 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -217,7 +217,8 @@ void irq_work_single(void *arg)
*/
(void)atomic_cmpxchg(&work->node.a_flags, flags, flags & ~IRQ_WORK_BUSY);
- if (!arch_irq_work_has_interrupt())
+ if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
+ !arch_irq_work_has_interrupt())
rcuwait_wake_up(&work->irqwait);
}
@@ -277,7 +278,8 @@ void irq_work_sync(struct irq_work *work)
lockdep_assert_irqs_enabled();
might_sleep();
- if (!arch_irq_work_has_interrupt()) {
+ if ((IS_ENABLED(CONFIG_PREEMPT_RT) && !irq_work_is_hard(work)) ||
+ !arch_irq_work_has_interrupt()) {
rcuwait_wait_event(&work->irqwait, !irq_work_is_busy(work),
TASK_UNINTERRUPTIBLE);
return;