author	John Stultz <jstultz@google.com>	2024-12-17 07:07:35 +0300
committer	Peter Zijlstra <peterz@infradead.org>	2024-12-20 17:31:21 +0300
commit	abfdccd6af2b071951633e57d6322c46a1ea791f (patch)
tree	d852a162cbe01984b07795b714d31e39b6e85450 /include/linux
parent	c2db11a750fb626d0d04f2dc76e548a1f07617df (diff)
download	linux-abfdccd6af2b071951633e57d6322c46a1ea791f.tar.xz
sched/wake_q: Add helper to call wake_up_q after unlock with preemption disabled
A common pattern seen when wake_qs are used to defer a wakeup until after a lock is released is something like:

	preempt_disable();
	raw_spin_unlock(lock);
	wake_up_q(wake_q);
	preempt_enable();

So create some raw_spin_unlock*_wake() helper functions to clean this up.

Applies on top of the fix I submitted here:
https://lore.kernel.org/lkml/20241212222138.2400498-1-jstultz@google.com/

NOTE: I recognise the unlock()/unlock_irq()/unlock_irqrestore() variants create some duplication of their own, and we could use a macro to generate the similar functions, but I often dislike how those generation macros make finding the actual implementation harder, so I left the three functions as is. If folks would prefer otherwise, let me know and I'll switch it.

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: John Stultz <jstultz@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20241217040803.243420-1-jstultz@google.com
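For context, here is a minimal sketch of what a converted call site could look like. The caller, my_lock, my_task, and example_wake_after_unlock are invented for illustration and do not appear in the patch:

	/* Hypothetical caller; names are invented for this example. */
	static void example_wake_after_unlock(raw_spinlock_t *my_lock,
					      struct task_struct *my_task)
	{
		DEFINE_WAKE_Q(my_wake_q);

		raw_spin_lock(my_lock);
		wake_q_add(&my_wake_q, my_task);	/* defer the wakeup past the unlock */

		/* Replaces the open-coded sequence:
		 *	preempt_disable();
		 *	raw_spin_unlock(my_lock);
		 *	wake_up_q(&my_wake_q);
		 *	preempt_enable();
		 */
		raw_spin_unlock_wake(my_lock, &my_wake_q);
	}

Note that the helpers also re-run wake_q_init() on the wake_q they were handed, so the caller may safely reuse it afterwards.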
Diffstat (limited to 'include/linux')
-rw-r--r--	include/linux/sched/wake_q.h	34
1 file changed, 34 insertions(+), 0 deletions(-)
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
index 06cd8fb2f409..0f28b4623ad4 100644
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -63,4 +63,38 @@ extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
 extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);
 
+/* Spin unlock helpers to unlock and call wake_up_q with preempt disabled */
+static inline
+void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock(lock);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
+
+static inline
+void raw_spin_unlock_irq_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock_irq(lock);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
+
+static inline
+void raw_spin_unlock_irqrestore_wake(raw_spinlock_t *lock, unsigned long flags,
+				     struct wake_q_head *wake_q)
+{
+	guard(preempt)();
+	raw_spin_unlock_irqrestore(lock, flags);
+	if (wake_q) {
+		wake_up_q(wake_q);
+		wake_q_init(wake_q);
+	}
+}
 #endif /* _LINUX_SCHED_WAKE_Q_H */
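For readers unfamiliar with guard(preempt)(): it is a scope-based guard built on the kernel's cleanup.h infrastructure, which calls preempt_disable() on entry and preempt_enable() automatically when the scope ends. Conceptually, the first helper behaves like the open-coded sketch below (a paraphrase for illustration, not the code the guard actually generates):

	static inline
	void raw_spin_unlock_wake(raw_spinlock_t *lock, struct wake_q_head *wake_q)
	{
		preempt_disable();
		raw_spin_unlock(lock);
		if (wake_q) {
			wake_up_q(wake_q);
			wake_q_init(wake_q);	/* allow the caller to reuse the wake_q */
		}
		preempt_enable();		/* the guard does this at scope exit */
	}

Keeping preemption disabled across the unlock and the wakeups ensures the freshly woken tasks cannot immediately preempt the waker before it has finished issuing all the deferred wakeups.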