author		Ingo Molnar <mingo@kernel.org>	2017-02-01 20:47:28 +0300
committer	Ingo Molnar <mingo@kernel.org>	2017-03-02 10:42:41 +0300
commit		5dbe91de599423d243738a205367506444ebb4a4 (patch)
tree		257bff7520fdce1dbc15eee0724d61a467c42cd1 /include/linux/sched/idle.h
parent		4437722b0486c36dc90a1adf584fca4cea6cf1de (diff)
download	linux-5dbe91de599423d243738a205367506444ebb4a4.tar.xz
sched/headers: Move idle polling methods to <linux/sched/idle.h>
Further reduce the size of <linux/sched.h> by moving these APIs:

	tsk_is_polling()
	__current_set_polling()
	current_set_polling_and_test()
	__current_clr_polling()
	current_clr_polling_and_test()
	current_clr_polling()

Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
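These helpers let the idle task advertise, via TIF_POLLING_NRFLAG, that it is polling on TIF_NEED_RESCHED, so a remote CPU that wants a reschedule can skip the wakeup IPI. Below is a minimal sketch of the consuming side, loosely modeled on the kernel's idle loop; arch_cpu_idle_poll_sketch() is a hypothetical stand-in for the arch-specific poll step, not a real kernel function:

	/* Hedged sketch: how an idle loop would drive these APIs. */
	static void do_idle_sketch(void)
	{
		__current_set_polling();	/* advertise: we poll NEED_RESCHED */

		while (!need_resched()) {
			/* hypothetical arch hook; spins or enters mwait */
			arch_cpu_idle_poll_sketch();
		}

		/*
		 * Clear the polling bit, re-check NEED_RESCHED behind the
		 * barrier, and fold the flag into the preempt count.
		 */
		current_clr_polling();

		schedule_preempt_disabled();
	}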
Diffstat (limited to 'include/linux/sched/idle.h')
-rw-r--r--	include/linux/sched/idle.h	76
1 file changed, 76 insertions, 0 deletions
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index 72b6b6ab6354..66ee32f87f0b 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -12,4 +12,80 @@ enum cpu_idle_type {
extern void wake_up_if_idle(int cpu);
+/*
+ * Idle thread specific functions to determine the need_resched
+ * polling state.
+ */
+#ifdef TIF_POLLING_NRFLAG
+static inline int tsk_is_polling(struct task_struct *p)
+{
+ return test_tsk_thread_flag(p, TIF_POLLING_NRFLAG);
+}
+
+static inline void __current_set_polling(void)
+{
+ set_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ __current_set_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_curr()
+ */
+ smp_mb__after_atomic();
+
+ return unlikely(tif_need_resched());
+}
+
+static inline void __current_clr_polling(void)
+{
+ clear_thread_flag(TIF_POLLING_NRFLAG);
+}
+
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Polling state must be visible before we test NEED_RESCHED,
+ * paired by resched_curr()
+ */
+ smp_mb__after_atomic();
+
+ return unlikely(tif_need_resched());
+}
+
+#else
+static inline int tsk_is_polling(struct task_struct *p) { return 0; }
+static inline void __current_set_polling(void) { }
+static inline void __current_clr_polling(void) { }
+
+static inline bool __must_check current_set_polling_and_test(void)
+{
+ return unlikely(tif_need_resched());
+}
+static inline bool __must_check current_clr_polling_and_test(void)
+{
+ return unlikely(tif_need_resched());
+}
+#endif
+
+static inline void current_clr_polling(void)
+{
+ __current_clr_polling();
+
+ /*
+ * Ensure we check TIF_NEED_RESCHED after we clear the polling bit.
+ * Once the bit is cleared, we'll get IPIs with every new
+ * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also
+ * fold.
+ */
+ smp_mb(); /* paired with resched_curr() */
+
+ preempt_fold_need_resched();
+}
+
#endif /* _LINUX_SCHED_IDLE_H */
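The smp_mb__after_atomic() and smp_mb() calls above pair with a barrier on the resched side: whoever sets TIF_NEED_RESCHED must order that store before reading the polling state, or both sides could miss each other. A simplified sketch of that producing side follows; the real logic is resched_curr() in kernel/sched/core.c, which fuses the flag-set and polling-check into a single atomic fetch_or, so this split version is for illustration only:

	/* Hedged sketch: the wakeup side that pairs with the barriers above. */
	static void resched_remote_sketch(struct rq *rq, int cpu)
	{
		struct task_struct *curr = rq->curr;

		set_tsk_need_resched(curr);	/* publish TIF_NEED_RESCHED */

		/*
		 * Order the NEED_RESCHED store before the polling-state read;
		 * pairs with smp_mb__after_atomic() in
		 * current_set_polling_and_test()/current_clr_polling_and_test().
		 */
		smp_mb();

		if (!tsk_is_polling(curr))
			smp_send_reschedule(cpu);	/* polling not seen: send the IPI */
		/* else: the idle CPU is polling and will notice the flag itself */
	}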