author     Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 04:41:08 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2023-02-21 04:41:08 +0300
commit     1f2d9ffc7a5f916935749ffc6e93fb33bfe94d2f (patch)
tree       a5dabaa924d50867cbe347e20a7643b2850f11c0 /include/linux/sched
parent     a2f0e7eee1344eb9f91b22bc72d9eb0a52b849c9 (diff)
parent     7c4a5b89a0b5a57a64b601775b296abf77a9fe97 (diff)
download   linux-1f2d9ffc7a5f916935749ffc6e93fb33bfe94d2f.tar.xz
Merge tag 'sched-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler updates from Ingo Molnar:

 - Improve the scalability of the CFS bandwidth unthrottling logic with a large number of CPUs.

 - Fix & rework various cpuidle routines, simplify interaction with the generic scheduler code. Add __cpuidle methods as noinstr to objtool's noinstr detection and fix boatloads of cpuidle bugs & quirks.

 - Add new ABI: introduce MEMBARRIER_CMD_GET_REGISTRATIONS, to query previously issued registrations (a userspace usage sketch follows after the shortlog below).

 - Limit scheduler slice duration to the sysctl_sched_latency period, to improve scheduling granularity with a large number of SCHED_IDLE tasks.

 - Debuggability enhancement on sys_exit(): warn about disabled IRQs, but also enable them to prevent a cascade of followup problems and repeat warnings.

 - Fix the rescheduling logic in prio_changed_dl().

 - Micro-optimize cpufreq and sched-util methods.

 - Micro-optimize ttwu_runnable()

 - Micro-optimize the idle-scanning in update_numa_stats(), select_idle_capacity() and steal_cookie_task().

 - Update the RSEQ code & self-tests

 - Constify various scheduler methods

 - Remove unused methods

 - Refine __init tags

 - Documentation updates

 - Misc other cleanups, fixes

* tag 'sched-core-2023-02-20' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (110 commits)
  sched/rt: pick_next_rt_entity(): check list_entry
  sched/deadline: Add more reschedule cases to prio_changed_dl()
  sched/fair: sanitize vruntime of entity being placed
  sched/fair: Remove capacity inversion detection
  sched/fair: unlink misfit task from cpu overutilized
  objtool: mem*() are not uaccess safe
  cpuidle: Fix poll_idle() noinstr annotation
  sched/clock: Make local_clock() noinstr
  sched/clock/x86: Mark sched_clock() noinstr
  x86/pvclock: Improve atomic update of last_value in pvclock_clocksource_read()
  x86/atomics: Always inline arch_atomic64*()
  cpuidle: tracing, preempt: Squash _rcuidle tracing
  cpuidle: tracing: Warn about !rcu_is_watching()
  cpuidle: lib/bug: Disable rcu_is_watching() during WARN/BUG
  cpuidle: drivers: firmware: psci: Dont instrument suspend code
  KVM: selftests: Fix build of rseq test
  exit: Detect and fix irq disabled state in oops
  cpuidle, arm64: Fix the ARM64 cpuidle logic
  cpuidle: mvebu: Fix duplicate flags assignment
  sched/fair: Limit sched slice duration
  ...
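Of the items above, MEMBARRIER_CMD_GET_REGISTRATIONS is the only new user-visible ABI. The sketch below shows how a userspace program might query it; the command names and the membarrier() system call are real, but the wrapper function, the optional registration step and the printing are illustrative assumptions rather than code from this merge.

/* Hedged sketch: ask the kernel which membarrier commands this process
 * has previously registered. Requires kernel/uapi headers new enough
 * to define MEMBARRIER_CMD_GET_REGISTRATIONS. */
#include <linux/membarrier.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

static long membarrier(int cmd, unsigned int flags, int cpu_id)
{
	return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

int main(void)
{
	/* Optional: issue a registration so the query has something to report. */
	membarrier(MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED, 0, 0);

	/* Returns a bitmask of the MEMBARRIER_CMD_REGISTER_* commands that
	 * were previously issued by this process. */
	long regs = membarrier(MEMBARRIER_CMD_GET_REGISTRATIONS, 0, 0);
	if (regs < 0) {
		perror("membarrier");
		return 1;
	}
	printf("membarrier registrations bitmask: %#lx\n", regs);
	return 0;
}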
Diffstat (limited to 'include/linux/sched')
-rw-r--r--  include/linux/sched/clock.h     8
-rw-r--r--  include/linux/sched/cputime.h   9
-rw-r--r--  include/linux/sched/idle.h     40
3 files changed, 33 insertions(+), 24 deletions(-)
diff --git a/include/linux/sched/clock.h b/include/linux/sched/clock.h
index 867d588314e0..ca008f7d3615 100644
--- a/include/linux/sched/clock.h
+++ b/include/linux/sched/clock.h
@@ -45,7 +45,7 @@ static inline u64 cpu_clock(int cpu)
return sched_clock();
}
-static inline u64 local_clock(void)
+static __always_inline u64 local_clock(void)
{
return sched_clock();
}
@@ -79,10 +79,8 @@ static inline u64 cpu_clock(int cpu)
return sched_clock_cpu(cpu);
}
-static inline u64 local_clock(void)
-{
- return sched_clock_cpu(raw_smp_processor_id());
-}
+extern u64 local_clock(void);
+
#endif
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
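The second hunk above replaces the inline local_clock() body with an extern declaration, so the definition can move out of line and carry the annotation referenced in the shortlog entry "sched/clock: Make local_clock() noinstr". A minimal sketch of what such an out-of-line definition could look like, keeping the old behaviour, is below; the real definition lives in kernel/sched/clock.c and may differ in detail.

/* Illustrative only: an out-of-line local_clock() preserving the old
 * inline semantics, now annotatable as noinstr. Not the in-tree code. */
noinstr u64 local_clock(void)
{
	return sched_clock_cpu(raw_smp_processor_id());
}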
diff --git a/include/linux/sched/cputime.h b/include/linux/sched/cputime.h
index ce3c58286062..5f8fd5b24a2e 100644
--- a/include/linux/sched/cputime.h
+++ b/include/linux/sched/cputime.h
@@ -8,15 +8,6 @@
* cputime accounting APIs:
*/
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
-#include <asm/cputime.h>
-
-#ifndef cputime_to_nsecs
-# define cputime_to_nsecs(__ct) \
- (cputime_to_usecs(__ct) * NSEC_PER_USEC)
-#endif
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
extern bool task_cputime(struct task_struct *t,
u64 *utime, u64 *stime);
diff --git a/include/linux/sched/idle.h b/include/linux/sched/idle.h
index d73d314d59c6..478084f9105e 100644
--- a/include/linux/sched/idle.h
+++ b/include/linux/sched/idle.h
@@ -23,12 +23,37 @@ static inline void wake_up_if_idle(int cpu) { }
*/
#ifdef TIF_POLLING_NRFLAG
-static inline void __current_set_polling(void)
+#ifdef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
+
+static __always_inline void __current_set_polling(void)
{
- set_thread_flag(TIF_POLLING_NRFLAG);
+ arch_set_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
}
-static inline bool __must_check current_set_polling_and_test(void)
+static __always_inline void __current_clr_polling(void)
+{
+ arch_clear_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#else
+
+static __always_inline void __current_set_polling(void)
+{
+ set_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+static __always_inline void __current_clr_polling(void)
+{
+ clear_bit(TIF_POLLING_NRFLAG,
+ (unsigned long *)(&current_thread_info()->flags));
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
+
+static __always_inline bool __must_check current_set_polling_and_test(void)
{
__current_set_polling();
@@ -41,12 +66,7 @@ static inline bool __must_check current_set_polling_and_test(void)
return unlikely(tif_need_resched());
}
-static inline void __current_clr_polling(void)
-{
- clear_thread_flag(TIF_POLLING_NRFLAG);
-}
-
-static inline bool __must_check current_clr_polling_and_test(void)
+static __always_inline bool __must_check current_clr_polling_and_test(void)
{
__current_clr_polling();
@@ -73,7 +93,7 @@ static inline bool __must_check current_clr_polling_and_test(void)
}
#endif
-static inline void current_clr_polling(void)
+static __always_inline void current_clr_polling(void)
{
__current_clr_polling();
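These polling helpers let an idle loop advertise, via TIF_POLLING_NRFLAG, that it will notice a pending reschedule without being sent an IPI; the diff only changes how the flag bit is set and cleared so the helpers stay safe to call from non-instrumented (noinstr) code. A rough usage sketch, modelled loosely on drivers/cpuidle/poll_state.c and simplified (the function name and structure here are illustrative, not verbatim kernel code):

/* Simplified, illustrative polling idle loop. The real poll_idle() adds
 * time limits, relax counters and IRQ handling around this pattern. */
static void poll_idle_sketch(void)
{
	/* Set TIF_POLLING_NRFLAG and re-check for pending reschedule in one step. */
	if (!current_set_polling_and_test()) {
		while (!need_resched())
			cpu_relax();	/* spin; remote wakeups can skip the IPI */
	}

	/* Drop the flag and fold in any need_resched() that raced with us. */
	current_clr_polling();
}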