From c40dd90ac045fa1fdf6acc5bf9109a2315e6c92c Mon Sep 17 00:00:00 2001 From: Zhang Qiao Date: Thu, 27 Jun 2024 21:33:59 +0800 Subject: sched: Initialize the vruntime of a new task when it is first enqueued
When creating a new task, we initialize the vruntime of the newly created task in sched_cgroup_fork(). However, this is done too early and may not be accurate, because it uses the current CPU to initialize the vruntime, while the new task actually runs on the CPU assigned in wake_up_new_task().
To fix this, pass the ENQUEUE_INITIAL flag to activate_task() in wake_up_new_task(); this way, when place_entity() is called from enqueue_entity(), the vruntime of the new task is initialized there.
In addition, place_entity() in task_fork_fair() was introduced for two reasons:
1. Previously, __enqueue_entity() was called from task_new_fair(); to provide a vruntime for enqueueing the newly forked task, the assignment "se->vruntime = cfs_rq->min_vruntime" was introduced by commit e9acbff6484d ("sched: introduce se->vruntime"). This is the initial form of place_entity().
2. Commit 4d78e7b656aa ("sched: new task placement for vruntime") added the child_runs_first task placement feature, which is based on vruntime and therefore also requires the new task's vruntime value.
After the removal of child_runs_first and of enqueue_entity() from task_fork_fair(), this place_entity() call no longer serves any purpose, so remove it as well.
Signed-off-by: Zhang Qiao Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20240627133359.1370598-1-zhangqiao22@huawei.com
--- kernel/sched/core.c | 2 +- kernel/sched/fair.c | 15 --------------- 2 files changed, 1 insertion(+), 16 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f3951e4a55e5..2c61b4fcb15e 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4686,7 +4686,7 @@ void wake_up_new_task(struct task_struct *p) update_rq_clock(rq); post_init_entity_util_avg(p); - activate_task(rq, p, ENQUEUE_NOCLOCK); + activate_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_INITIAL); trace_sched_wakeup_new(p); wakeup_preempt(rq, p, WF_FORK); #ifdef CONFIG_SMP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 9057584ec06d..e8cdfeb898e2 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -12702,22 +12702,7 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) */ static void task_fork_fair(struct task_struct *p) { - struct sched_entity *se = &p->se, *curr; - struct cfs_rq *cfs_rq; - struct rq *rq = this_rq(); - struct rq_flags rf; - - rq_lock(rq, &rf); - update_rq_clock(rq); - set_task_max_allowed_capacity(p); - - cfs_rq = task_cfs_rq(current); - curr = cfs_rq->curr; - if (curr) - update_curr(cfs_rq); - place_entity(cfs_rq, se, ENQUEUE_INITIAL); - rq_unlock(rq, &rf); } /* -- cgit v1.2.3
From 0ec8d5aed4d14055aab4e2746def33f8b0d409c3 Mon Sep 17 00:00:00 2001 From: Peilin He Date: Tue, 16 Jul 2024 10:42:44 +0800 Subject: sched/core: Add WARN_ON_ONCE() to check overflow for migrate_disable()
Background
==========
When repeated migrate_disable() calls are made without the corresponding migrate_enable() calls, there is a risk of 'migration_disabled' overflowing, because 'migration_disabled' is an unsigned short whose maximum value is 65535.
In a PREEMPT_RT kernel, if 'migration_disabled' overflows, it may render migrate_disable() ineffective within local_lock_irqsave(). 
This is because, during the scheduling procedure, the value of 'migration_disabled' will be checked, which can trigger CPU migration. Consequently, the count of 'rcu_read_lock_nesting' may leak, because local_lock_irqsave() and local_unlock_irqrestore() end up running on different CPUs.
Usecase
========
For example, while developing a driver I encountered a warning such as "WARNING: CPU: 4 PID: 260 at kernel/rcu/tree_plugin.h:315 rcu_note_context_switch+0xa8/0x4e8". It took me half a month to locate this issue. Ultimately, I discovered that the lack of an overflow check in migrate_disable() was the root cause, which is why so much time was spent on localizing the problem. Had such an overflow check existed in migrate_disable(), the root cause could have been identified quickly and easily.
Effect
======
Using WARN_ON_ONCE() to check whether 'migration_disabled' has overflowed helps developers identify the issue quickly.
Suggested-by: Peter Zijlstra Signed-off-by: Peilin He Signed-off-by: xu xin Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Yunkai Zhang Reviewed-by: Qiang Tu Reviewed-by: Kun Jiang Reviewed-by: Fan Yu Link: https://lkml.kernel.org/r/20240716104244764N2jD8gnBpnsLjCDnQGQ8c@zte.com.cn
--- kernel/sched/core.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2c61b4fcb15e..db5823fcc31c 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2233,6 +2233,12 @@ void migrate_disable(void) struct task_struct *p = current; if (p->migration_disabled) { +#ifdef CONFIG_DEBUG_PREEMPT + /* + *Warn about overflow half-way through the range. + */ + WARN_ON_ONCE((s16)p->migration_disabled < 0); +#endif p->migration_disabled++; return; } @@ -2251,14 +2257,20 @@ void migrate_enable(void) .flags = SCA_MIGRATE_ENABLE, }; +#ifdef CONFIG_DEBUG_PREEMPT + /* + * Check both overflow from migrate_disable() and superfluous + * migrate_enable(). + */ + if (WARN_ON_ONCE((s16)p->migration_disabled <= 0)) + return; +#endif + if (p->migration_disabled > 1) { p->migration_disabled--; return; } - if (WARN_ON_ONCE(!p->migration_disabled)) - return; - /* * Ensure stop_task runs either before or after this, and that * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule(). -- cgit v1.2.3
From 2c2d9624697fc5e7dd84490ae01b80cc43ec2def Mon Sep 17 00:00:00 2001 From: Chuyi Zhou Date: Wed, 17 Jul 2024 22:33:42 +0800 Subject: sched/fair: Remove cfs_rq::nr_spread_over and cfs_rq::exec_clock
nr_spread_over tracks the number of instances where the difference between a scheduling entity's virtual runtime and the minimum virtual runtime in the runqueue exceeds three times the scheduler latency, indicating significant disparity in task scheduling. The commit that removed its usage: 5e963f2bd ("sched/fair: Commit to EEVDF").
cfs_rq->exec_clock was used to account for time spent executing tasks. The commit that removed its usage: 5d69eca542ee1 ("sched: Unify runtime accounting across classes").
cfs_rq::nr_spread_over and cfs_rq::exec_clock are no longer used under EEVDF, so remove them from struct cfs_rq. 
Signed-off-by: Chuyi Zhou Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Chengming Zhou Reviewed-by: K Prateek Nayak Acked-by: Vishal Chourasia Link: https://lore.kernel.org/r/20240717143342.593262-1-zhouchuyi@bytedance.com --- kernel/sched/debug.c | 4 ---- kernel/sched/sched.h | 6 ------ 2 files changed, 10 deletions(-) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index c1eb9a1afd13..90c4a9998377 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -641,8 +641,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) SEQ_printf(m, "\n"); SEQ_printf(m, "cfs_rq[%d]:\n", cpu); #endif - SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock", - SPLIT_NS(cfs_rq->exec_clock)); raw_spin_rq_lock_irqsave(rq, flags); root = __pick_root_entity(cfs_rq); @@ -669,8 +667,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq) SPLIT_NS(right_vruntime)); spread = right_vruntime - left_vruntime; SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread", SPLIT_NS(spread)); - SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over", - cfs_rq->nr_spread_over); SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running); SEQ_printf(m, " .%-30s: %d\n", "h_nr_running", cfs_rq->h_nr_running); SEQ_printf(m, " .%-30s: %d\n", "idle_nr_running", diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 4c36cc680361..8a071022bdec 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -599,7 +599,6 @@ struct cfs_rq { s64 avg_vruntime; u64 avg_load; - u64 exec_clock; u64 min_vruntime; #ifdef CONFIG_SCHED_CORE unsigned int forceidle_seq; @@ -619,10 +618,6 @@ struct cfs_rq { struct sched_entity *curr; struct sched_entity *next; -#ifdef CONFIG_SCHED_DEBUG - unsigned int nr_spread_over; -#endif - #ifdef CONFIG_SMP /* * CFS load tracking @@ -1158,7 +1153,6 @@ struct rq { /* latency stats */ struct sched_info rq_sched_info; unsigned long long rq_cpu_time; - /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ /* sys_sched_yield() stats */ unsigned int yld_count; -- cgit v1.2.3 From a58501fb8320d6232507f722b4c9dcd4e03362ee Mon Sep 17 00:00:00 2001 From: Phil Auld Date: Wed, 15 May 2024 09:37:05 -0400 Subject: sched: remove HZ_BW feature hedge As a hedge against unexpected user issues commit 88c56cfeaec4 ("sched/fair: Block nohz tick_stop when cfs bandwidth in use") included a scheduler feature to disable the new functionality. It's been a few releases (v6.6) and no screams, so remove it. Signed-off-by: Phil Auld Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Link: https://lore.kernel.org/r/20240515133705.3632915-1-pauld@redhat.com --- kernel/sched/core.c | 2 +- kernel/sched/fair.c | 2 +- kernel/sched/features.h | 2 -- 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index db5823fcc31c..0a71050f5f0d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1269,7 +1269,7 @@ bool sched_can_stop_tick(struct rq *rq) * dequeued by migrating while the constrained task continues to run. * E.g. going from 2->1 without going through pick_next_task(). 
*/ - if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) { + if (__need_bw_check(rq, rq->curr)) { if (cfs_task_bw_constrained(rq->curr)) return false; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e8cdfeb898e2..02694fc5fce9 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6555,7 +6555,7 @@ static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p) { int cpu = cpu_of(rq); - if (!sched_feat(HZ_BW) || !cfs_bandwidth_used()) + if (!cfs_bandwidth_used()) return; if (!tick_nohz_full_cpu(cpu)) diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 143f55df890b..929021fd6bc4 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -85,5 +85,3 @@ SCHED_FEAT(WA_BIAS, true) SCHED_FEAT(UTIL_EST, true) SCHED_FEAT(LATENCY_WARN, false) - -SCHED_FEAT(HZ_BW, true) -- cgit v1.2.3 From faa42d29419def58d3c3e5b14ad4037f0af3b496 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Wed, 26 Jun 2024 10:35:05 +0800 Subject: sched/fair: Make SCHED_IDLE entity be preempted in strict hierarchy Consider the following cgroup: root | ------------------------ | | normal_cgroup idle_cgroup | | SCHED_IDLE task_A SCHED_NORMAL task_B According to the cgroup hierarchy, A should preempt B. But current check_preempt_wakeup_fair() treats cgroup se and task separately, so B will preempt A unexpectedly. Unify the wakeup logic by {c,p}se_is_idle only. This makes SCHED_IDLE of a task a relative policy that is effective only within its own cgroup, similar to the behavior of NICE. Also fix se_is_idle() definition when !CONFIG_FAIR_GROUP_SCHED. Fixes: 304000390f88 ("sched: Cgroup SCHED_IDLE support") Signed-off-by: Tianchen Ding Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Josh Don Reviewed-by: Vincent Guittot Link: https://lkml.kernel.org/r/20240626023505.1332596-1-dtcccc@linux.alibaba.com --- kernel/sched/fair.c | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 02694fc5fce9..99c80abdbaaa 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -511,7 +511,7 @@ static int cfs_rq_is_idle(struct cfs_rq *cfs_rq) static int se_is_idle(struct sched_entity *se) { - return 0; + return task_has_idle_policy(task_of(se)); } #endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -8381,16 +8381,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int if (test_tsk_need_resched(curr)) return; - /* Idle tasks are by definition preempted by non-idle tasks. */ - if (unlikely(task_has_idle_policy(curr)) && - likely(!task_has_idle_policy(p))) - goto preempt; - - /* - * Batch and idle tasks do not preempt non-idle tasks (their preemption - * is driven by the tick): - */ - if (unlikely(p->policy != SCHED_NORMAL) || !sched_feat(WAKEUP_PREEMPTION)) + if (!sched_feat(WAKEUP_PREEMPTION)) return; find_matching_se(&se, &pse); @@ -8400,7 +8391,7 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int pse_is_idle = se_is_idle(pse); /* - * Preempt an idle group in favor of a non-idle group (and don't preempt + * Preempt an idle entity in favor of a non-idle entity (and don't preempt * in the inverse case). */ if (cse_is_idle && !pse_is_idle) @@ -8408,9 +8399,14 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int if (cse_is_idle != pse_is_idle) return; + /* + * BATCH and IDLE tasks do not preempt others. 
+ */ + if (unlikely(p->policy != SCHED_NORMAL)) + return; + cfs_rq = cfs_rq_of(se); update_curr(cfs_rq); - /* * XXX pick_eevdf(cfs_rq) != se ? */ -- cgit v1.2.3 From f23c042ce34ba265cf3129d530702b5d218e3f4b Mon Sep 17 00:00:00 2001 From: Daniel Bristot de Oliveira Date: Mon, 27 May 2024 14:06:47 +0200 Subject: sched/deadline: Comment sched_dl_entity::dl_server variable Add an explanation for the newly added variable. Fixes: 63ba8422f876 ("sched/deadline: Introduce deadline servers") Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Peter Zijlstra (Intel) Tested-by: Juri Lelli Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/147f7aa8cb8fd925f36aa8059af6a35aad08b45a.1716811044.git.bristot@kernel.org --- include/linux/sched.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/sched.h b/include/linux/sched.h index f8d150343d42..1c771ea4481d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -639,6 +639,8 @@ struct sched_dl_entity { * * @dl_overrun tells if the task asked to be informed about runtime * overruns. + * + * @dl_server tells if this is a server entity. */ unsigned int dl_throttled : 1; unsigned int dl_yielded : 1; -- cgit v1.2.3 From c245910049d04fbfa85bb2f5acd591c24e9907c7 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Mon, 27 May 2024 14:06:48 +0200 Subject: sched/core: Add clearing of ->dl_server in put_prev_task_balance() Paths using put_prev_task_balance() need to do a pick shortly after. Make sure they also clear the ->dl_server on prev as a part of that. Fixes: 63ba8422f876 ("sched/deadline: Introduce deadline servers") Signed-off-by: "Joel Fernandes (Google)" Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Peter Zijlstra (Intel) Tested-by: Juri Lelli Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/d184d554434bedbad0581cb34656582d78655150.1716811044.git.bristot@kernel.org --- kernel/sched/core.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0a71050f5f0d..e61da3b88be3 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5801,6 +5801,14 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, #endif put_prev_task(rq, prev); + + /* + * We've updated @prev and no longer need the server link, clear it. + * Must be done before ->pick_next_task() because that can (re)set + * ->dl_server. + */ + if (prev->dl_server) + prev->dl_server = NULL; } /* @@ -5844,14 +5852,6 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) restart: put_prev_task_balance(rq, prev, rf); - /* - * We've updated @prev and no longer need the server link, clear it. - * Must be done before ->pick_next_task() because that can (re)set - * ->dl_server. - */ - if (prev->dl_server) - prev->dl_server = NULL; - for_each_class(class) { p = class->pick_next_task(rq); if (p) -- cgit v1.2.3 From a741b82423f41501e301eb6f9820b45ca202e877 Mon Sep 17 00:00:00 2001 From: Youssef Esmat Date: Mon, 27 May 2024 14:06:49 +0200 Subject: sched/core: Clear prev->dl_server in CFS pick fast path In case the previous pick was a DL server pick, ->dl_server might be set. Clear it in the fast path as well. 
Fixes: 63ba8422f876 ("sched/deadline: Introduce deadline servers") Signed-off-by: Youssef Esmat Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Peter Zijlstra (Intel) Tested-by: Juri Lelli Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/7f7381ccba09efcb4a1c1ff808ed58385eccc222.1716811044.git.bristot@kernel.org --- kernel/sched/core.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index e61da3b88be3..1074ae804480 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5839,6 +5839,13 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) p = pick_next_task_idle(rq); } + /* + * This is a normal CFS pick, but the previous could be a DL pick. + * Clear it as previous is no longer picked. + */ + if (prev->dl_server) + prev->dl_server = NULL; + /* * This is the fast path; it cannot be a DL server pick; * therefore even if @p == @prev, ->dl_server must be NULL. -- cgit v1.2.3 From 557a6bfc662c4d560f909b78adb1270c9862efa8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 27 May 2024 14:06:50 +0200 Subject: sched/fair: Add trivial fair server Use deadline servers to service fair tasks. This patch adds a fair_server deadline entity which acts as a container for fair entities and can be used to fix starvation when higher priority (wrt fair) tasks are monopolizing CPU(s). Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Peter Zijlstra (Intel) Tested-by: Juri Lelli Link: https://lore.kernel.org/r/b6b0bcefaf25391bcf5b6ecdb9f1218de402d42e.1716811044.git.bristot@kernel.org --- kernel/sched/core.c | 1 + kernel/sched/deadline.c | 23 +++++++++++++++++++++++ kernel/sched/fair.c | 34 ++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 4 ++++ 4 files changed, 62 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 1074ae804480..f95600c092d4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8336,6 +8336,7 @@ void __init sched_init(void) #endif /* CONFIG_SMP */ hrtick_rq_init(rq); atomic_set(&rq->nr_iowait, 0); + fair_server_init(rq); #ifdef CONFIG_SCHED_CORE rq->core = rq; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index f59e5c19d944..f5b531372e3f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1381,6 +1381,13 @@ throttle: resched_curr(rq); } + /* + * The fair server (sole dl_server) does not account for real-time + * workload because it is running fair work. 
+ */ + if (dl_se == &rq->fair_server) + return; + /* * Because -- for now -- we share the rt bandwidth, we need to * account our runtime there too, otherwise actual rt tasks @@ -1414,15 +1421,31 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec) void dl_server_start(struct sched_dl_entity *dl_se) { + struct rq *rq = dl_se->rq; + if (!dl_server(dl_se)) { + /* Disabled */ + dl_se->dl_runtime = 0; + dl_se->dl_deadline = 1000 * NSEC_PER_MSEC; + dl_se->dl_period = 1000 * NSEC_PER_MSEC; + dl_se->dl_server = 1; setup_new_dl_entity(dl_se); } + + if (!dl_se->dl_runtime) + return; + enqueue_dl_entity(dl_se, ENQUEUE_WAKEUP); + if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &rq->curr->dl)) + resched_curr(dl_se->rq); } void dl_server_stop(struct sched_dl_entity *dl_se) { + if (!dl_se->dl_runtime) + return; + dequeue_dl_entity(dl_se, DEQUEUE_SLEEP); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 99c80abdbaaa..aba23b08e52d 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5765,6 +5765,7 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; long task_delta, idle_task_delta, dequeue = 1; + long rq_h_nr_running = rq->cfs.h_nr_running; raw_spin_lock(&cfs_b->lock); /* This will start the period timer if necessary */ @@ -5837,6 +5838,9 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) sub_nr_running(rq, task_delta); done: + /* Stop the fair server if throttling resulted in no runnable tasks */ + if (rq_h_nr_running && !rq->cfs.h_nr_running) + dl_server_stop(&rq->fair_server); /* * Note: distribution will already see us throttled via the * throttled-list. rq->lock protects completion. @@ -5854,6 +5858,7 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); struct sched_entity *se; long task_delta, idle_task_delta; + long rq_h_nr_running = rq->cfs.h_nr_running; se = cfs_rq->tg->se[cpu_of(rq)]; @@ -5929,6 +5934,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) unthrottle_throttle: assert_list_leaf_cfs_rq(rq); + /* Start the fair server if un-throttling resulted in new runnable tasks */ + if (!rq_h_nr_running && rq->cfs.h_nr_running) + dl_server_start(&rq->fair_server); + /* Determine whether we need to wake up potentially idle CPU: */ if (rq->curr == rq->idle && rq->cfs.nr_running) resched_curr(rq); @@ -6759,6 +6768,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) */ util_est_enqueue(&rq->cfs, p); + if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running) + dl_server_start(&rq->fair_server); + /* * If in_iowait is set, the code below may not trigger any cpufreq * utilization updates, so do it here explicitly with the IOWAIT flag @@ -6903,6 +6915,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) rq->next_balance = jiffies; dequeue_throttle: + if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running) + dl_server_stop(&rq->fair_server); + util_est_update(&rq->cfs, p, task_sleep); hrtick_update(rq); } @@ -8602,6 +8617,25 @@ static struct task_struct *__pick_next_task_fair(struct rq *rq) return pick_next_task_fair(rq, NULL, NULL); } +static bool fair_server_has_tasks(struct sched_dl_entity *dl_se) +{ + return !!dl_se->rq->cfs.nr_running; +} + +static struct task_struct *fair_server_pick(struct sched_dl_entity *dl_se) +{ + return pick_next_task_fair(dl_se->rq, NULL, NULL); +} + +void fair_server_init(struct rq *rq) +{ + struct 
sched_dl_entity *dl_se = &rq->fair_server; + + init_dl_entity(dl_se); + + dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick); +} + /* * Account for a descheduled task: */ diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 8a071022bdec..7416bcd2a549 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -363,6 +363,8 @@ extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_server_has_tasks_f has_tasks, dl_server_pick_f pick); +extern void fair_server_init(struct rq *rq); + #ifdef CONFIG_CGROUP_SCHED extern struct list_head task_groups; @@ -1039,6 +1041,8 @@ struct rq { struct rt_rq rt; struct dl_rq dl; + struct sched_dl_entity fair_server; + #ifdef CONFIG_FAIR_GROUP_SCHED /* list of leaf cfs_rq on this CPU: */ struct list_head leaf_cfs_rq_list; -- cgit v1.2.3 From a110a81c52a9de73e2e57e826dd3bf0fd4c22226 Mon Sep 17 00:00:00 2001 From: Daniel Bristot de Oliveira Date: Mon, 27 May 2024 14:06:51 +0200 Subject: sched/deadline: Deferrable dl server Among the motivations for the DL servers is the real-time throttling mechanism. This mechanism works by throttling the rt_rq after running for a long period without leaving space for fair tasks. The base dl server avoids this problem by boosting fair tasks instead of throttling the rt_rq. The point is that it boosts without waiting for potential starvation, causing some non-intuitive cases. For example, an IRQ dispatches two tasks on an idle system, a fair and an RT. The DL server will be activated, running the fair task before the RT one. This problem can be avoided by deferring the dl server activation. By setting the defer option, the dl_server will dispatch an SCHED_DEADLINE reservation with replenished runtime, but throttled. The dl_timer will be set for the defer time at (period - runtime) ns from start time. Thus boosting the fair rq at defer time. If the fair scheduler has the opportunity to run while waiting for defer time, the dl server runtime will be consumed. If the runtime is completely consumed before the defer time, the server will be replenished while still in a throttled state. Then, the dl_timer will be reset to the new defer time If the fair server reaches the defer time without consuming its runtime, the server will start running, following CBS rules (thus without breaking SCHED_DEADLINE). Then the server will continue the running state (without deferring) until it fair tasks are able to execute as regular fair scheduler (end of the starvation). Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Peter Zijlstra (Intel) Tested-by: Juri Lelli Link: https://lore.kernel.org/r/dd175943c72533cd9f0b87767c6499204879cc38.1716811044.git.bristot@kernel.org --- include/linux/sched.h | 12 ++ kernel/sched/deadline.c | 301 +++++++++++++++++++++++++++++++++++++++++------- kernel/sched/fair.c | 24 +++- kernel/sched/idle.c | 2 + kernel/sched/sched.h | 4 +- 5 files changed, 298 insertions(+), 45 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 1c771ea4481d..4edd7e2096fb 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -641,12 +641,24 @@ struct sched_dl_entity { * overruns. * * @dl_server tells if this is a server entity. + * + * @dl_defer tells if this is a deferred or regular server. For + * now only defer server exists. + * + * @dl_defer_armed tells if the deferrable server is waiting + * for the replenishment timer to activate it. + * + * @dl_defer_running tells if the deferrable server is actually + * running, skipping the defer phase. 
*/ unsigned int dl_throttled : 1; unsigned int dl_yielded : 1; unsigned int dl_non_contending : 1; unsigned int dl_overrun : 1; unsigned int dl_server : 1; + unsigned int dl_defer : 1; + unsigned int dl_defer_armed : 1; + unsigned int dl_defer_running : 1; /* * Bandwidth enforcement timer. Each -deadline task has its diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index f5b531372e3f..1b295314bc93 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -771,6 +771,15 @@ static inline void replenish_dl_new_period(struct sched_dl_entity *dl_se, /* for non-boosted task, pi_of(dl_se) == dl_se */ dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline; dl_se->runtime = pi_of(dl_se)->dl_runtime; + + /* + * If it is a deferred reservation, and the server + * is not handling an starvation case, defer it. + */ + if (dl_se->dl_defer & !dl_se->dl_defer_running) { + dl_se->dl_throttled = 1; + dl_se->dl_defer_armed = 1; + } } /* @@ -809,6 +818,9 @@ static inline void setup_new_dl_entity(struct sched_dl_entity *dl_se) replenish_dl_new_period(dl_se, rq); } +static int start_dl_timer(struct sched_dl_entity *dl_se); +static bool dl_entity_overflow(struct sched_dl_entity *dl_se, u64 t); + /* * Pure Earliest Deadline First (EDF) scheduling does not deal with the * possibility of a entity lasting more than what it declared, and thus @@ -837,9 +849,18 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se) /* * This could be the case for a !-dl task that is boosted. * Just go with full inherited parameters. + * + * Or, it could be the case of a deferred reservation that + * was not able to consume its runtime in background and + * reached this point with current u > U. + * + * In both cases, set a new period. */ - if (dl_se->dl_deadline == 0) - replenish_dl_new_period(dl_se, rq); + if (dl_se->dl_deadline == 0 || + (dl_se->dl_defer_armed && dl_entity_overflow(dl_se, rq_clock(rq)))) { + dl_se->deadline = rq_clock(rq) + pi_of(dl_se)->dl_deadline; + dl_se->runtime = pi_of(dl_se)->dl_runtime; + } if (dl_se->dl_yielded && dl_se->runtime > 0) dl_se->runtime = 0; @@ -873,6 +894,44 @@ static void replenish_dl_entity(struct sched_dl_entity *dl_se) dl_se->dl_yielded = 0; if (dl_se->dl_throttled) dl_se->dl_throttled = 0; + + /* + * If this is the replenishment of a deferred reservation, + * clear the flag and return. + */ + if (dl_se->dl_defer_armed) { + dl_se->dl_defer_armed = 0; + return; + } + + /* + * A this point, if the deferred server is not armed, and the deadline + * is in the future, if it is not running already, throttle the server + * and arm the defer timer. + */ + if (dl_se->dl_defer && !dl_se->dl_defer_running && + dl_time_before(rq_clock(dl_se->rq), dl_se->deadline - dl_se->runtime)) { + if (!is_dl_boosted(dl_se) && dl_se->server_has_tasks(dl_se)) { + + /* + * Set dl_se->dl_defer_armed and dl_throttled variables to + * inform the start_dl_timer() that this is a deferred + * activation. + */ + dl_se->dl_defer_armed = 1; + dl_se->dl_throttled = 1; + if (!start_dl_timer(dl_se)) { + /* + * If for whatever reason (delays), a previous timer was + * queued but not serviced, cancel it and clean the + * deferrable server variables intended for start_dl_timer(). 
+ */ + hrtimer_try_to_cancel(&dl_se->dl_timer); + dl_se->dl_defer_armed = 0; + dl_se->dl_throttled = 0; + } + } + } } /* @@ -1023,6 +1082,15 @@ static void update_dl_entity(struct sched_dl_entity *dl_se) } replenish_dl_new_period(dl_se, rq); + } else if (dl_server(dl_se) && dl_se->dl_defer) { + /* + * The server can still use its previous deadline, so check if + * it left the dl_defer_running state. + */ + if (!dl_se->dl_defer_running) { + dl_se->dl_defer_armed = 1; + dl_se->dl_throttled = 1; + } } } @@ -1055,8 +1123,21 @@ static int start_dl_timer(struct sched_dl_entity *dl_se) * We want the timer to fire at the deadline, but considering * that it is actually coming from rq->clock and not from * hrtimer's time base reading. + * + * The deferred reservation will have its timer set to + * (deadline - runtime). At that point, the CBS rule will decide + * if the current deadline can be used, or if a replenishment is + * required to avoid add too much pressure on the system + * (current u > U). */ - act = ns_to_ktime(dl_next_period(dl_se)); + if (dl_se->dl_defer_armed) { + WARN_ON_ONCE(!dl_se->dl_throttled); + act = ns_to_ktime(dl_se->deadline - dl_se->runtime); + } else { + /* act = deadline - rel-deadline + period */ + act = ns_to_ktime(dl_next_period(dl_se)); + } + now = hrtimer_cb_get_time(timer); delta = ktime_to_ns(now) - rq_clock(rq); act = ktime_add_ns(act, delta); @@ -1106,6 +1187,62 @@ static void __push_dl_task(struct rq *rq, struct rq_flags *rf) #endif } +/* a defer timer will not be reset if the runtime consumed was < dl_server_min_res */ +static const u64 dl_server_min_res = 1 * NSEC_PER_MSEC; + +static enum hrtimer_restart dl_server_timer(struct hrtimer *timer, struct sched_dl_entity *dl_se) +{ + struct rq *rq = rq_of_dl_se(dl_se); + u64 fw; + + scoped_guard (rq_lock, rq) { + struct rq_flags *rf = &scope.rf; + + if (!dl_se->dl_throttled || !dl_se->dl_runtime) + return HRTIMER_NORESTART; + + sched_clock_tick(); + update_rq_clock(rq); + + if (!dl_se->dl_runtime) + return HRTIMER_NORESTART; + + if (!dl_se->server_has_tasks(dl_se)) { + replenish_dl_entity(dl_se); + return HRTIMER_NORESTART; + } + + if (dl_se->dl_defer_armed) { + /* + * First check if the server could consume runtime in background. + * If so, it is possible to push the defer timer for this amount + * of time. The dl_server_min_res serves as a limit to avoid + * forwarding the timer for a too small amount of time. + */ + if (dl_time_before(rq_clock(dl_se->rq), + (dl_se->deadline - dl_se->runtime - dl_server_min_res))) { + + /* reset the defer timer */ + fw = dl_se->deadline - rq_clock(dl_se->rq) - dl_se->runtime; + + hrtimer_forward_now(timer, ns_to_ktime(fw)); + return HRTIMER_RESTART; + } + + dl_se->dl_defer_running = 1; + } + + enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH); + + if (!dl_task(dl_se->rq->curr) || dl_entity_preempt(dl_se, &dl_se->rq->curr->dl)) + resched_curr(rq); + + __push_dl_task(rq, rf); + } + + return HRTIMER_NORESTART; +} + /* * This is the bandwidth enforcement timer callback. 
If here, we know * a task is not on its dl_rq, since the fact that the timer was running @@ -1128,28 +1265,8 @@ static enum hrtimer_restart dl_task_timer(struct hrtimer *timer) struct rq_flags rf; struct rq *rq; - if (dl_server(dl_se)) { - struct rq *rq = rq_of_dl_se(dl_se); - struct rq_flags rf; - - rq_lock(rq, &rf); - if (dl_se->dl_throttled) { - sched_clock_tick(); - update_rq_clock(rq); - - if (dl_se->server_has_tasks(dl_se)) { - enqueue_dl_entity(dl_se, ENQUEUE_REPLENISH); - resched_curr(rq); - __push_dl_task(rq, &rf); - } else { - replenish_dl_entity(dl_se); - } - - } - rq_unlock(rq, &rf); - - return HRTIMER_NORESTART; - } + if (dl_server(dl_se)) + return dl_server_timer(timer, dl_se); p = dl_task_of(dl_se); rq = task_rq_lock(p, &rf); @@ -1319,22 +1436,10 @@ static u64 grub_reclaim(u64 delta, struct rq *rq, struct sched_dl_entity *dl_se) return (delta * u_act) >> BW_SHIFT; } -static inline void -update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, - int flags); -static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec) +s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec) { s64 scaled_delta_exec; - if (unlikely(delta_exec <= 0)) { - if (unlikely(dl_se->dl_yielded)) - goto throttle; - return; - } - - if (dl_entity_is_special(dl_se)) - return; - /* * For tasks that participate in GRUB, we implement GRUB-PA: the * spare reclaimed bandwidth is used to clock down frequency. @@ -1353,8 +1458,64 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 scaled_delta_exec = cap_scale(scaled_delta_exec, scale_cpu); } + return scaled_delta_exec; +} + +static inline void +update_stats_dequeue_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se, + int flags); +static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec) +{ + s64 scaled_delta_exec; + + if (unlikely(delta_exec <= 0)) { + if (unlikely(dl_se->dl_yielded)) + goto throttle; + return; + } + + if (dl_server(dl_se) && dl_se->dl_throttled && !dl_se->dl_defer) + return; + + if (dl_entity_is_special(dl_se)) + return; + + scaled_delta_exec = dl_scaled_delta_exec(rq, dl_se, delta_exec); + dl_se->runtime -= scaled_delta_exec; + /* + * The fair server can consume its runtime while throttled (not queued/ + * running as regular CFS). + * + * If the server consumes its entire runtime in this state. The server + * is not required for the current period. Thus, reset the server by + * starting a new period, pushing the activation. + */ + if (dl_se->dl_defer && dl_se->dl_throttled && dl_runtime_exceeded(dl_se)) { + /* + * If the server was previously activated - the starving condition + * took place, it this point it went away because the fair scheduler + * was able to get runtime in background. So return to the initial + * state. + */ + dl_se->dl_defer_running = 0; + + hrtimer_try_to_cancel(&dl_se->dl_timer); + + replenish_dl_new_period(dl_se, dl_se->rq); + + /* + * Not being able to start the timer seems problematic. If it could not + * be started for whatever reason, we need to "unthrottle" the DL server + * and queue right away. Otherwise nothing might queue it. That's similar + * to what enqueue_dl_entity() does on start_dl_timer==0. For now, just warn. 
+ */ + WARN_ON_ONCE(!start_dl_timer(dl_se)); + + return; + } + throttle: if (dl_runtime_exceeded(dl_se) || dl_se->dl_yielded) { dl_se->dl_throttled = 1; @@ -1414,9 +1575,46 @@ throttle: } } +/* + * In the non-defer mode, the idle time is not accounted, as the + * server provides a guarantee. + * + * If the dl_server is in defer mode, the idle time is also considered + * as time available for the fair server, avoiding a penalty for the + * rt scheduler that did not consumed that time. + */ +void dl_server_update_idle_time(struct rq *rq, struct task_struct *p) +{ + s64 delta_exec, scaled_delta_exec; + + if (!rq->fair_server.dl_defer) + return; + + /* no need to discount more */ + if (rq->fair_server.runtime < 0) + return; + + delta_exec = rq_clock_task(rq) - p->se.exec_start; + if (delta_exec < 0) + return; + + scaled_delta_exec = dl_scaled_delta_exec(rq, &rq->fair_server, delta_exec); + + rq->fair_server.runtime -= scaled_delta_exec; + + if (rq->fair_server.runtime < 0) { + rq->fair_server.dl_defer_running = 0; + rq->fair_server.runtime = 0; + } + + p->se.exec_start = rq_clock_task(rq); +} + void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec) { - update_curr_dl_se(dl_se->rq, dl_se, delta_exec); + /* 0 runtime = fair server disabled */ + if (dl_se->dl_runtime) + update_curr_dl_se(dl_se->rq, dl_se, delta_exec); } void dl_server_start(struct sched_dl_entity *dl_se) @@ -1430,6 +1628,7 @@ void dl_server_start(struct sched_dl_entity *dl_se) dl_se->dl_period = 1000 * NSEC_PER_MSEC; dl_se->dl_server = 1; + dl_se->dl_defer = 1; setup_new_dl_entity(dl_se); } @@ -1447,6 +1646,9 @@ void dl_server_stop(struct sched_dl_entity *dl_se) return; dequeue_dl_entity(dl_se, DEQUEUE_SLEEP); + hrtimer_try_to_cancel(&dl_se->dl_timer); + dl_se->dl_defer_armed = 0; + dl_se->dl_throttled = 0; } void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, @@ -1758,7 +1960,7 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags) * be counted in the active utilization; hence, we need to call * add_running_bw(). */ - if (dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) { + if (!dl_se->dl_defer && dl_se->dl_throttled && !(flags & ENQUEUE_REPLENISH)) { if (flags & ENQUEUE_WAKEUP) task_contending(dl_se, flags); @@ -1780,6 +1982,25 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, int flags) setup_new_dl_entity(dl_se); } + /* + * If the reservation is still throttled, e.g., it got replenished but is a + * deferred task and still got to wait, don't enqueue. + */ + if (dl_se->dl_throttled && start_dl_timer(dl_se)) + return; + + /* + * We're about to enqueue, make sure we're not ->dl_throttled! + * In case the timer was not started, say because the defer time + * has passed, mark as not throttled and mark unarmed. + * Also cancel earlier timers, since letting those run is pointless. 
+ */ + if (dl_se->dl_throttled) { + hrtimer_try_to_cancel(&dl_se->dl_timer); + dl_se->dl_defer_armed = 0; + dl_se->dl_throttled = 0; + } + __enqueue_dl_entity(dl_se); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index aba23b08e52d..1ea5ec81431a 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -1156,12 +1156,13 @@ s64 update_curr_common(struct rq *rq) static void update_curr(struct cfs_rq *cfs_rq) { struct sched_entity *curr = cfs_rq->curr; + struct rq *rq = rq_of(cfs_rq); s64 delta_exec; if (unlikely(!curr)) return; - delta_exec = update_curr_se(rq_of(cfs_rq), curr); + delta_exec = update_curr_se(rq, curr); if (unlikely(delta_exec <= 0)) return; @@ -1169,8 +1170,19 @@ static void update_curr(struct cfs_rq *cfs_rq) update_deadline(cfs_rq, curr); update_min_vruntime(cfs_rq); - if (entity_is_task(curr)) - update_curr_task(task_of(curr), delta_exec); + if (entity_is_task(curr)) { + struct task_struct *p = task_of(curr); + + update_curr_task(p, delta_exec); + + /* + * Any fair task that runs outside of fair_server should + * account against fair_server such that it can account for + * this time and possibly avoid running this period. + */ + if (p->dl_server != &rq->fair_server) + dl_server_update(&rq->fair_server, delta_exec); + } account_cfs_rq_runtime(cfs_rq, delta_exec); } @@ -6768,8 +6780,12 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) */ util_est_enqueue(&rq->cfs, p); - if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running) + if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running) { + /* Account for idle runtime */ + if (!rq->nr_running) + dl_server_update_idle_time(rq, rq->curr); dl_server_start(&rq->fair_server); + } /* * If in_iowait is set, the code below may not trigger any cpufreq diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 6e78d071beb5..d560f7ffa463 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -452,12 +452,14 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags) static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) { + dl_server_update_idle_time(rq, prev); } static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first) { update_idle_core(rq); schedstat_inc(rq->sched_goidle); + next->se.exec_start = rq_clock_task(rq); } #ifdef CONFIG_SMP diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 7416bcd2a549..64fb6776664e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -335,7 +335,7 @@ extern bool __checkparam_dl(const struct sched_attr *attr); extern bool dl_param_changed(struct task_struct *p, const struct sched_attr *attr); extern int dl_cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial); extern int dl_bw_check_overflow(int cpu); - +extern s64 dl_scaled_delta_exec(struct rq *rq, struct sched_dl_entity *dl_se, s64 delta_exec); /* * SCHED_DEADLINE supports servers (nested scheduling) with the following * interface: @@ -363,6 +363,8 @@ extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_server_has_tasks_f has_tasks, dl_server_pick_f pick); +extern void dl_server_update_idle_time(struct rq *rq, + struct task_struct *p); extern void fair_server_init(struct rq *rq); #ifdef CONFIG_CGROUP_SCHED -- cgit v1.2.3 From d741f297bceaf52aa1b97b997708fc0cd853c73e Mon Sep 17 00:00:00 2001 From: Daniel Bristot de Oliveira Date: Mon, 27 May 2024 14:06:52 +0200 Subject: sched/fair: Fair server interface Add an interface for fair server setup on debugfs. 
Each CPU has two files under /debug/sched/fair_server/cpu{ID}: - runtime: set runtime in ns - period: set period in ns This then leaves /proc/sys/kernel/sched_rt_{period,runtime}_us to set bounds on admission control. The interface also add the server to the dl bandwidth accounting. Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Peter Zijlstra (Intel) Tested-by: Juri Lelli Link: https://lore.kernel.org/r/a9ef9fc69bcedb44bddc9bc34f2b313296052819.1716811044.git.bristot@kernel.org --- kernel/sched/deadline.c | 103 +++++++++++++++++++++++++------ kernel/sched/debug.c | 159 ++++++++++++++++++++++++++++++++++++++++++++++++ kernel/sched/sched.h | 3 + kernel/sched/topology.c | 8 +++ 4 files changed, 256 insertions(+), 17 deletions(-) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 1b295314bc93..747c0c51d753 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -320,19 +320,12 @@ void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) __sub_running_bw(dl_se->dl_bw, dl_rq); } -static void dl_change_utilization(struct task_struct *p, u64 new_bw) +static void dl_rq_change_utilization(struct rq *rq, struct sched_dl_entity *dl_se, u64 new_bw) { - struct rq *rq; - - WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV); - - if (task_on_rq_queued(p)) - return; + if (dl_se->dl_non_contending) { + sub_running_bw(dl_se, &rq->dl); + dl_se->dl_non_contending = 0; - rq = task_rq(p); - if (p->dl.dl_non_contending) { - sub_running_bw(&p->dl, &rq->dl); - p->dl.dl_non_contending = 0; /* * If the timer handler is currently running and the * timer cannot be canceled, inactive_task_timer() @@ -340,13 +333,25 @@ static void dl_change_utilization(struct task_struct *p, u64 new_bw) * will not touch the rq's active utilization, * so we are still safe. */ - if (hrtimer_try_to_cancel(&p->dl.inactive_timer) == 1) - put_task_struct(p); + if (hrtimer_try_to_cancel(&dl_se->inactive_timer) == 1) { + if (!dl_server(dl_se)) + put_task_struct(dl_task_of(dl_se)); + } } - __sub_rq_bw(p->dl.dl_bw, &rq->dl); + __sub_rq_bw(dl_se->dl_bw, &rq->dl); __add_rq_bw(new_bw, &rq->dl); } +static void dl_change_utilization(struct task_struct *p, u64 new_bw) +{ + WARN_ON_ONCE(p->dl.flags & SCHED_FLAG_SUGOV); + + if (task_on_rq_queued(p)) + return; + + dl_rq_change_utilization(task_rq(p), &p->dl, new_bw); +} + static void __dl_clear_params(struct sched_dl_entity *dl_se); /* @@ -1621,11 +1626,17 @@ void dl_server_start(struct sched_dl_entity *dl_se) { struct rq *rq = dl_se->rq; + /* + * XXX: the apply do not work fine at the init phase for the + * fair server because things are not yet set. We need to improve + * this before getting generic. + */ if (!dl_server(dl_se)) { /* Disabled */ - dl_se->dl_runtime = 0; - dl_se->dl_deadline = 1000 * NSEC_PER_MSEC; - dl_se->dl_period = 1000 * NSEC_PER_MSEC; + u64 runtime = 0; + u64 period = 1000 * NSEC_PER_MSEC; + + dl_server_apply_params(dl_se, runtime, period, 1); dl_se->dl_server = 1; dl_se->dl_defer = 1; @@ -1660,6 +1671,64 @@ void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_se->server_pick = pick; } +void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq) +{ + u64 new_bw = dl_se->dl_bw; + int cpu = cpu_of(rq); + struct dl_bw *dl_b; + + dl_b = dl_bw_of(cpu_of(rq)); + guard(raw_spinlock)(&dl_b->lock); + + if (!dl_bw_cpus(cpu)) + return; + + __dl_add(dl_b, new_bw, dl_bw_cpus(cpu)); +} + +int dl_server_apply_params(struct sched_dl_entity *dl_se, u64 runtime, u64 period, bool init) +{ + u64 old_bw = init ? 
0 : to_ratio(dl_se->dl_period, dl_se->dl_runtime); + u64 new_bw = to_ratio(period, runtime); + struct rq *rq = dl_se->rq; + int cpu = cpu_of(rq); + struct dl_bw *dl_b; + unsigned long cap; + int retval = 0; + int cpus; + + dl_b = dl_bw_of(cpu); + guard(raw_spinlock)(&dl_b->lock); + + cpus = dl_bw_cpus(cpu); + cap = dl_bw_capacity(cpu); + + if (__dl_overflow(dl_b, cap, old_bw, new_bw)) + return -EBUSY; + + if (init) { + __add_rq_bw(new_bw, &rq->dl); + __dl_add(dl_b, new_bw, cpus); + } else { + __dl_sub(dl_b, dl_se->dl_bw, cpus); + __dl_add(dl_b, new_bw, cpus); + + dl_rq_change_utilization(rq, dl_se, new_bw); + } + + dl_se->dl_runtime = runtime; + dl_se->dl_deadline = period; + dl_se->dl_period = period; + + dl_se->runtime = 0; + dl_se->deadline = 0; + + dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); + dl_se->dl_density = to_ratio(dl_se->dl_deadline, dl_se->dl_runtime); + + return retval; +} + /* * Update the current task's runtime statistics (provided it is still * a -deadline task and has not been removed from the dl_rq). diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 90c4a9998377..72f2715dec78 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -333,8 +333,165 @@ static const struct file_operations sched_debug_fops = { .release = seq_release, }; +enum dl_param { + DL_RUNTIME = 0, + DL_PERIOD, +}; + +static unsigned long fair_server_period_max = (1 << 22) * NSEC_PER_USEC; /* ~4 seconds */ +static unsigned long fair_server_period_min = (100) * NSEC_PER_USEC; /* 100 us */ + +static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos, enum dl_param param) +{ + long cpu = (long) ((struct seq_file *) filp->private_data)->private; + struct rq *rq = cpu_rq(cpu); + u64 runtime, period; + size_t err; + int retval; + u64 value; + + err = kstrtoull_from_user(ubuf, cnt, 10, &value); + if (err) + return err; + + scoped_guard (rq_lock_irqsave, rq) { + runtime = rq->fair_server.dl_runtime; + period = rq->fair_server.dl_period; + + switch (param) { + case DL_RUNTIME: + if (runtime == value) + break; + runtime = value; + break; + case DL_PERIOD: + if (value == period) + break; + period = value; + break; + } + + if (runtime > period || + period > fair_server_period_max || + period < fair_server_period_min) { + return -EINVAL; + } + + if (rq->cfs.h_nr_running) { + update_rq_clock(rq); + dl_server_stop(&rq->fair_server); + } + + retval = dl_server_apply_params(&rq->fair_server, runtime, period, 0); + if (retval) + cnt = retval; + + if (!runtime) + printk_deferred("Fair server disabled in CPU %d, system may crash due to starvation.\n", + cpu_of(rq)); + + if (rq->cfs.h_nr_running) + dl_server_start(&rq->fair_server); + } + + *ppos += cnt; + return cnt; +} + +static size_t sched_fair_server_show(struct seq_file *m, void *v, enum dl_param param) +{ + unsigned long cpu = (unsigned long) m->private; + struct rq *rq = cpu_rq(cpu); + u64 value; + + switch (param) { + case DL_RUNTIME: + value = rq->fair_server.dl_runtime; + break; + case DL_PERIOD: + value = rq->fair_server.dl_period; + break; + } + + seq_printf(m, "%llu\n", value); + return 0; + +} + +static ssize_t +sched_fair_server_runtime_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_RUNTIME); +} + +static int sched_fair_server_runtime_show(struct seq_file *m, void *v) +{ + return sched_fair_server_show(m, v, DL_RUNTIME); +} + +static int sched_fair_server_runtime_open(struct 
inode *inode, struct file *filp) +{ + return single_open(filp, sched_fair_server_runtime_show, inode->i_private); +} + +static const struct file_operations fair_server_runtime_fops = { + .open = sched_fair_server_runtime_open, + .write = sched_fair_server_runtime_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static ssize_t +sched_fair_server_period_write(struct file *filp, const char __user *ubuf, + size_t cnt, loff_t *ppos) +{ + return sched_fair_server_write(filp, ubuf, cnt, ppos, DL_PERIOD); +} + +static int sched_fair_server_period_show(struct seq_file *m, void *v) +{ + return sched_fair_server_show(m, v, DL_PERIOD); +} + +static int sched_fair_server_period_open(struct inode *inode, struct file *filp) +{ + return single_open(filp, sched_fair_server_period_show, inode->i_private); +} + +static const struct file_operations fair_server_period_fops = { + .open = sched_fair_server_period_open, + .write = sched_fair_server_period_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + static struct dentry *debugfs_sched; +static void debugfs_fair_server_init(void) +{ + struct dentry *d_fair; + unsigned long cpu; + + d_fair = debugfs_create_dir("fair_server", debugfs_sched); + if (!d_fair) + return; + + for_each_possible_cpu(cpu) { + struct dentry *d_cpu; + char buf[32]; + + snprintf(buf, sizeof(buf), "cpu%lu", cpu); + d_cpu = debugfs_create_dir(buf, d_fair); + + debugfs_create_file("runtime", 0644, d_cpu, (void *) cpu, &fair_server_runtime_fops); + debugfs_create_file("period", 0644, d_cpu, (void *) cpu, &fair_server_period_fops); + } +} + static __init int sched_init_debug(void) { struct dentry __maybe_unused *numa; @@ -374,6 +531,8 @@ static __init int sched_init_debug(void) debugfs_create_file("debug", 0444, debugfs_sched, NULL, &sched_debug_fops); + debugfs_fair_server_init(); + return 0; } late_initcall(sched_init_debug); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 64fb6776664e..b777ac361543 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -366,6 +366,9 @@ extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, extern void dl_server_update_idle_time(struct rq *rq, struct task_struct *p); extern void fair_server_init(struct rq *rq); +extern void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq); +extern int dl_server_apply_params(struct sched_dl_entity *dl_se, + u64 runtime, u64 period, bool init); #ifdef CONFIG_CGROUP_SCHED diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 76504b776d03..9748a4c8d668 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -516,6 +516,14 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd) if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) set_rq_online(rq); + /* + * Because the rq is not a task, dl_add_task_root_domain() did not + * move the fair server bw to the rd if it already started. + * Add it now. + */ + if (rq->fair_server.dl_server) + __dl_server_attach_root(&rq->fair_server, rq); + rq_unlock_irqrestore(rq, &rf); if (old_rd) -- cgit v1.2.3 From 4b26cfdd395638918e370f62bd2c33e6f63ffb5e Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Mon, 27 May 2024 14:06:53 +0200 Subject: sched/core: Fix priority checking for DL server picks In core scheduling, a DL server pick (which is CFS task) should be given higher priority than tasks in other classes. Not doing so causes CFS starvation. A kselftest is added later to demonstrate this. 
A CFS task that is competing with RT tasks can be completely starved without this and the DL server's boosting completely ignored. Fix these problems. Reported-by: Suleiman Souhlal Signed-off-by: "Joel Fernandes (Google)" Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Vineeth Pillai Tested-by: Juri Lelli Link: https://lore.kernel.org/r/48b78521d86f3b33c24994d843c1aad6b987dda9.1716811044.git.bristot@kernel.org --- kernel/sched/core.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index f95600c092d4..11abfcdafd8d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -163,6 +163,9 @@ static inline int __task_prio(const struct task_struct *p) if (p->sched_class == &stop_sched_class) /* trumps deadline */ return -2; + if (p->dl_server) + return -1; /* deadline */ + if (rt_prio(p->prio)) /* includes deadline */ return p->prio; /* [-1, 99] */ @@ -192,8 +195,24 @@ static inline bool prio_less(const struct task_struct *a, if (-pb < -pa) return false; - if (pa == -1) /* dl_prio() doesn't work because of stop_class above */ - return !dl_time_before(a->dl.deadline, b->dl.deadline); + if (pa == -1) { /* dl_prio() doesn't work because of stop_class above */ + const struct sched_dl_entity *a_dl, *b_dl; + + a_dl = &a->dl; + /* + * Since,'a' and 'b' can be CFS tasks served by DL server, + * __task_prio() can return -1 (for DL) even for those. In that + * case, get to the dl_server's DL entity. + */ + if (a->dl_server) + a_dl = a->dl_server; + + b_dl = &b->dl; + if (b->dl_server) + b_dl = b->dl_server; + + return !dl_time_before(a_dl->deadline, b_dl->deadline); + } if (pa == MAX_RT_PRIO + MAX_NICE) /* fair */ return cfs_prio_less(a, b, in_fi); -- cgit v1.2.3 From c8a85394cfdb4696b4e2f8a0f3066a1c921af426 Mon Sep 17 00:00:00 2001 From: "Joel Fernandes (Google)" Date: Mon, 27 May 2024 14:06:54 +0200 Subject: sched/core: Fix picking of tasks for core scheduling with DL server * Use simple CFS pick_task for DL pick_task DL server's pick_task calls CFS's pick_next_task_fair(), this is wrong because core scheduling's pick_task only calls CFS's pick_task() for evaluation / checking of the CFS task (comparing across CPUs), not for actually affirmatively picking the next task. This causes RB tree corruption issues in CFS that were found by syzbot. * Make pick_task_fair clear DL server A DL task pick might set ->dl_server, but it is possible the task will never run (say the other HT has a stop task). If the CFS task is picked in the future directly (say without DL server), ->dl_server will be set. So clear it in pick_task_fair(). This fixes the KASAN issue reported by syzbot in set_next_entity(). (DL refactoring suggestions by Vineeth Pillai). 
Reported-by: Suleiman Souhlal Signed-off-by: "Joel Fernandes (Google)" Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Vineeth Pillai Tested-by: Juri Lelli Link: https://lore.kernel.org/r/b10489ab1f03d23e08e6097acea47442e7d6466f.1716811044.git.bristot@kernel.org --- include/linux/sched.h | 3 ++- kernel/sched/deadline.c | 27 ++++++++++++++++++++++----- kernel/sched/fair.c | 23 +++++++++++++++++++++-- kernel/sched/sched.h | 3 ++- 4 files changed, 47 insertions(+), 9 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 4edd7e2096fb..2c1b4ee3234f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -686,7 +686,8 @@ struct sched_dl_entity { */ struct rq *rq; dl_server_has_tasks_f server_has_tasks; - dl_server_pick_f server_pick; + dl_server_pick_f server_pick_next; + dl_server_pick_f server_pick_task; #ifdef CONFIG_RT_MUTEXES /* diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 747c0c51d753..8571bc9b37cc 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1664,11 +1664,13 @@ void dl_server_stop(struct sched_dl_entity *dl_se) void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_server_has_tasks_f has_tasks, - dl_server_pick_f pick) + dl_server_pick_f pick_next, + dl_server_pick_f pick_task) { dl_se->rq = rq; dl_se->server_has_tasks = has_tasks; - dl_se->server_pick = pick; + dl_se->server_pick_next = pick_next; + dl_se->server_pick_task = pick_task; } void __dl_server_attach_root(struct sched_dl_entity *dl_se, struct rq *rq) @@ -2399,7 +2401,12 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq) return __node_2_dle(left); } -static struct task_struct *pick_task_dl(struct rq *rq) +/* + * __pick_next_task_dl - Helper to pick the next -deadline task to run. + * @rq: The runqueue to pick the next task from. + * @peek: If true, just peek at the next task. Only relevant for dlserver. + */ +static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek) { struct sched_dl_entity *dl_se; struct dl_rq *dl_rq = &rq->dl; @@ -2413,7 +2420,10 @@ again: WARN_ON_ONCE(!dl_se); if (dl_server(dl_se)) { - p = dl_se->server_pick(dl_se); + if (IS_ENABLED(CONFIG_SMP) && peek) + p = dl_se->server_pick_task(dl_se); + else + p = dl_se->server_pick_next(dl_se); if (!p) { WARN_ON_ONCE(1); dl_se->dl_yielded = 1; @@ -2428,11 +2438,18 @@ again: return p; } +#ifdef CONFIG_SMP +static struct task_struct *pick_task_dl(struct rq *rq) +{ + return __pick_next_task_dl(rq, true); +} +#endif + static struct task_struct *pick_next_task_dl(struct rq *rq) { struct task_struct *p; - p = pick_task_dl(rq); + p = __pick_next_task_dl(rq, false); if (!p) return p; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1ea5ec81431a..ee251ac50398 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8479,6 +8479,14 @@ again: cfs_rq = group_cfs_rq(se); } while (cfs_rq); + /* + * This can be called from directly from CFS's ->pick_task() or indirectly + * from DL's ->pick_task when fair server is enabled. In the indirect case, + * DL will set ->dl_server just after this function is called, so its Ok to + * clear. In the direct case, we are picking directly so we must clear it. 
+ */ + task_of(se)->dl_server = NULL; + return task_of(se); } #endif @@ -8638,7 +8646,16 @@ static bool fair_server_has_tasks(struct sched_dl_entity *dl_se) return !!dl_se->rq->cfs.nr_running; } -static struct task_struct *fair_server_pick(struct sched_dl_entity *dl_se) +static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se) +{ +#ifdef CONFIG_SMP + return pick_task_fair(dl_se->rq); +#else + return NULL; +#endif +} + +static struct task_struct *fair_server_pick_next(struct sched_dl_entity *dl_se) { return pick_next_task_fair(dl_se->rq, NULL, NULL); } @@ -8649,7 +8666,9 @@ void fair_server_init(struct rq *rq) init_dl_entity(dl_se); - dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick); + dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_next, + fair_server_pick_task); + } /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index b777ac361543..f7e028b2e34e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -361,7 +361,8 @@ extern void dl_server_start(struct sched_dl_entity *dl_se); extern void dl_server_stop(struct sched_dl_entity *dl_se); extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_server_has_tasks_f has_tasks, - dl_server_pick_f pick); + dl_server_pick_f pick_next, + dl_server_pick_f pick_task); extern void dl_server_update_idle_time(struct rq *rq, struct task_struct *p); -- cgit v1.2.3 From 5f6bd380c7bdbe10f7b4e8ddcceed60ce0714c6d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 27 May 2024 14:06:55 +0200 Subject: sched/rt: Remove default bandwidth control Now that fair_server exists, we no longer need RT bandwidth control unless RT_GROUP_SCHED. Enable fair_server with parameters equivalent to RT throttling. Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: "Peter Zijlstra (Intel)" Signed-off-by: Daniel Bristot de Oliveira Signed-off-by: "Vineeth Pillai (Google)" Signed-off-by: Peter Zijlstra (Intel) Tested-by: Juri Lelli Link: https://lore.kernel.org/r/14d562db55df5c3c780d91940743acb166895ef7.1716811044.git.bristot@kernel.org --- kernel/sched/core.c | 9 +- kernel/sched/deadline.c | 5 +- kernel/sched/debug.c | 3 + kernel/sched/rt.c | 242 +++++++++++++++++++++--------------------------- kernel/sched/sched.h | 3 +- 5 files changed, 120 insertions(+), 142 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 11abfcdafd8d..29fde993d3f8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8266,8 +8266,6 @@ void __init sched_init(void) #endif /* CONFIG_RT_GROUP_SCHED */ } - init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); - #ifdef CONFIG_SMP init_defrootdomain(); #endif @@ -8322,8 +8320,13 @@ void __init sched_init(void) init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); #endif /* CONFIG_FAIR_GROUP_SCHED */ - rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime; #ifdef CONFIG_RT_GROUP_SCHED + /* + * This is required for init cpu because rt.c:__enable_runtime() + * starts working after scheduler_running, which is not the case + * yet. 
+ */ + rq->rt.rt_runtime = global_rt_runtime(); init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL); #endif #ifdef CONFIG_SMP diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 8571bc9b37cc..c5f1cc753a31 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1554,6 +1554,7 @@ throttle: if (dl_se == &rq->fair_server) return; +#ifdef CONFIG_RT_GROUP_SCHED /* * Because -- for now -- we share the rt bandwidth, we need to * account our runtime there too, otherwise actual rt tasks @@ -1578,6 +1579,7 @@ throttle: rt_rq->rt_time += delta_exec; raw_spin_unlock(&rt_rq->rt_runtime_lock); } +#endif } /* @@ -1632,8 +1634,7 @@ void dl_server_start(struct sched_dl_entity *dl_se) * this before getting generic. */ if (!dl_server(dl_se)) { - /* Disabled */ - u64 runtime = 0; + u64 runtime = 50 * NSEC_PER_MSEC; u64 period = 1000 * NSEC_PER_MSEC; dl_server_apply_params(dl_se, runtime, period, 1); diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 72f2715dec78..e75914e7d2a8 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -885,9 +885,12 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq) SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x)) PU(rt_nr_running); + +#ifdef CONFIG_RT_GROUP_SCHED P(rt_throttled); PN(rt_time); PN(rt_runtime); +#endif #undef PN #undef PU diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 310523c1b9e3..a8731da04cd5 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -8,10 +8,6 @@ int sched_rr_timeslice = RR_TIMESLICE; /* More than 4 hours if BW_SHIFT equals 20. */ static const u64 max_rt_runtime = MAX_BW; -static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); - -struct rt_bandwidth def_rt_bandwidth; - /* * period over which we measure -rt task CPU usage in us. 
* default: 1s @@ -66,6 +62,40 @@ static int __init sched_rt_sysctl_init(void) late_initcall(sched_rt_sysctl_init); #endif +void init_rt_rq(struct rt_rq *rt_rq) +{ + struct rt_prio_array *array; + int i; + + array = &rt_rq->active; + for (i = 0; i < MAX_RT_PRIO; i++) { + INIT_LIST_HEAD(array->queue + i); + __clear_bit(i, array->bitmap); + } + /* delimiter for bitsearch: */ + __set_bit(MAX_RT_PRIO, array->bitmap); + +#if defined CONFIG_SMP + rt_rq->highest_prio.curr = MAX_RT_PRIO-1; + rt_rq->highest_prio.next = MAX_RT_PRIO-1; + rt_rq->overloaded = 0; + plist_head_init(&rt_rq->pushable_tasks); +#endif /* CONFIG_SMP */ + /* We start is dequeued state, because no RT tasks are queued */ + rt_rq->rt_queued = 0; + +#ifdef CONFIG_RT_GROUP_SCHED + rt_rq->rt_time = 0; + rt_rq->rt_throttled = 0; + rt_rq->rt_runtime = 0; + raw_spin_lock_init(&rt_rq->rt_runtime_lock); +#endif +} + +#ifdef CONFIG_RT_GROUP_SCHED + +static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun); + static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer) { struct rt_bandwidth *rt_b = @@ -130,35 +160,6 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) do_start_rt_bandwidth(rt_b); } -void init_rt_rq(struct rt_rq *rt_rq) -{ - struct rt_prio_array *array; - int i; - - array = &rt_rq->active; - for (i = 0; i < MAX_RT_PRIO; i++) { - INIT_LIST_HEAD(array->queue + i); - __clear_bit(i, array->bitmap); - } - /* delimiter for bit-search: */ - __set_bit(MAX_RT_PRIO, array->bitmap); - -#if defined CONFIG_SMP - rt_rq->highest_prio.curr = MAX_RT_PRIO-1; - rt_rq->highest_prio.next = MAX_RT_PRIO-1; - rt_rq->overloaded = 0; - plist_head_init(&rt_rq->pushable_tasks); -#endif /* CONFIG_SMP */ - /* We start is dequeued state, because no RT tasks are queued */ - rt_rq->rt_queued = 0; - - rt_rq->rt_time = 0; - rt_rq->rt_throttled = 0; - rt_rq->rt_runtime = 0; - raw_spin_lock_init(&rt_rq->rt_runtime_lock); -} - -#ifdef CONFIG_RT_GROUP_SCHED static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b) { hrtimer_cancel(&rt_b->rt_period_timer); @@ -195,7 +196,6 @@ void unregister_rt_sched_group(struct task_group *tg) { if (tg->rt_se) destroy_rt_bandwidth(&tg->rt_bandwidth); - } void free_rt_sched_group(struct task_group *tg) @@ -253,8 +253,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent) if (!tg->rt_se) goto err; - init_rt_bandwidth(&tg->rt_bandwidth, - ktime_to_ns(def_rt_bandwidth.rt_period), 0); + init_rt_bandwidth(&tg->rt_bandwidth, ktime_to_ns(global_rt_period()), 0); for_each_possible_cpu(i) { rt_rq = kzalloc_node(sizeof(struct rt_rq), @@ -604,70 +603,6 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) return &rt_rq->tg->rt_bandwidth; } -#else /* !CONFIG_RT_GROUP_SCHED */ - -static inline u64 sched_rt_runtime(struct rt_rq *rt_rq) -{ - return rt_rq->rt_runtime; -} - -static inline u64 sched_rt_period(struct rt_rq *rt_rq) -{ - return ktime_to_ns(def_rt_bandwidth.rt_period); -} - -typedef struct rt_rq *rt_rq_iter_t; - -#define for_each_rt_rq(rt_rq, iter, rq) \ - for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) - -#define for_each_sched_rt_entity(rt_se) \ - for (; rt_se; rt_se = NULL) - -static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) -{ - return NULL; -} - -static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) -{ - struct rq *rq = rq_of_rt_rq(rt_rq); - - if (!rt_rq->rt_nr_running) - return; - - enqueue_top_rt_rq(rt_rq); - resched_curr(rq); -} - -static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) -{ - 
dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); -} - -static inline int rt_rq_throttled(struct rt_rq *rt_rq) -{ - return rt_rq->rt_throttled; -} - -static inline const struct cpumask *sched_rt_period_mask(void) -{ - return cpu_online_mask; -} - -static inline -struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) -{ - return &cpu_rq(cpu)->rt; -} - -static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq) -{ - return &def_rt_bandwidth; -} - -#endif /* CONFIG_RT_GROUP_SCHED */ - bool sched_rt_bandwidth_account(struct rt_rq *rt_rq) { struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); @@ -859,7 +794,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) const struct cpumask *span; span = sched_rt_period_mask(); -#ifdef CONFIG_RT_GROUP_SCHED + /* * FIXME: isolated CPUs should really leave the root task group, * whether they are isolcpus or were isolated via cpusets, lest @@ -871,7 +806,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) */ if (rt_b == &root_task_group.rt_bandwidth) span = cpu_online_mask; -#endif + for_each_cpu(i, span) { int enqueue = 0; struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); @@ -938,18 +873,6 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) return idle; } -static inline int rt_se_prio(struct sched_rt_entity *rt_se) -{ -#ifdef CONFIG_RT_GROUP_SCHED - struct rt_rq *rt_rq = group_rt_rq(rt_se); - - if (rt_rq) - return rt_rq->highest_prio.curr; -#endif - - return rt_task_of(rt_se)->prio; -} - static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) { u64 runtime = sched_rt_runtime(rt_rq); @@ -993,6 +916,72 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) return 0; } +#else /* !CONFIG_RT_GROUP_SCHED */ + +typedef struct rt_rq *rt_rq_iter_t; + +#define for_each_rt_rq(rt_rq, iter, rq) \ + for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL) + +#define for_each_sched_rt_entity(rt_se) \ + for (; rt_se; rt_se = NULL) + +static inline struct rt_rq *group_rt_rq(struct sched_rt_entity *rt_se) +{ + return NULL; +} + +static inline void sched_rt_rq_enqueue(struct rt_rq *rt_rq) +{ + struct rq *rq = rq_of_rt_rq(rt_rq); + + if (!rt_rq->rt_nr_running) + return; + + enqueue_top_rt_rq(rt_rq); + resched_curr(rq); +} + +static inline void sched_rt_rq_dequeue(struct rt_rq *rt_rq) +{ + dequeue_top_rt_rq(rt_rq, rt_rq->rt_nr_running); +} + +static inline int rt_rq_throttled(struct rt_rq *rt_rq) +{ + return false; +} + +static inline const struct cpumask *sched_rt_period_mask(void) +{ + return cpu_online_mask; +} + +static inline +struct rt_rq *sched_rt_period_rt_rq(struct rt_bandwidth *rt_b, int cpu) +{ + return &cpu_rq(cpu)->rt; +} + +#ifdef CONFIG_SMP +static void __enable_runtime(struct rq *rq) { } +static void __disable_runtime(struct rq *rq) { } +#endif + +#endif /* CONFIG_RT_GROUP_SCHED */ + +static inline int rt_se_prio(struct sched_rt_entity *rt_se) +{ +#ifdef CONFIG_RT_GROUP_SCHED + struct rt_rq *rt_rq = group_rt_rq(rt_se); + + if (rt_rq) + return rt_rq->highest_prio.curr; +#endif + + return rt_task_of(rt_se)->prio; +} + /* * Update the current task's runtime statistics. Skip current tasks that * are not in our scheduling class. 
@@ -1000,7 +989,6 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) static void update_curr_rt(struct rq *rq) { struct task_struct *curr = rq->curr; - struct sched_rt_entity *rt_se = &curr->rt; s64 delta_exec; if (curr->sched_class != &rt_sched_class) @@ -1010,6 +998,9 @@ static void update_curr_rt(struct rq *rq) if (unlikely(delta_exec <= 0)) return; +#ifdef CONFIG_RT_GROUP_SCHED + struct sched_rt_entity *rt_se = &curr->rt; + if (!rt_bandwidth_enabled()) return; @@ -1028,6 +1019,7 @@ static void update_curr_rt(struct rq *rq) do_start_rt_bandwidth(sched_rt_bandwidth(rt_rq)); } } +#endif } static void @@ -1184,7 +1176,6 @@ dec_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) static void inc_rt_group(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) { - start_rt_bandwidth(&def_rt_bandwidth); } static inline @@ -2912,19 +2903,6 @@ int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk) #ifdef CONFIG_SYSCTL static int sched_rt_global_constraints(void) { - unsigned long flags; - int i; - - raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); - for_each_possible_cpu(i) { - struct rt_rq *rt_rq = &cpu_rq(i)->rt; - - raw_spin_lock(&rt_rq->rt_runtime_lock); - rt_rq->rt_runtime = global_rt_runtime(); - raw_spin_unlock(&rt_rq->rt_runtime_lock); - } - raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); - return 0; } #endif /* CONFIG_SYSCTL */ @@ -2944,12 +2922,6 @@ static int sched_rt_global_validate(void) static void sched_rt_do_global(void) { - unsigned long flags; - - raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags); - def_rt_bandwidth.rt_runtime = global_rt_runtime(); - def_rt_bandwidth.rt_period = ns_to_ktime(global_rt_period()); - raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags); } static int sched_rt_handler(const struct ctl_table *table, int write, void *buffer, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index f7e028b2e34e..1e1d1b467af2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -729,13 +729,13 @@ struct rt_rq { #endif /* CONFIG_SMP */ int rt_queued; +#ifdef CONFIG_RT_GROUP_SCHED int rt_throttled; u64 rt_time; u64 rt_runtime; /* Nests inside the rq lock: */ raw_spinlock_t rt_runtime_lock; -#ifdef CONFIG_RT_GROUP_SCHED unsigned int rt_nr_boosted; struct rq *rq; @@ -2519,7 +2519,6 @@ extern void reweight_task(struct task_struct *p, const struct load_weight *lw); extern void resched_curr(struct rq *rq); extern void resched_cpu(int cpu); -extern struct rt_bandwidth def_rt_bandwidth; extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); -- cgit v1.2.3 From cea5a3472ac43f18590e1bd6b842f808347a810c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 26 Jul 2024 16:27:04 +0200 Subject: sched/fair: Cleanup fair_server The throttle interaction made my brain hurt, make it consistently about 0 transitions of h_nr_running. 
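Concretely, the idiom (a minimal sketch; the hunks below are the real thing and additionally account for idle runtime before starting the server) is to sample the root cfs_rq's h_nr_running before the operation and act only when it crosses zero:

	int rq_h_nr_running = rq->cfs.h_nr_running;   /* sample before the change */

	/* ... enqueue/unthrottle or dequeue/throttle work ... */

	if (!rq_h_nr_running && rq->cfs.h_nr_running) /* 0 -> non-zero */
		dl_server_start(&rq->fair_server);

	if (rq_h_nr_running && !rq->cfs.h_nr_running) /* non-zero -> 0 */
		dl_server_stop(&rq->fair_server);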
Signed-off-by: Peter Zijlstra (Intel) --- kernel/sched/fair.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index ee251ac50398..795ceef5e7e1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5849,10 +5849,10 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) /* At this point se is NULL and we are at root level*/ sub_nr_running(rq, task_delta); -done: /* Stop the fair server if throttling resulted in no runnable tasks */ if (rq_h_nr_running && !rq->cfs.h_nr_running) dl_server_stop(&rq->fair_server); +done: /* * Note: distribution will already see us throttled via the * throttled-list. rq->lock protects completion. @@ -5940,16 +5940,16 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) goto unthrottle_throttle; } + /* Start the fair server if un-throttling resulted in new runnable tasks */ + if (!rq_h_nr_running && rq->cfs.h_nr_running) + dl_server_start(&rq->fair_server); + /* At this point se is NULL and we are at root level*/ add_nr_running(rq, task_delta); unthrottle_throttle: assert_list_leaf_cfs_rq(rq); - /* Start the fair server if un-throttling resulted in new runnable tasks */ - if (!rq_h_nr_running && rq->cfs.h_nr_running) - dl_server_start(&rq->fair_server); - /* Determine whether we need to wake up potentially idle CPU: */ if (rq->curr == rq->idle && rq->cfs.nr_running) resched_curr(rq); @@ -6771,6 +6771,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) struct sched_entity *se = &p->se; int idle_h_nr_running = task_has_idle_policy(p); int task_new = !(flags & ENQUEUE_WAKEUP); + int rq_h_nr_running = rq->cfs.h_nr_running; /* * The code below (indirectly) updates schedutil which looks at @@ -6780,13 +6781,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) */ util_est_enqueue(&rq->cfs, p); - if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running) { - /* Account for idle runtime */ - if (!rq->nr_running) - dl_server_update_idle_time(rq, rq->curr); - dl_server_start(&rq->fair_server); - } - /* * If in_iowait is set, the code below may not trigger any cpufreq * utilization updates, so do it here explicitly with the IOWAIT flag @@ -6832,6 +6826,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) goto enqueue_throttle; } + if (!rq_h_nr_running && rq->cfs.h_nr_running) { + /* Account for idle runtime */ + if (!rq->nr_running) + dl_server_update_idle_time(rq, rq->curr); + dl_server_start(&rq->fair_server); + } + /* At this point se is NULL and we are at root level*/ add_nr_running(rq, 1); @@ -6872,6 +6873,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) int task_sleep = flags & DEQUEUE_SLEEP; int idle_h_nr_running = task_has_idle_policy(p); bool was_sched_idle = sched_idle_rq(rq); + int rq_h_nr_running = rq->cfs.h_nr_running; util_est_dequeue(&rq->cfs, p); @@ -6926,14 +6928,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) /* At this point se is NULL and we are at root level*/ sub_nr_running(rq, 1); + if (rq_h_nr_running && !rq->cfs.h_nr_running) + dl_server_stop(&rq->fair_server); + /* balance early to pull high priority tasks */ if (unlikely(!was_sched_idle && sched_idle_rq(rq))) rq->next_balance = jiffies; dequeue_throttle: - if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running) - dl_server_stop(&rq->fair_server); - util_est_update(&rq->cfs, p, task_sleep); hrtick_update(rq); } -- cgit v1.2.3 From 
924e2904da9b5edec61611918b98ab1f7fccc461 Mon Sep 17 00:00:00 2001 From: Tejun Heo Date: Mon, 5 Aug 2024 12:39:10 -1000 Subject: sched/fair: Make balance_fair() test sched_fair_runnable() instead of rq->nr_running balance_fair() skips newidle balancing if rq->nr_running - there are already tasks on the rq, so no need to try to pull tasks. This tests the total number of queued tasks on the CPU instead of only the fair class, but is still correct as the rq can currently only have fair class tasks while balance_fair() is running. However, with the addition of sched_ext below the fair class, this will not hold anymore and make put_prev_task_balance() skip sched_ext's balance() incorrectly as, when a CPU has only lower priority class tasks, rq->nr_running would still be positive and balance_fair() would return 1 even when fair doesn't have any tasks to run. Update balance_fair() to use sched_fair_runnable() which tests rq->cfs.nr_running which is updated by bandwidth throttling. Note that pick_next_task_fair() already uses sched_fair_runnable() in its optimized path for the same purpose. Reported-by: Peter Zijlstra Signed-off-by: Tejun Heo Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Chengming Zhou Reviewed-by: K Prateek Nayak Link: https://lore.kernel.org/r/ZrFUjlCf7x3TNXB8@slm.duckdns.org --- kernel/sched/fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 795ceef5e7e1..6d39a824bbe1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8355,7 +8355,7 @@ static void set_cpus_allowed_fair(struct task_struct *p, struct affinity_context static int balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { - if (rq->nr_running) + if (sched_fair_runnable(rq)) return 1; return sched_balance_newidle(rq, rf) != 0; -- cgit v1.2.3 From 4ae0c2b91110dab6f4291c2c7f99dde60ecc97d8 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 1 Aug 2024 10:44:03 -0500 Subject: sched/debug: Fix fair_server_period_max value This code has an integer overflow or sign extension bug which was caught by gcc-13: kernel/sched/debug.c:341:57: error: integer overflow in expression of type 'long int' results in '-100663296' [-Werror=overflow] 341 | static unsigned long fair_server_period_max = (1 << 22) * NSEC_PER_USEC; /* ~4 seconds */ The result is that "fair_server_period_max" is set to 0xfffffffffa000000 (585 years) instead of instead of 0xfa000000 (4 seconds) that was intended. Fix this by changing the type to shift from (1 << 22) to (1UL << 22). 
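For clarity, the arithmetic behind the warning as a standalone sketch (assuming the product is evaluated in a 32-bit signed type, as on the 32-bit build that produced the report, and two's-complement conversion): 1 << 22 is 4194304, and 4194304 * 1000 is 4194304000 (0xfa000000), which does not fit in 32 signed bits, becomes -100663296, and sign-extends to 0xfffffffffa000000 when widened to 64 bits.

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t val       = (uint32_t)(1u << 22) * 1000u;  /* 4194304000 == 0xfa000000 */
		int32_t  as_signed = (int32_t)val;                   /* > INT32_MAX, becomes -100663296 */
		uint64_t widened   = (uint64_t)(int64_t)as_signed;   /* sign-extends to 0xfffffffffa000000 */

		printf("%u %d 0x%llx\n", (unsigned)val, (int)as_signed,
		       (unsigned long long)widened);
		return 0;
	}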
Closes: https://lore.kernel.org/all/CA+G9fYtE2GAbeqU+AOCffgo2oH0RTJUxU+=Pi3cFn4di_KgBAQ@mail.gmail.com/ Fixes: d741f297bcea ("sched/fair: Fair server interface") Reported-by: Linux Kernel Functional Testing Reported-by: Arnd Bergmann Reported-by: Stephen Rothwell Signed-off-by: Dan Carpenter Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/a936b991-e464-4bdf-94ab-08e25d364986@stanley.mountain --- kernel/sched/debug.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index e75914e7d2a8..831a77ab8466 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -338,7 +338,7 @@ enum dl_param { DL_PERIOD, }; -static unsigned long fair_server_period_max = (1 << 22) * NSEC_PER_USEC; /* ~4 seconds */ +static unsigned long fair_server_period_max = (1UL << 22) * NSEC_PER_USEC; /* ~4 seconds */ static unsigned long fair_server_period_min = (100) * NSEC_PER_USEC; /* 100 us */ static ssize_t sched_fair_server_write(struct file *filp, const char __user *ubuf, -- cgit v1.2.3 From 130fd056dd82b02db9a661c013071af35309be1a Mon Sep 17 00:00:00 2001 From: Qais Yousef Date: Mon, 10 Jun 2024 20:20:16 +0100 Subject: sched/rt: Clean up usage of rt_task() rt_task() checks if a task has RT priority. But depends on your dictionary, this could mean it belongs to RT class, or is a 'realtime' task, which includes RT and DL classes. Since this has caused some confusion already on discussion [1], it seemed a clean up is due. I define the usage of rt_task() to be tasks that belong to RT class. Make sure that it returns true only for RT class and audit the users and replace the ones required the old behavior with the new realtime_task() which returns true for RT and DL classes. Introduce similar realtime_prio() to create similar distinction to rt_prio() and update the users that required the old behavior to use the new function. Move MAX_DL_PRIO to prio.h so it can be used in the new definitions. Document the functions to make it more obvious what is the difference between them. PI-boosted tasks is a factor that must be taken into account when choosing which function to use. Rename task_is_realtime() to realtime_task_policy() as the old name is confusing against the new realtime_task(). No functional changes were intended. [1] https://lore.kernel.org/lkml/20240506100509.GL40213@noisy.programming.kicks-ass.net/ Signed-off-by: Qais Yousef Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Phil Auld Reviewed-by: "Steven Rostedt (Google)" Reviewed-by: Sebastian Andrzej Siewior Link: https://lore.kernel.org/r/20240610192018.1567075-2-qyousef@layalina.io --- fs/bcachefs/six.c | 2 +- fs/select.c | 2 +- include/linux/ioprio.h | 2 +- include/linux/sched/deadline.h | 6 ++++-- include/linux/sched/prio.h | 1 + include/linux/sched/rt.h | 27 ++++++++++++++++++++++++++- kernel/locking/rtmutex.c | 4 ++-- kernel/locking/rwsem.c | 4 ++-- kernel/locking/ww_mutex.h | 2 +- kernel/sched/core.c | 4 ++-- kernel/sched/syscalls.c | 2 +- kernel/time/hrtimer.c | 6 +++--- kernel/trace/trace_sched_wakeup.c | 2 +- mm/page-writeback.c | 4 ++-- mm/page_alloc.c | 2 +- 15 files changed, 49 insertions(+), 21 deletions(-) diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c index 3a494c5d1247..b30870bf7e4a 100644 --- a/fs/bcachefs/six.c +++ b/fs/bcachefs/six.c @@ -335,7 +335,7 @@ static inline bool six_owner_running(struct six_lock *lock) */ rcu_read_lock(); struct task_struct *owner = READ_ONCE(lock->owner); - bool ret = owner ? 
owner_on_cpu(owner) : !rt_task(current); + bool ret = owner ? owner_on_cpu(owner) : !realtime_task(current); rcu_read_unlock(); return ret; diff --git a/fs/select.c b/fs/select.c index 9515c3fa1a03..8d5c1419416c 100644 --- a/fs/select.c +++ b/fs/select.c @@ -82,7 +82,7 @@ u64 select_estimate_accuracy(struct timespec64 *tv) * Realtime tasks get a slack of 0 for obvious reasons. */ - if (rt_task(current)) + if (realtime_task(current)) return 0; ktime_get_ts64(&now); diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index db1249cd9692..75859b78d540 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -40,7 +40,7 @@ static inline int task_nice_ioclass(struct task_struct *task) { if (task->policy == SCHED_IDLE) return IOPRIO_CLASS_IDLE; - else if (task_is_realtime(task)) + else if (realtime_task_policy(task)) return IOPRIO_CLASS_RT; else return IOPRIO_CLASS_BE; diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index df3aca89d4f5..5cb88b748ad6 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -10,8 +10,6 @@ #include -#define MAX_DL_PRIO 0 - static inline int dl_prio(int prio) { if (unlikely(prio < MAX_DL_PRIO)) @@ -19,6 +17,10 @@ static inline int dl_prio(int prio) return 0; } +/* + * Returns true if a task has a priority that belongs to DL class. PI-boosted + * tasks will return true. Use dl_policy() to ignore PI-boosted tasks. + */ static inline int dl_task(struct task_struct *p) { return dl_prio(p->prio); diff --git a/include/linux/sched/prio.h b/include/linux/sched/prio.h index ab83d85e1183..6ab43b4f72f9 100644 --- a/include/linux/sched/prio.h +++ b/include/linux/sched/prio.h @@ -14,6 +14,7 @@ */ #define MAX_RT_PRIO 100 +#define MAX_DL_PRIO 0 #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH) #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2) diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index b2b9e6eb9683..a055dd68a77c 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -7,18 +7,43 @@ struct task_struct; static inline int rt_prio(int prio) +{ + if (unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO)) + return 1; + return 0; +} + +static inline int realtime_prio(int prio) { if (unlikely(prio < MAX_RT_PRIO)) return 1; return 0; } +/* + * Returns true if a task has a priority that belongs to RT class. PI-boosted + * tasks will return true. Use rt_policy() to ignore PI-boosted tasks. + */ static inline int rt_task(struct task_struct *p) { return rt_prio(p->prio); } -static inline bool task_is_realtime(struct task_struct *tsk) +/* + * Returns true if a task has a priority that belongs to RT or DL classes. + * PI-boosted tasks will return true. Use realtime_task_policy() to ignore + * PI-boosted tasks. + */ +static inline int realtime_task(struct task_struct *p) +{ + return realtime_prio(p->prio); +} + +/* + * Returns true if a task has a policy that belongs to RT or DL classes. + * PI-boosted tasks will return false. 
+ */ +static inline bool realtime_task_policy(struct task_struct *tsk) { int policy = tsk->policy; diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 88d08eeb8bc0..55c9dab37f33 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -347,7 +347,7 @@ static __always_inline int __waiter_prio(struct task_struct *task) { int prio = task->prio; - if (!rt_prio(prio)) + if (!realtime_prio(prio)) return DEFAULT_PRIO; return prio; @@ -435,7 +435,7 @@ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, * Note that RT tasks are excluded from same priority (lateral) * steals to prevent the introduction of an unbounded latency. */ - if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio)) + if (realtime_prio(waiter->tree.prio)) return false; return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree); diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 33cac79e3994..516174a64fa5 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -631,7 +631,7 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, * if it is an RT task or wait in the wait queue * for too long. */ - if (has_handoff || (!rt_task(waiter->task) && + if (has_handoff || (!realtime_task(waiter->task) && !time_after(jiffies, waiter->timeout))) return false; @@ -914,7 +914,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem) if (owner_state != OWNER_WRITER) { if (need_resched()) break; - if (rt_task(current) && + if (realtime_task(current) && (prev_owner_state != OWNER_WRITER)) break; } diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h index 3ad2cc4823e5..fa4b416a1f62 100644 --- a/kernel/locking/ww_mutex.h +++ b/kernel/locking/ww_mutex.h @@ -237,7 +237,7 @@ __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b) int a_prio = a->task->prio; int b_prio = b->task->prio; - if (rt_prio(a_prio) || rt_prio(b_prio)) { + if (realtime_prio(a_prio) || realtime_prio(b_prio)) { if (a_prio > b_prio) return true; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 29fde993d3f8..673cbeb7ad48 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -166,7 +166,7 @@ static inline int __task_prio(const struct task_struct *p) if (p->dl_server) return -1; /* deadline */ - if (rt_prio(p->prio)) /* includes deadline */ + if (realtime_prio(p->prio)) /* includes deadline */ return p->prio; /* [-1, 99] */ if (p->sched_class == &idle_sched_class) @@ -8590,7 +8590,7 @@ void normalize_rt_tasks(void) schedstat_set(p->stats.sleep_start, 0); schedstat_set(p->stats.block_start, 0); - if (!dl_task(p) && !rt_task(p)) { + if (!realtime_task(p)) { /* * Renice negative nice level userspace * tasks back to 0: diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c index ae1b42775ef9..6d60326d73e4 100644 --- a/kernel/sched/syscalls.c +++ b/kernel/sched/syscalls.c @@ -57,7 +57,7 @@ static int effective_prio(struct task_struct *p) * keep the priority unchanged. Otherwise, update priority * to the normal priority: */ - if (!rt_prio(p->prio)) + if (!realtime_prio(p->prio)) return p->normal_prio; return p->prio; } diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index b8ee320208d4..a1d1d8d886a8 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -1975,7 +1975,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl, * expiry. 
*/ if (IS_ENABLED(CONFIG_PREEMPT_RT)) { - if (task_is_realtime(current) && !(mode & HRTIMER_MODE_SOFT)) + if (realtime_task_policy(current) && !(mode & HRTIMER_MODE_SOFT)) mode |= HRTIMER_MODE_HARD; } @@ -2075,7 +2075,7 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, u64 slack; slack = current->timer_slack_ns; - if (rt_task(current)) + if (realtime_task(current)) slack = 0; hrtimer_init_sleeper_on_stack(&t, clockid, mode); @@ -2280,7 +2280,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, * Override any slack passed by the user if under * rt contraints. */ - if (rt_task(current)) + if (realtime_task(current)) delta = 0; hrtimer_init_sleeper_on_stack(&t, clock_id, mode); diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 130ca7e7787e..1824e17c93c7 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -547,7 +547,7 @@ probe_wakeup(void *ignore, struct task_struct *p) * - wakeup_dl handles tasks belonging to sched_dl class only. */ if (tracing_dl || (wakeup_dl && !dl_task(p)) || - (wakeup_rt && !dl_task(p) && !rt_task(p)) || + (wakeup_rt && !realtime_task(p)) || (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio))) return; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 4430ac68e4c4..78dcad729703 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -418,7 +418,7 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc) bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE; tsk = current; - if (rt_task(tsk)) { + if (realtime_task(tsk)) { bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32; thresh += thresh / 4 + global_wb_domain.dirty_limit / 32; } @@ -477,7 +477,7 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat) else dirty = vm_dirty_ratio * node_memory / 100; - if (rt_task(tsk)) + if (realtime_task(tsk)) dirty += dirty / 4; /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 28f80daf5c04..54274e468d51 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4002,7 +4002,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) */ if (alloc_flags & ALLOC_MIN_RESERVE) alloc_flags &= ~ALLOC_CPUSET; - } else if (unlikely(rt_task(current)) && in_task()) + } else if (unlikely(realtime_task(current)) && in_task()) alloc_flags |= ALLOC_MIN_RESERVE; alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); -- cgit v1.2.3 From b166af3db70fdcecf125662a2360471bb20be203 Mon Sep 17 00:00:00 2001 From: Qais Yousef Date: Mon, 10 Jun 2024 20:20:17 +0100 Subject: sched/rt, dl: Convert functions to return bool {rt, realtime, dl}_{task, prio}() functions' return value is actually a bool. Convert their return type to reflect that. 
Suggested-by: "Steven Rostedt (Google)" Signed-off-by: Qais Yousef Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Sebastian Andrzej Siewior Reviewed-by: "Steven Rostedt (Google)" Reviewed-by: Metin Kaya Link: https://lore.kernel.org/r/20240610192018.1567075-3-qyousef@layalina.io --- include/linux/sched/deadline.h | 8 +++----- include/linux/sched/rt.h | 16 ++++++---------- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h index 5cb88b748ad6..3a912ab42bb5 100644 --- a/include/linux/sched/deadline.h +++ b/include/linux/sched/deadline.h @@ -10,18 +10,16 @@ #include -static inline int dl_prio(int prio) +static inline bool dl_prio(int prio) { - if (unlikely(prio < MAX_DL_PRIO)) - return 1; - return 0; + return unlikely(prio < MAX_DL_PRIO); } /* * Returns true if a task has a priority that belongs to DL class. PI-boosted * tasks will return true. Use dl_policy() to ignore PI-boosted tasks. */ -static inline int dl_task(struct task_struct *p) +static inline bool dl_task(struct task_struct *p) { return dl_prio(p->prio); } diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index a055dd68a77c..91ef1ef2019f 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -6,25 +6,21 @@ struct task_struct; -static inline int rt_prio(int prio) +static inline bool rt_prio(int prio) { - if (unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO)) - return 1; - return 0; + return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO); } -static inline int realtime_prio(int prio) +static inline bool realtime_prio(int prio) { - if (unlikely(prio < MAX_RT_PRIO)) - return 1; - return 0; + return unlikely(prio < MAX_RT_PRIO); } /* * Returns true if a task has a priority that belongs to RT class. PI-boosted * tasks will return true. Use rt_policy() to ignore PI-boosted tasks. */ -static inline int rt_task(struct task_struct *p) +static inline bool rt_task(struct task_struct *p) { return rt_prio(p->prio); } @@ -34,7 +30,7 @@ static inline int rt_task(struct task_struct *p) * PI-boosted tasks will return true. Use realtime_task_policy() to ignore * PI-boosted tasks. */ -static inline int realtime_task(struct task_struct *p) +static inline bool realtime_task(struct task_struct *p) { return realtime_prio(p->prio); } -- cgit v1.2.3 From ae04f69de0bef93c7086cf2983dbc8e8fd624ebe Mon Sep 17 00:00:00 2001 From: Qais Yousef Date: Mon, 10 Jun 2024 20:20:18 +0100 Subject: sched/rt: Rename realtime_{prio, task}() to rt_or_dl_{prio, task}() Some find the name realtime overloaded. Use rt_or_dl() as an alternative, hopefully better, name. 
Suggested-by: Daniel Bristot de Oliveira Signed-off-by: Qais Yousef Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240610192018.1567075-4-qyousef@layalina.io --- fs/bcachefs/six.c | 2 +- fs/select.c | 2 +- include/linux/ioprio.h | 2 +- include/linux/sched/rt.h | 10 +++++----- kernel/locking/rtmutex.c | 4 ++-- kernel/locking/rwsem.c | 4 ++-- kernel/locking/ww_mutex.h | 2 +- kernel/sched/core.c | 4 ++-- kernel/sched/syscalls.c | 2 +- kernel/time/hrtimer.c | 6 +++--- kernel/trace/trace_sched_wakeup.c | 2 +- mm/page-writeback.c | 4 ++-- mm/page_alloc.c | 2 +- 13 files changed, 23 insertions(+), 23 deletions(-) diff --git a/fs/bcachefs/six.c b/fs/bcachefs/six.c index b30870bf7e4a..9cbd3c14c94f 100644 --- a/fs/bcachefs/six.c +++ b/fs/bcachefs/six.c @@ -335,7 +335,7 @@ static inline bool six_owner_running(struct six_lock *lock) */ rcu_read_lock(); struct task_struct *owner = READ_ONCE(lock->owner); - bool ret = owner ? owner_on_cpu(owner) : !realtime_task(current); + bool ret = owner ? owner_on_cpu(owner) : !rt_or_dl_task(current); rcu_read_unlock(); return ret; diff --git a/fs/select.c b/fs/select.c index 8d5c1419416c..73fce145eb72 100644 --- a/fs/select.c +++ b/fs/select.c @@ -82,7 +82,7 @@ u64 select_estimate_accuracy(struct timespec64 *tv) * Realtime tasks get a slack of 0 for obvious reasons. */ - if (realtime_task(current)) + if (rt_or_dl_task(current)) return 0; ktime_get_ts64(&now); diff --git a/include/linux/ioprio.h b/include/linux/ioprio.h index 75859b78d540..b25377b6ea98 100644 --- a/include/linux/ioprio.h +++ b/include/linux/ioprio.h @@ -40,7 +40,7 @@ static inline int task_nice_ioclass(struct task_struct *task) { if (task->policy == SCHED_IDLE) return IOPRIO_CLASS_IDLE; - else if (realtime_task_policy(task)) + else if (rt_or_dl_task_policy(task)) return IOPRIO_CLASS_RT; else return IOPRIO_CLASS_BE; diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index 91ef1ef2019f..4e3338103654 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h @@ -11,7 +11,7 @@ static inline bool rt_prio(int prio) return unlikely(prio < MAX_RT_PRIO && prio >= MAX_DL_PRIO); } -static inline bool realtime_prio(int prio) +static inline bool rt_or_dl_prio(int prio) { return unlikely(prio < MAX_RT_PRIO); } @@ -27,19 +27,19 @@ static inline bool rt_task(struct task_struct *p) /* * Returns true if a task has a priority that belongs to RT or DL classes. - * PI-boosted tasks will return true. Use realtime_task_policy() to ignore + * PI-boosted tasks will return true. Use rt_or_dl_task_policy() to ignore * PI-boosted tasks. */ -static inline bool realtime_task(struct task_struct *p) +static inline bool rt_or_dl_task(struct task_struct *p) { - return realtime_prio(p->prio); + return rt_or_dl_prio(p->prio); } /* * Returns true if a task has a policy that belongs to RT or DL classes. * PI-boosted tasks will return false. 
*/ -static inline bool realtime_task_policy(struct task_struct *tsk) +static inline bool rt_or_dl_task_policy(struct task_struct *tsk) { int policy = tsk->policy; diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index 55c9dab37f33..c2a530d704b4 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -347,7 +347,7 @@ static __always_inline int __waiter_prio(struct task_struct *task) { int prio = task->prio; - if (!realtime_prio(prio)) + if (!rt_or_dl_prio(prio)) return DEFAULT_PRIO; return prio; @@ -435,7 +435,7 @@ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, * Note that RT tasks are excluded from same priority (lateral) * steals to prevent the introduction of an unbounded latency. */ - if (realtime_prio(waiter->tree.prio)) + if (rt_or_dl_prio(waiter->tree.prio)) return false; return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree); diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c index 516174a64fa5..5ded7dff46ef 100644 --- a/kernel/locking/rwsem.c +++ b/kernel/locking/rwsem.c @@ -631,7 +631,7 @@ static inline bool rwsem_try_write_lock(struct rw_semaphore *sem, * if it is an RT task or wait in the wait queue * for too long. */ - if (has_handoff || (!realtime_task(waiter->task) && + if (has_handoff || (!rt_or_dl_task(waiter->task) && !time_after(jiffies, waiter->timeout))) return false; @@ -914,7 +914,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem) if (owner_state != OWNER_WRITER) { if (need_resched()) break; - if (realtime_task(current) && + if (rt_or_dl_task(current) && (prev_owner_state != OWNER_WRITER)) break; } diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h index fa4b416a1f62..76d204b7d29c 100644 --- a/kernel/locking/ww_mutex.h +++ b/kernel/locking/ww_mutex.h @@ -237,7 +237,7 @@ __ww_ctx_less(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b) int a_prio = a->task->prio; int b_prio = b->task->prio; - if (realtime_prio(a_prio) || realtime_prio(b_prio)) { + if (rt_or_dl_prio(a_prio) || rt_or_dl_prio(b_prio)) { if (a_prio > b_prio) return true; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 673cbeb7ad48..ab50100363ca 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -166,7 +166,7 @@ static inline int __task_prio(const struct task_struct *p) if (p->dl_server) return -1; /* deadline */ - if (realtime_prio(p->prio)) /* includes deadline */ + if (rt_or_dl_prio(p->prio)) return p->prio; /* [-1, 99] */ if (p->sched_class == &idle_sched_class) @@ -8590,7 +8590,7 @@ void normalize_rt_tasks(void) schedstat_set(p->stats.sleep_start, 0); schedstat_set(p->stats.block_start, 0); - if (!realtime_task(p)) { + if (!rt_or_dl_task(p)) { /* * Renice negative nice level userspace * tasks back to 0: diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c index 6d60326d73e4..60e70c889d91 100644 --- a/kernel/sched/syscalls.c +++ b/kernel/sched/syscalls.c @@ -57,7 +57,7 @@ static int effective_prio(struct task_struct *p) * keep the priority unchanged. Otherwise, update priority * to the normal priority: */ - if (!realtime_prio(p->prio)) + if (!rt_or_dl_prio(p->prio)) return p->normal_prio; return p->prio; } diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c index a1d1d8d886a8..f4be3abbb47b 100644 --- a/kernel/time/hrtimer.c +++ b/kernel/time/hrtimer.c @@ -1975,7 +1975,7 @@ static void __hrtimer_init_sleeper(struct hrtimer_sleeper *sl, * expiry. 
*/ if (IS_ENABLED(CONFIG_PREEMPT_RT)) { - if (realtime_task_policy(current) && !(mode & HRTIMER_MODE_SOFT)) + if (rt_or_dl_task_policy(current) && !(mode & HRTIMER_MODE_SOFT)) mode |= HRTIMER_MODE_HARD; } @@ -2075,7 +2075,7 @@ long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode, u64 slack; slack = current->timer_slack_ns; - if (realtime_task(current)) + if (rt_or_dl_task(current)) slack = 0; hrtimer_init_sleeper_on_stack(&t, clockid, mode); @@ -2280,7 +2280,7 @@ schedule_hrtimeout_range_clock(ktime_t *expires, u64 delta, * Override any slack passed by the user if under * rt contraints. */ - if (realtime_task(current)) + if (rt_or_dl_task(current)) delta = 0; hrtimer_init_sleeper_on_stack(&t, clock_id, mode); diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c index 1824e17c93c7..ae2ace5e515a 100644 --- a/kernel/trace/trace_sched_wakeup.c +++ b/kernel/trace/trace_sched_wakeup.c @@ -547,7 +547,7 @@ probe_wakeup(void *ignore, struct task_struct *p) * - wakeup_dl handles tasks belonging to sched_dl class only. */ if (tracing_dl || (wakeup_dl && !dl_task(p)) || - (wakeup_rt && !realtime_task(p)) || + (wakeup_rt && !rt_or_dl_task(p)) || (!dl_task(p) && (p->prio >= wakeup_prio || p->prio >= current->prio))) return; diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 78dcad729703..7a04cb1918fd 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -418,7 +418,7 @@ static void domain_dirty_limits(struct dirty_throttle_control *dtc) bg_thresh = (bg_ratio * available_memory) / PAGE_SIZE; tsk = current; - if (realtime_task(tsk)) { + if (rt_or_dl_task(tsk)) { bg_thresh += bg_thresh / 4 + global_wb_domain.dirty_limit / 32; thresh += thresh / 4 + global_wb_domain.dirty_limit / 32; } @@ -477,7 +477,7 @@ static unsigned long node_dirty_limit(struct pglist_data *pgdat) else dirty = vm_dirty_ratio * node_memory / 100; - if (realtime_task(tsk)) + if (rt_or_dl_task(tsk)) dirty += dirty / 4; /* diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 54274e468d51..36f8abde3751 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -4002,7 +4002,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask, unsigned int order) */ if (alloc_flags & ALLOC_MIN_RESERVE) alloc_flags &= ~ALLOC_CPUSET; - } else if (unlikely(realtime_task(current)) && in_task()) + } else if (unlikely(rt_or_dl_task(current)) && in_task()) alloc_flags |= ALLOC_MIN_RESERVE; alloc_flags = gfp_to_alloc_flags_cma(gfp_mask, alloc_flags); -- cgit v1.2.3 From f25b7b32b0db6d71b07b06fe8de45b0408541c2a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 14 Oct 2023 23:12:20 +0200 Subject: sched/eevdf: Add feature comments Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105028.287790895@infradead.org --- kernel/sched/features.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 929021fd6bc4..97fb2d492089 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -5,7 +5,14 @@ * sleep+wake cycles. EEVDF placement strategy #1, #2 if disabled. */ SCHED_FEAT(PLACE_LAG, true) +/* + * Give new tasks half a slice to ease into the competition. + */ SCHED_FEAT(PLACE_DEADLINE_INITIAL, true) +/* + * Inhibit (wakeup) preemption until the current task has either matched the + * 0-lag point or until is has exhausted it's slice. 
+ */ SCHED_FEAT(RUN_TO_PARITY, true) /* -- cgit v1.2.3 From 949090eaf0a3e39aa0f4a675407e16d0e975da11 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 4 Oct 2023 12:43:53 +0200 Subject: sched/eevdf: Remove min_vruntime_copy Since commit e8f331bcc270 ("sched/smp: Use lag to simplify cross-runqueue placement") the min_vruntime_copy is no longer used. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105028.395297941@infradead.org --- kernel/sched/fair.c | 5 ++--- kernel/sched/sched.h | 4 ---- 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 6d39a824bbe1..8201f0f4e709 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -779,8 +779,7 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq) } /* ensure we never gain time by being placed backwards. */ - u64_u32_store(cfs_rq->min_vruntime, - __update_min_vruntime(cfs_rq, vruntime)); + cfs_rq->min_vruntime = __update_min_vruntime(cfs_rq, vruntime); } static inline bool __entity_less(struct rb_node *a, const struct rb_node *b) @@ -12933,7 +12932,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) void init_cfs_rq(struct cfs_rq *cfs_rq) { cfs_rq->tasks_timeline = RB_ROOT_CACHED; - u64_u32_store(cfs_rq->min_vruntime, (u64)(-(1LL << 20))); + cfs_rq->min_vruntime = (u64)(-(1LL << 20)); #ifdef CONFIG_SMP raw_spin_lock_init(&cfs_rq->removed.lock); #endif diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 1e1d1b467af2..a6d6b6f106da 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -613,10 +613,6 @@ struct cfs_rq { u64 min_vruntime_fi; #endif -#ifndef CONFIG_64BIT - u64 min_vruntime_copy; -#endif - struct rb_root_cached tasks_timeline; /* -- cgit v1.2.3 From 8e2e13ac6122915bd98315237b0317495e391be0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Apr 2024 09:50:07 +0200 Subject: sched/fair: Cleanup pick_task_fair() vs throttle Per 54d27365cae8 ("sched/fair: Prevent throttling in early pick_next_task_fair()") the reason check_cfs_rq_runtime() is under the 'if (curr)' check is to ensure the (downward) traversal does not result in an empty cfs_rq. But then the pick_task_fair() 'copy' of all this made it restart the traversal anyway, so that seems to solve the issue too. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Ben Segall Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105028.501679876@infradead.org --- kernel/sched/fair.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8201f0f4e709..7ba1ca56a63e 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8471,11 +8471,11 @@ again: update_curr(cfs_rq); else curr = NULL; - - if (unlikely(check_cfs_rq_runtime(cfs_rq))) - goto again; } + if (unlikely(check_cfs_rq_runtime(cfs_rq))) + goto again; + se = pick_next_entity(cfs_rq); cfs_rq = group_cfs_rq(se); } while (cfs_rq); -- cgit v1.2.3 From c97f54fe6d014419e557200ed075cf53b47c5420 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Apr 2024 09:50:12 +0200 Subject: sched/fair: Cleanup pick_task_fair()'s curr With 4c456c9ad334 ("sched/fair: Remove unused 'curr' argument from pick_next_entity()") curr is no longer being used, so no point in clearing it. 
Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105028.614707623@infradead.org --- kernel/sched/fair.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 7ba1ca56a63e..175ccec8c53f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8463,15 +8463,9 @@ again: return NULL; do { - struct sched_entity *curr = cfs_rq->curr; - /* When we pick for a remote RQ, we'll not have done put_prev_entity() */ - if (curr) { - if (curr->on_rq) - update_curr(cfs_rq); - else - curr = NULL; - } + if (cfs_rq->curr && cfs_rq->curr->on_rq) + update_curr(cfs_rq); if (unlikely(check_cfs_rq_runtime(cfs_rq))) goto again; -- cgit v1.2.3 From 3b3dd89b8bb0f03657859c22c86c19224f778638 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Apr 2024 09:50:16 +0200 Subject: sched/fair: Unify pick_{,next_}_task_fair() Implement pick_next_task_fair() in terms of pick_task_fair() to de-duplicate the pick loop. More importantly, this makes all the pick loops use the state-invariant form, which is useful to introduce further re-try conditions in later patches. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105028.725062368@infradead.org --- kernel/sched/fair.c | 60 +++++++---------------------------------------------- 1 file changed, 8 insertions(+), 52 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 175ccec8c53f..1452c536b578 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8451,7 +8451,6 @@ preempt: resched_curr(rq); } -#ifdef CONFIG_SMP static struct task_struct *pick_task_fair(struct rq *rq) { struct sched_entity *se; @@ -8463,7 +8462,7 @@ again: return NULL; do { - /* When we pick for a remote RQ, we'll not have done put_prev_entity() */ + /* Might not have done put_prev_entity() */ if (cfs_rq->curr && cfs_rq->curr->on_rq) update_curr(cfs_rq); @@ -8484,19 +8483,19 @@ again: return task_of(se); } -#endif struct task_struct * pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { - struct cfs_rq *cfs_rq = &rq->cfs; struct sched_entity *se; struct task_struct *p; int new_tasks; again: - if (!sched_fair_runnable(rq)) + p = pick_task_fair(rq); + if (!p) goto idle; + se = &p->se; #ifdef CONFIG_FAIR_GROUP_SCHED if (!prev || prev->sched_class != &fair_sched_class) @@ -8508,52 +8507,14 @@ again: * * Therefore attempt to avoid putting and setting the entire cgroup * hierarchy, only change the part that actually changes. - */ - - do { - struct sched_entity *curr = cfs_rq->curr; - - /* - * Since we got here without doing put_prev_entity() we also - * have to consider cfs_rq->curr. If it is still a runnable - * entity, update_curr() will update its vruntime, otherwise - * forget we've ever seen it. - */ - if (curr) { - if (curr->on_rq) - update_curr(cfs_rq); - else - curr = NULL; - - /* - * This call to check_cfs_rq_runtime() will do the - * throttle and dequeue its entity in the parent(s). - * Therefore the nr_running test will indeed - * be correct. 
- */ - if (unlikely(check_cfs_rq_runtime(cfs_rq))) { - cfs_rq = &rq->cfs; - - if (!cfs_rq->nr_running) - goto idle; - - goto simple; - } - } - - se = pick_next_entity(cfs_rq); - cfs_rq = group_cfs_rq(se); - } while (cfs_rq); - - p = task_of(se); - - /* + * * Since we haven't yet done put_prev_entity and if the selected task * is a different task than we started out with, try and touch the * least amount of cfs_rqs. */ if (prev != p) { struct sched_entity *pse = &prev->se; + struct cfs_rq *cfs_rq; while (!(cfs_rq = is_same_group(se, pse))) { int se_depth = se->depth; @@ -8579,13 +8540,8 @@ simple: if (prev) put_prev_task(rq, prev); - do { - se = pick_next_entity(cfs_rq); - set_next_entity(cfs_rq, se); - cfs_rq = group_cfs_rq(se); - } while (cfs_rq); - - p = task_of(se); + for_each_sched_entity(se) + set_next_entity(cfs_rq_of(se), se); done: __maybe_unused; #ifdef CONFIG_SMP -- cgit v1.2.3 From 863ccdbb918a77e3f011571f943020bf7f0b114b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Apr 2024 09:50:20 +0200 Subject: sched: Allow sched_class::dequeue_task() to fail Change the function signature of sched_class::dequeue_task() to return a boolean, allowing future patches to 'fail' dequeue. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105028.864630153@infradead.org --- kernel/sched/core.c | 7 +++++-- kernel/sched/deadline.c | 4 +++- kernel/sched/fair.c | 4 +++- kernel/sched/idle.c | 3 ++- kernel/sched/rt.c | 4 +++- kernel/sched/sched.h | 4 ++-- kernel/sched/stop_task.c | 3 ++- 7 files changed, 20 insertions(+), 9 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ab50100363ca..4f7a4e98f22d 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2001,7 +2001,10 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags) sched_core_enqueue(rq, p); } -void dequeue_task(struct rq *rq, struct task_struct *p, int flags) +/* + * Must only return false when DEQUEUE_SLEEP. + */ +inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags) { if (sched_core_enabled(rq)) sched_core_dequeue(rq, p, flags); @@ -2015,7 +2018,7 @@ void dequeue_task(struct rq *rq, struct task_struct *p, int flags) } uclamp_rq_dec(rq, p); - p->sched_class->dequeue_task(rq, p, flags); + return p->sched_class->dequeue_task(rq, p, flags); } void activate_task(struct rq *rq, struct task_struct *p, int flags) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index c5f1cc753a31..bbaeace9d1e3 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2162,7 +2162,7 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) enqueue_pushable_dl_task(rq, p); } -static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) +static bool dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) { update_curr_dl(rq); @@ -2172,6 +2172,8 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) dequeue_dl_entity(&p->dl, flags); if (!p->dl.dl_throttled && !dl_server(&p->dl)) dequeue_pushable_dl_task(rq, p); + + return true; } /* diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1452c536b578..03f76b3a6f1c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6865,7 +6865,7 @@ static void set_next_buddy(struct sched_entity *se); * decreased. 
We remove the task from the rbtree and * update the fair scheduling stats: */ -static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) +static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) { struct cfs_rq *cfs_rq; struct sched_entity *se = &p->se; @@ -6937,6 +6937,8 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) dequeue_throttle: util_est_update(&rq->cfs, p, task_sleep); hrtick_update(rq); + + return true; } #ifdef CONFIG_SMP diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index d560f7ffa463..16074209ce26 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -482,13 +482,14 @@ struct task_struct *pick_next_task_idle(struct rq *rq) * It is not legal to sleep in the idle task - print a warning * message if some code attempts to do it: */ -static void +static bool dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) { raw_spin_rq_unlock_irq(rq); printk(KERN_ERR "bad: scheduling from the idle thread!\n"); dump_stack(); raw_spin_rq_lock_irq(rq); + return true; } /* diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index a8731da04cd5..fdc8e059cab0 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1483,7 +1483,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) enqueue_pushable_task(rq, p); } -static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) +static bool dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) { struct sched_rt_entity *rt_se = &p->rt; @@ -1491,6 +1491,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) dequeue_rt_entity(rt_se, flags); dequeue_pushable_task(rq, p); + + return true; } /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index a6d6b6f106da..6196f90df93e 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2285,7 +2285,7 @@ struct sched_class { #endif void (*enqueue_task) (struct rq *rq, struct task_struct *p, int flags); - void (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); + bool (*dequeue_task) (struct rq *rq, struct task_struct *p, int flags); void (*yield_task) (struct rq *rq); bool (*yield_to_task)(struct rq *rq, struct task_struct *p); @@ -3606,7 +3606,7 @@ extern int __sched_setaffinity(struct task_struct *p, struct affinity_context *c extern void __setscheduler_prio(struct task_struct *p, int prio); extern void set_load_weight(struct task_struct *p, bool update_load); extern void enqueue_task(struct rq *rq, struct task_struct *p, int flags); -extern void dequeue_task(struct rq *rq, struct task_struct *p, int flags); +extern bool dequeue_task(struct rq *rq, struct task_struct *p, int flags); extern void check_class_changed(struct rq *rq, struct task_struct *p, const struct sched_class *prev_class, diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index b1b8fe61c532..4cf02074fa9e 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -57,10 +57,11 @@ enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) add_nr_running(rq, 1); } -static void +static bool dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) { sub_nr_running(rq, 1); + return true; } static void yield_task_stop(struct rq *rq) -- cgit v1.2.3 From fab4a808ba9fb59b691d7096eed9b1494812ffd6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 3 Apr 2024 09:50:41 +0200 Subject: sched/fair: Re-organize dequeue_task_fair() Working towards delaying dequeue, notably also inside the hierachy, rework 
dequeue_task_fair() such that it can 'resume' an interrupted hierarchy walk. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105028.977256873@infradead.org --- kernel/sched/fair.c | 62 +++++++++++++++++++++++++++++++++++------------------ 1 file changed, 41 insertions(+), 21 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 03f76b3a6f1c..59b00d78d456 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6861,34 +6861,43 @@ enqueue_throttle: static void set_next_buddy(struct sched_entity *se); /* - * The dequeue_task method is called before nr_running is - * decreased. We remove the task from the rbtree and - * update the fair scheduling stats: + * Basically dequeue_task_fair(), except it can deal with dequeue_entity() + * failing half-way through and resume the dequeue later. + * + * Returns: + * -1 - dequeue delayed + * 0 - dequeue throttled + * 1 - dequeue complete */ -static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) +static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) { - struct cfs_rq *cfs_rq; - struct sched_entity *se = &p->se; - int task_sleep = flags & DEQUEUE_SLEEP; - int idle_h_nr_running = task_has_idle_policy(p); bool was_sched_idle = sched_idle_rq(rq); int rq_h_nr_running = rq->cfs.h_nr_running; + bool task_sleep = flags & DEQUEUE_SLEEP; + struct task_struct *p = NULL; + int idle_h_nr_running = 0; + int h_nr_running = 0; + struct cfs_rq *cfs_rq; - util_est_dequeue(&rq->cfs, p); + if (entity_is_task(se)) { + p = task_of(se); + h_nr_running = 1; + idle_h_nr_running = task_has_idle_policy(p); + } for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); - cfs_rq->h_nr_running--; + cfs_rq->h_nr_running -= h_nr_running; cfs_rq->idle_h_nr_running -= idle_h_nr_running; if (cfs_rq_is_idle(cfs_rq)) - idle_h_nr_running = 1; + idle_h_nr_running = h_nr_running; /* end evaluation on encountering a throttled cfs_rq */ if (cfs_rq_throttled(cfs_rq)) - goto dequeue_throttle; + return 0; /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { @@ -6912,20 +6921,18 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) se_update_runnable(se); update_cfs_group(se); - cfs_rq->h_nr_running--; + cfs_rq->h_nr_running -= h_nr_running; cfs_rq->idle_h_nr_running -= idle_h_nr_running; if (cfs_rq_is_idle(cfs_rq)) - idle_h_nr_running = 1; + idle_h_nr_running = h_nr_running; /* end evaluation on encountering a throttled cfs_rq */ if (cfs_rq_throttled(cfs_rq)) - goto dequeue_throttle; - + return 0; } - /* At this point se is NULL and we are at root level*/ - sub_nr_running(rq, 1); + sub_nr_running(rq, h_nr_running); if (rq_h_nr_running && !rq->cfs.h_nr_running) dl_server_stop(&rq->fair_server); @@ -6934,10 +6941,23 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) if (unlikely(!was_sched_idle && sched_idle_rq(rq))) rq->next_balance = jiffies; -dequeue_throttle: - util_est_update(&rq->cfs, p, task_sleep); - hrtick_update(rq); + return 1; +} + +/* + * The dequeue_task method is called before nr_running is + * decreased. 
We remove the task from the rbtree and + * update the fair scheduling stats: + */ +static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) +{ + util_est_dequeue(&rq->cfs, p); + + if (dequeue_entities(rq, &p->se, flags) < 0) + return false; + util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP); + hrtick_update(rq); return true; } -- cgit v1.2.3 From e8901061ca0cd9acbd3d29d41d16c69c2bfff9f0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 May 2024 10:48:09 +0200 Subject: sched: Split DEQUEUE_SLEEP from deactivate_task() As a preparation for dequeue_task() failing, and a second code-path needing to take care of the 'success' path, split out the DEQEUE_SLEEP path from deactivate_task(). Much thanks to Libo for spotting and fixing a TASK_ON_RQ_MIGRATING ordering fail. Fixed-by: Libo Chen Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105029.086192709@infradead.org --- kernel/sched/core.c | 23 +++++++++++++---------- kernel/sched/sched.h | 14 ++++++++++++++ 2 files changed, 27 insertions(+), 10 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 4f7a4e98f22d..6c595485bcbc 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2036,12 +2036,23 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags) void deactivate_task(struct rq *rq, struct task_struct *p, int flags) { - WRITE_ONCE(p->on_rq, (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING); + WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); ASSERT_EXCLUSIVE_WRITER(p->on_rq); + /* + * Code explicitly relies on TASK_ON_RQ_MIGRATING begin set *before* + * dequeue_task() and cleared *after* enqueue_task(). + */ + dequeue_task(rq, p, flags); } +static void block_task(struct rq *rq, struct task_struct *p, int flags) +{ + if (dequeue_task(rq, p, DEQUEUE_SLEEP | flags)) + __block_task(rq, p); +} + /** * task_curr - is this task currently executing on a CPU? * @p: the task in question. @@ -6498,9 +6509,6 @@ static void __sched notrace __schedule(unsigned int sched_mode) !(prev_state & TASK_NOLOAD) && !(prev_state & TASK_FROZEN); - if (prev->sched_contributes_to_load) - rq->nr_uninterruptible++; - /* * __schedule() ttwu() * prev_state = prev->state; if (p->on_rq && ...) @@ -6512,12 +6520,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) * * After this, schedule() must not care about p->state any more. 
*/ - deactivate_task(rq, prev, DEQUEUE_SLEEP | DEQUEUE_NOCLOCK); - - if (prev->in_iowait) { - atomic_inc(&rq->nr_iowait); - delayacct_blkio_start(); - } + block_task(rq, prev, DEQUEUE_NOCLOCK); } switch_count = &prev->nvcsw; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 6196f90df93e..69ab3b0289c0 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -68,6 +68,7 @@ #include #include #include +#include #include #include @@ -2585,6 +2586,19 @@ static inline void sub_nr_running(struct rq *rq, unsigned count) sched_update_tick_dependency(rq); } +static inline void __block_task(struct rq *rq, struct task_struct *p) +{ + WRITE_ONCE(p->on_rq, 0); + ASSERT_EXCLUSIVE_WRITER(p->on_rq); + if (p->sched_contributes_to_load) + rq->nr_uninterruptible++; + + if (p->in_iowait) { + atomic_inc(&rq->nr_iowait); + delayacct_blkio_start(); + } +} + extern void activate_task(struct rq *rq, struct task_struct *p, int flags); extern void deactivate_task(struct rq *rq, struct task_struct *p, int flags); -- cgit v1.2.3 From abc158c82ae555078aa5dd2d8407c3df0f868904 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 May 2024 10:55:59 +0200 Subject: sched: Prepare generic code for delayed dequeue While most of the delayed dequeue code can be done inside the sched_class itself, there is one location where we do not have an appropriate hook, namely ttwu_runnable(). Add an ENQUEUE_DELAYED call to the on_rq path to deal with waking delayed dequeue tasks. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105029.200000445@infradead.org --- include/linux/sched.h | 1 + kernel/sched/core.c | 14 +++++++++++++- kernel/sched/sched.h | 2 ++ 3 files changed, 16 insertions(+), 1 deletion(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 2c1b4ee3234f..f4a648e739e1 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -544,6 +544,7 @@ struct sched_entity { struct list_head group_node; unsigned int on_rq; + unsigned int sched_delayed; u64 exec_start; u64 sum_exec_runtime; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 6c595485bcbc..7356464155a1 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2036,6 +2036,8 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags) void deactivate_task(struct rq *rq, struct task_struct *p, int flags) { + SCHED_WARN_ON(flags & DEQUEUE_SLEEP); + WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING); ASSERT_EXCLUSIVE_WRITER(p->on_rq); @@ -3689,12 +3691,14 @@ static int ttwu_runnable(struct task_struct *p, int wake_flags) rq = __task_rq_lock(p, &rf); if (task_on_rq_queued(p)) { + update_rq_clock(rq); + if (p->se.sched_delayed) + enqueue_task(rq, p, ENQUEUE_NOCLOCK | ENQUEUE_DELAYED); if (!task_on_cpu(rq, p)) { /* * When on_rq && !on_cpu the task is preempted, see if * it should preempt the task that is current now. */ - update_rq_clock(rq); wakeup_preempt(rq, p, wake_flags); } ttwu_do_wakeup(p); @@ -4074,11 +4078,16 @@ int try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags) * case the whole 'p->on_rq && ttwu_runnable()' case below * without taking any locks. * + * Specifically, given current runs ttwu() we must be before + * schedule()'s block_task(), as such this must not observe + * sched_delayed. + * * In particular: * - we rely on Program-Order guarantees for all the ordering, * - we're serialized against set_special_state() by virtue of * it disabling IRQs (this allows not taking ->pi_lock). 
*/ + SCHED_WARN_ON(p->se.sched_delayed); if (!ttwu_state_match(p, state, &success)) goto out; @@ -4370,6 +4379,9 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) p->se.slice = sysctl_sched_base_slice; INIT_LIST_HEAD(&p->se.group_node); + /* A delayed task cannot be in clone(). */ + SCHED_WARN_ON(p->se.sched_delayed); + #ifdef CONFIG_FAIR_GROUP_SCHED p->se.cfs_rq = NULL; #endif diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 69ab3b0289c0..ffca9779519c 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2253,6 +2253,7 @@ extern const u32 sched_prio_to_wmult[40]; #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ #define DEQUEUE_MIGRATING 0x100 /* Matches ENQUEUE_MIGRATING */ +#define DEQUEUE_DELAYED 0x200 /* Matches ENQUEUE_DELAYED */ #define ENQUEUE_WAKEUP 0x01 #define ENQUEUE_RESTORE 0x02 @@ -2268,6 +2269,7 @@ extern const u32 sched_prio_to_wmult[40]; #endif #define ENQUEUE_INITIAL 0x80 #define ENQUEUE_MIGRATING 0x100 +#define ENQUEUE_DELAYED 0x200 #define RETRY_TASK ((void *)-1UL) -- cgit v1.2.3 From dfa0a574cbc47bfd5f8985f74c8ea003a37fa078 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 5 Jun 2024 12:09:11 +0200 Subject: sched/uclamg: Handle delayed dequeue Delayed dequeue has tasks sit around on the runqueue that are not actually runnable -- specifically, they will be dequeued the moment they get picked. One side-effect is that such a task can get migrated, which leads to a 'nested' dequeue_task() scenario that messes up uclamp if we don't take care. Notably, dequeue_task(DEQUEUE_SLEEP) can 'fail' and keep the task on the runqueue. This however will have removed the task from uclamp -- per uclamp_rq_dec() in dequeue_task(). So far so good. However, if at that point the task gets migrated -- or nice adjusted or any of a myriad of operations that does a dequeue-enqueue cycle -- we'll pass through dequeue_task()/enqueue_task() again. Without modification this will lead to a double decrement for uclamp, which is wrong. Reported-by: Luis Machado Reported-by: Hongyan Xia Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105029.315205425@infradead.org --- kernel/sched/core.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 7356464155a1..80e639e23140 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -1691,6 +1691,9 @@ static inline void uclamp_rq_inc(struct rq *rq, struct task_struct *p) if (unlikely(!p->sched_class->uclamp_enabled)) return; + if (p->se.sched_delayed) + return; + for_each_clamp_id(clamp_id) uclamp_rq_inc_id(rq, p, clamp_id); @@ -1715,6 +1718,9 @@ static inline void uclamp_rq_dec(struct rq *rq, struct task_struct *p) if (unlikely(!p->sched_class->uclamp_enabled)) return; + if (p->se.sched_delayed) + return; + for_each_clamp_id(clamp_id) uclamp_rq_dec_id(rq, p, clamp_id); } @@ -1994,8 +2000,12 @@ void enqueue_task(struct rq *rq, struct task_struct *p, int flags) psi_enqueue(p, (flags & ENQUEUE_WAKEUP) && !(flags & ENQUEUE_MIGRATED)); } - uclamp_rq_inc(rq, p); p->sched_class->enqueue_task(rq, p, flags); + /* + * Must be after ->enqueue_task() because ENQUEUE_DELAYED can clear + * ->sched_delayed. 
+ */ + uclamp_rq_inc(rq, p); if (sched_core_enabled(rq)) sched_core_enqueue(rq, p); @@ -2017,6 +2027,10 @@ inline bool dequeue_task(struct rq *rq, struct task_struct *p, int flags) psi_dequeue(p, flags & DEQUEUE_SLEEP); } + /* + * Must be before ->dequeue_task() because ->dequeue_task() can 'fail' + * and mark the task ->sched_delayed. + */ uclamp_rq_dec(rq, p); return p->sched_class->dequeue_task(rq, p, flags); } -- cgit v1.2.3 From e28b5f8bda01720b5ce8456b48cf4b963f9a80a1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 May 2024 11:00:10 +0200 Subject: sched/fair: Assert {set_next,put_prev}_entity() are properly balanced Just a little sanity test.. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105029.486423066@infradead.org --- kernel/sched/fair.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 59b00d78d456..37acd530f413 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5452,6 +5452,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) } update_stats_curr_start(cfs_rq, se); + SCHED_WARN_ON(cfs_rq->curr); cfs_rq->curr = se; /* @@ -5513,6 +5514,7 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) /* in !on_rq case, update occurred at dequeue */ update_load_avg(cfs_rq, prev, 0); } + SCHED_WARN_ON(cfs_rq->curr != prev); cfs_rq->curr = NULL; } -- cgit v1.2.3 From 2e0199df252a536a03f4cb0810324dff523d1e79 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 May 2024 11:03:42 +0200 Subject: sched/fair: Prepare exit/cleanup paths for delayed_dequeue When dequeue_task() is delayed it becomes possible to exit a task (or cgroup) that is still enqueued. Ensure things are dequeued before freeing. Thanks to Valentin for asking the obvious questions and making switched_from_fair() less weird. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105029.631948434@infradead.org --- kernel/sched/fair.c | 59 +++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 13 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 37acd530f413..9a849030f19c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8342,7 +8342,21 @@ static void migrate_task_rq_fair(struct task_struct *p, int new_cpu) static void task_dead_fair(struct task_struct *p) { - remove_entity_load_avg(&p->se); + struct sched_entity *se = &p->se; + + if (se->sched_delayed) { + struct rq_flags rf; + struct rq *rq; + + rq = task_rq_lock(p, &rf); + if (se->sched_delayed) { + update_rq_clock(rq); + dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); + } + task_rq_unlock(rq, p, &rf); + } + + remove_entity_load_avg(se); } /* @@ -12854,10 +12868,22 @@ static void attach_task_cfs_rq(struct task_struct *p) static void switched_from_fair(struct rq *rq, struct task_struct *p) { detach_task_cfs_rq(p); + /* + * Since this is called after changing class, this is a little weird + * and we cannot use DEQUEUE_DELAYED. 
+ */ + if (p->se.sched_delayed) { + dequeue_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SLEEP); + p->se.sched_delayed = 0; + if (sched_feat(DELAY_ZERO) && p->se.vlag > 0) + p->se.vlag = 0; + } } static void switched_to_fair(struct rq *rq, struct task_struct *p) { + SCHED_WARN_ON(p->se.sched_delayed); + attach_task_cfs_rq(p); set_task_max_allowed_capacity(p); @@ -13008,28 +13034,35 @@ void online_fair_sched_group(struct task_group *tg) void unregister_fair_sched_group(struct task_group *tg) { - unsigned long flags; - struct rq *rq; int cpu; destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); for_each_possible_cpu(cpu) { - if (tg->se[cpu]) - remove_entity_load_avg(tg->se[cpu]); + struct cfs_rq *cfs_rq = tg->cfs_rq[cpu]; + struct sched_entity *se = tg->se[cpu]; + struct rq *rq = cpu_rq(cpu); + + if (se) { + if (se->sched_delayed) { + guard(rq_lock_irqsave)(rq); + if (se->sched_delayed) { + update_rq_clock(rq); + dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); + } + list_del_leaf_cfs_rq(cfs_rq); + } + remove_entity_load_avg(se); + } /* * Only empty task groups can be destroyed; so we can speculatively * check on_list without danger of it being re-added. */ - if (!tg->cfs_rq[cpu]->on_list) - continue; - - rq = cpu_rq(cpu); - - raw_spin_rq_lock_irqsave(rq, flags); - list_del_leaf_cfs_rq(tg->cfs_rq[cpu]); - raw_spin_rq_unlock_irqrestore(rq, flags); + if (cfs_rq->on_list) { + guard(rq_lock_irqsave)(rq); + list_del_leaf_cfs_rq(cfs_rq); + } } } -- cgit v1.2.3 From f12e148892ede8d9ee82bcd3e469e6d01fc077ac Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 May 2024 11:26:25 +0200 Subject: sched/fair: Prepare pick_next_task() for delayed dequeue Delayed dequeue's natural end is when it gets picked again. Ensure pick_next_task() knows what to do with delayed tasks. Note, this relies on the earlier patch that made pick_next_task() state invariant -- it will restart the pick on dequeue, because obviously the just dequeued task is no longer eligible. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105029.747330118@infradead.org --- kernel/sched/fair.c | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 9a849030f19c..a4f1f79ac90c 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5473,6 +5473,8 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) se->prev_sum_exec_runtime = se->sum_exec_runtime; } +static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags); + /* * Pick the next process, keeping these things in mind, in this order: * 1) keep things fair between processes/task groups @@ -5481,16 +5483,27 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) * 4) do not run the "skip" process, if something else is available */ static struct sched_entity * -pick_next_entity(struct cfs_rq *cfs_rq) +pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq) { /* * Enabling NEXT_BUDDY will affect latency but not fairness. 
*/ if (sched_feat(NEXT_BUDDY) && - cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) + cfs_rq->next && entity_eligible(cfs_rq, cfs_rq->next)) { + /* ->next will never be delayed */ + SCHED_WARN_ON(cfs_rq->next->sched_delayed); return cfs_rq->next; + } + + struct sched_entity *se = pick_eevdf(cfs_rq); + if (se->sched_delayed) { + dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); + SCHED_WARN_ON(se->sched_delayed); + SCHED_WARN_ON(se->on_rq); - return pick_eevdf(cfs_rq); + return NULL; + } + return se; } static bool check_cfs_rq_runtime(struct cfs_rq *cfs_rq); @@ -8507,7 +8520,9 @@ again: if (unlikely(check_cfs_rq_runtime(cfs_rq))) goto again; - se = pick_next_entity(cfs_rq); + se = pick_next_entity(rq, cfs_rq); + if (!se) + goto again; cfs_rq = group_cfs_rq(se); } while (cfs_rq); -- cgit v1.2.3 From 781773e3b68031bd001c0c18aa72e8470c225ebd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 May 2024 11:57:43 +0200 Subject: sched/fair: Implement ENQUEUE_DELAYED Doing a wakeup on a delayed dequeue task is about as simple as it sounds -- remove the delayed mark and enjoy the fact it was actually still on the runqueue. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105029.888107381@infradead.org --- kernel/sched/fair.c | 33 +++++++++++++++++++++++++++++++-- 1 file changed, 31 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index a4f1f79ac90c..25b14dffeb37 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5289,6 +5289,9 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq); static inline bool cfs_bandwidth_used(void); +static void +requeue_delayed_entity(struct sched_entity *se); + static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { @@ -5922,8 +5925,10 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) for_each_sched_entity(se) { struct cfs_rq *qcfs_rq = cfs_rq_of(se); - if (se->on_rq) + if (se->on_rq) { + SCHED_WARN_ON(se->sched_delayed); break; + } enqueue_entity(qcfs_rq, se, ENQUEUE_WAKEUP); if (cfs_rq_is_idle(group_cfs_rq(se))) @@ -6773,6 +6778,22 @@ static int sched_idle_cpu(int cpu) } #endif +static void +requeue_delayed_entity(struct sched_entity *se) +{ + struct cfs_rq *cfs_rq = cfs_rq_of(se); + + /* + * se->sched_delayed should imply: se->on_rq == 1. + * Because a delayed entity is one that is still on + * the runqueue competing until elegibility. + */ + SCHED_WARN_ON(!se->sched_delayed); + SCHED_WARN_ON(!se->on_rq); + + se->sched_delayed = 0; +} + /* * The enqueue_task method is called before nr_running is * increased. Here we update the fair scheduling stats and @@ -6787,6 +6808,11 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) int task_new = !(flags & ENQUEUE_WAKEUP); int rq_h_nr_running = rq->cfs.h_nr_running; + if (flags & ENQUEUE_DELAYED) { + requeue_delayed_entity(se); + return; + } + /* * The code below (indirectly) updates schedutil which looks at * the cfs_rq utilization to select a frequency. 
@@ -6804,8 +6830,11 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT); for_each_sched_entity(se) { - if (se->on_rq) + if (se->on_rq) { + if (se->sched_delayed) + requeue_delayed_entity(se); break; + } cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, flags); -- cgit v1.2.3 From a1c446611e31ca5363d4db51e398271da1dce0af Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 1 Jul 2024 21:30:09 +0200 Subject: sched,freezer: Mark TASK_FROZEN special The special task states are those that do not suffer spurious wakeups, TASK_FROZEN is very much one of those, mark it as such. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105029.998329901@infradead.org --- include/linux/sched.h | 5 +++-- kernel/freezer.c | 2 +- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index f4a648e739e1..8a3a389bd623 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -149,8 +149,9 @@ struct user_event_mm; * Special states are those that do not use the normal wait-loop pattern. See * the comment with set_special_state(). */ -#define is_special_task_state(state) \ - ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD)) +#define is_special_task_state(state) \ + ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | \ + TASK_DEAD | TASK_FROZEN)) #ifdef CONFIG_DEBUG_ATOMIC_SLEEP # define debug_normal_state_change(state_value) \ diff --git a/kernel/freezer.c b/kernel/freezer.c index f57aaf96b829..44bbd7dbd2c8 100644 --- a/kernel/freezer.c +++ b/kernel/freezer.c @@ -72,7 +72,7 @@ bool __refrigerator(bool check_kthr_stop) bool freeze; raw_spin_lock_irq(¤t->pi_lock); - set_current_state(TASK_FROZEN); + WRITE_ONCE(current->__state, TASK_FROZEN); /* unstale saved_state so that __thaw_task() will wake us up */ current->saved_state = TASK_RUNNING; raw_spin_unlock_irq(¤t->pi_lock); -- cgit v1.2.3 From e1459a50ba31831efdfc35278023d959e4ba775b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 1 Jul 2024 21:38:11 +0200 Subject: sched: Teach dequeue_task() about special task states Since special task states must not suffer spurious wakeups, and the proposed delayed dequeue can cause exactly these (under some boundary conditions), propagate this knowledge into dequeue_task() such that it can do the right thing. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105030.110439521@infradead.org --- kernel/sched/core.c | 7 ++++++- kernel/sched/sched.h | 3 ++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 80e639e23140..868b71b9f2e4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6530,11 +6530,16 @@ static void __sched notrace __schedule(unsigned int sched_mode) if (signal_pending_state(prev_state, prev)) { WRITE_ONCE(prev->__state, TASK_RUNNING); } else { + int flags = DEQUEUE_NOCLOCK; + prev->sched_contributes_to_load = (prev_state & TASK_UNINTERRUPTIBLE) && !(prev_state & TASK_NOLOAD) && !(prev_state & TASK_FROZEN); + if (unlikely(is_special_task_state(prev_state))) + flags |= DEQUEUE_SPECIAL; + /* * __schedule() ttwu() * prev_state = prev->state; if (p->on_rq && ...) @@ -6546,7 +6551,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) * * After this, schedule() must not care about p->state any more. 
*/ - block_task(rq, prev, DEQUEUE_NOCLOCK); + block_task(rq, prev, flags); } switch_count = &prev->nvcsw; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index ffca9779519c..263b4ded2b47 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2248,10 +2248,11 @@ extern const u32 sched_prio_to_wmult[40]; * */ -#define DEQUEUE_SLEEP 0x01 +#define DEQUEUE_SLEEP 0x01 /* Matches ENQUEUE_WAKEUP */ #define DEQUEUE_SAVE 0x02 /* Matches ENQUEUE_RESTORE */ #define DEQUEUE_MOVE 0x04 /* Matches ENQUEUE_MOVE */ #define DEQUEUE_NOCLOCK 0x08 /* Matches ENQUEUE_NOCLOCK */ +#define DEQUEUE_SPECIAL 0x10 #define DEQUEUE_MIGRATING 0x100 /* Matches ENQUEUE_MIGRATING */ #define DEQUEUE_DELAYED 0x200 /* Matches ENQUEUE_DELAYED */ -- cgit v1.2.3 From 152e11f6df293e816a6a37c69757033cdc72667d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 May 2024 12:25:32 +0200 Subject: sched/fair: Implement delayed dequeue Extend / fix 86bfbb7ce4f6 ("sched/fair: Add lag based placement") by noting that lag is fundamentally a temporal measure. It should not be carried around indefinitely. OTOH it should also not be instantly discarded, doing so will allow a task to game the system by purposefully (micro) sleeping at the end of its time quantum. Since lag is intimately tied to the virtual time base, a wall-time based decay is also insufficient, notably competition is required for any of this to make sense. Instead, delay the dequeue and keep the 'tasks' on the runqueue, competing until they are eligible. Strictly speaking, we only care about keeping them until the 0-lag point, but that is a difficult proposition, instead carry them around until they get picked again, and dequeue them at that point. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105030.226163742@infradead.org --- kernel/sched/deadline.c | 1 - kernel/sched/fair.c | 80 ++++++++++++++++++++++++++++++++++++++++++------- kernel/sched/features.h | 9 ++++++ 3 files changed, 79 insertions(+), 11 deletions(-) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index bbaeace9d1e3..0f2df67f710b 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2428,7 +2428,6 @@ again: else p = dl_se->server_pick_next(dl_se); if (!p) { - WARN_ON_ONCE(1); dl_se->dl_yielded = 1; update_curr_dl_se(rq, dl_se, 0); goto again; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 25b14dffeb37..da5065a226ee 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5379,19 +5379,38 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); -static void +static bool dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { - int action = UPDATE_TG; + update_curr(cfs_rq); + + if (flags & DEQUEUE_DELAYED) { + SCHED_WARN_ON(!se->sched_delayed); + } else { + bool sleep = flags & DEQUEUE_SLEEP; + /* + * DELAY_DEQUEUE relies on spurious wakeups, special task + * states must not suffer spurious wakeups, excempt them. 
+ */ + if (flags & DEQUEUE_SPECIAL) + sleep = false; + + SCHED_WARN_ON(sleep && se->sched_delayed); + + if (sched_feat(DELAY_DEQUEUE) && sleep && + !entity_eligible(cfs_rq, se)) { + if (cfs_rq->next == se) + cfs_rq->next = NULL; + se->sched_delayed = 1; + return false; + } + } + + int action = UPDATE_TG; if (entity_is_task(se) && task_on_rq_migrating(task_of(se))) action |= DO_DETACH; - /* - * Update run-time statistics of the 'current'. - */ - update_curr(cfs_rq); - /* * When dequeuing a sched_entity, we must: * - Update loads to have both entity and cfs_rq synced with now. @@ -5428,8 +5447,13 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) update_min_vruntime(cfs_rq); + if (flags & DEQUEUE_DELAYED) + se->sched_delayed = 0; + if (cfs_rq->nr_running == 0) update_idle_cfs_rq_clock_pelt(cfs_rq); + + return true; } static void @@ -5828,11 +5852,21 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq) idle_task_delta = cfs_rq->idle_h_nr_running; for_each_sched_entity(se) { struct cfs_rq *qcfs_rq = cfs_rq_of(se); + int flags; + /* throttled entity or throttle-on-deactivate */ if (!se->on_rq) goto done; - dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); + /* + * Abuse SPECIAL to avoid delayed dequeue in this instance. + * This avoids teaching dequeue_entities() about throttled + * entities and keeps things relatively simple. + */ + flags = DEQUEUE_SLEEP | DEQUEUE_SPECIAL; + if (se->sched_delayed) + flags |= DEQUEUE_DELAYED; + dequeue_entity(qcfs_rq, se, flags); if (cfs_rq_is_idle(group_cfs_rq(se))) idle_task_delta = cfs_rq->h_nr_running; @@ -6918,6 +6952,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) bool was_sched_idle = sched_idle_rq(rq); int rq_h_nr_running = rq->cfs.h_nr_running; bool task_sleep = flags & DEQUEUE_SLEEP; + bool task_delayed = flags & DEQUEUE_DELAYED; struct task_struct *p = NULL; int idle_h_nr_running = 0; int h_nr_running = 0; @@ -6931,7 +6966,13 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); - dequeue_entity(cfs_rq, se, flags); + + if (!dequeue_entity(cfs_rq, se, flags)) { + if (p && &p->se == se) + return -1; + + break; + } cfs_rq->h_nr_running -= h_nr_running; cfs_rq->idle_h_nr_running -= idle_h_nr_running; @@ -6956,6 +6997,7 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) break; } flags |= DEQUEUE_SLEEP; + flags &= ~(DEQUEUE_DELAYED | DEQUEUE_SPECIAL); } for_each_sched_entity(se) { @@ -6985,6 +7027,17 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) if (unlikely(!was_sched_idle && sched_idle_rq(rq))) rq->next_balance = jiffies; + if (p && task_delayed) { + SCHED_WARN_ON(!task_sleep); + SCHED_WARN_ON(p->on_rq != 1); + + /* Fix-up what dequeue_task_fair() skipped */ + hrtick_update(rq); + + /* Fix-up what block_task() skipped. 
*/ + __block_task(rq, p); + } + return 1; } @@ -6997,8 +7050,10 @@ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) { util_est_dequeue(&rq->cfs, p); - if (dequeue_entities(rq, &p->se, flags) < 0) + if (dequeue_entities(rq, &p->se, flags) < 0) { + util_est_update(&rq->cfs, p, DEQUEUE_SLEEP); return false; + } util_est_update(&rq->cfs, p, flags & DEQUEUE_SLEEP); hrtick_update(rq); @@ -12971,6 +13026,11 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) /* ensure bandwidth has been allocated on our new cfs_rq */ account_cfs_rq_runtime(cfs_rq, 0); } + + if (!first) + return; + + SCHED_WARN_ON(se->sched_delayed); } void init_cfs_rq(struct cfs_rq *cfs_rq) diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 97fb2d492089..1feaa7bbc278 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -28,6 +28,15 @@ SCHED_FEAT(NEXT_BUDDY, false) */ SCHED_FEAT(CACHE_HOT_BUDDY, true) +/* + * Delay dequeueing tasks until they get selected or woken. + * + * By delaying the dequeue for non-eligible tasks, they remain in the + * competition and can burn off their negative lag. When they get selected + * they'll have positive lag by definition. + */ +SCHED_FEAT(DELAY_DEQUEUE, true) + /* * Allow wakeup-time preemption of the current task: */ -- cgit v1.2.3 From 54a58a78779169f9c92a51facf6de7ce94962328 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 23 May 2024 12:26:06 +0200 Subject: sched/fair: Implement DELAY_ZERO 'Extend' DELAY_DEQUEUE by noting that since we wanted to dequeued them at the 0-lag point, truncate lag (eg. don't let them earn positive lag). Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105030.403750550@infradead.org --- kernel/sched/fair.c | 20 ++++++++++++++++++-- kernel/sched/features.h | 3 +++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index da5065a226ee..1a593393e308 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5447,8 +5447,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) update_min_vruntime(cfs_rq); - if (flags & DEQUEUE_DELAYED) + if (flags & DEQUEUE_DELAYED) { se->sched_delayed = 0; + if (sched_feat(DELAY_ZERO) && se->vlag > 0) + se->vlag = 0; + } if (cfs_rq->nr_running == 0) update_idle_cfs_rq_clock_pelt(cfs_rq); @@ -5527,7 +5530,6 @@ pick_next_entity(struct rq *rq, struct cfs_rq *cfs_rq) dequeue_entities(rq, se, DEQUEUE_SLEEP | DEQUEUE_DELAYED); SCHED_WARN_ON(se->sched_delayed); SCHED_WARN_ON(se->on_rq); - return NULL; } return se; @@ -6825,6 +6827,20 @@ requeue_delayed_entity(struct sched_entity *se) SCHED_WARN_ON(!se->sched_delayed); SCHED_WARN_ON(!se->on_rq); + if (sched_feat(DELAY_ZERO)) { + update_entity_lag(cfs_rq, se); + if (se->vlag > 0) { + cfs_rq->nr_running--; + if (se != cfs_rq->curr) + __dequeue_entity(cfs_rq, se); + se->vlag = 0; + place_entity(cfs_rq, se, 0); + if (se != cfs_rq->curr) + __enqueue_entity(cfs_rq, se); + cfs_rq->nr_running++; + } + } + se->sched_delayed = 0; } diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 1feaa7bbc278..7fdeb5576188 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -34,8 +34,11 @@ SCHED_FEAT(CACHE_HOT_BUDDY, true) * By delaying the dequeue for non-eligible tasks, they remain in the * competition and can burn off their negative lag. 
When they get selected * they'll have positive lag by definition. + * + * DELAY_ZERO clips the lag on dequeue (or wakeup) to 0. */ SCHED_FEAT(DELAY_DEQUEUE, true) +SCHED_FEAT(DELAY_ZERO, true) /* * Allow wakeup-time preemption of the current task: -- cgit v1.2.3 From fc1892becd5672f52329a75c73117b60ac7841b7 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 26 Apr 2024 13:00:50 +0200 Subject: sched/eevdf: Fixup PELT vs DELAYED_DEQUEUE Note that tasks that are kept on the runqueue to burn off negative lag, are not in fact runnable anymore, they'll get dequeued the moment they get picked. As such, don't count this time towards runnable. Thanks to Valentin for spotting I had this backwards initially. Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105030.514088302@infradead.org --- kernel/sched/fair.c | 2 ++ kernel/sched/sched.h | 6 ++++++ 2 files changed, 8 insertions(+) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 1a593393e308..0eb1bbf7f269 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5402,6 +5402,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) !entity_eligible(cfs_rq, se)) { if (cfs_rq->next == se) cfs_rq->next = NULL; + update_load_avg(cfs_rq, se, 0); se->sched_delayed = 1; return false; } @@ -6841,6 +6842,7 @@ requeue_delayed_entity(struct sched_entity *se) } } + update_load_avg(cfs_rq, se, 0); se->sched_delayed = 0; } diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 263b4ded2b47..2f5d658c0631 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -820,6 +820,9 @@ static inline void se_update_runnable(struct sched_entity *se) static inline long se_runnable(struct sched_entity *se) { + if (se->sched_delayed) + return false; + if (entity_is_task(se)) return !!se->on_rq; else @@ -834,6 +837,9 @@ static inline void se_update_runnable(struct sched_entity *se) { } static inline long se_runnable(struct sched_entity *se) { + if (se->sched_delayed) + return false; + return !!se->on_rq; } -- cgit v1.2.3 From 82e9d0456e06cebe2c89f3c73cdbc9e3805e9437 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 31 May 2024 15:49:40 +0200 Subject: sched/fair: Avoid re-setting virtual deadline on 'migrations' During OSPM24 Youssef noted that migrations are re-setting the virtual deadline. Notably everything that does a dequeue-enqueue, like setting nice, changing preferred numa-node, and a myriad of other random crap, will cause this to happen. This shouldn't be. Preserve the relative virtual deadline across such dequeue/enqueue cycles. 
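As a rough illustration of that idea (a sketch with simplified standalone types, not the patch below, which operates on struct sched_entity): on a non-sleep dequeue the deadline is stored as an offset from vruntime, and on the next enqueue it is re-anchored to the new vruntime instead of being recomputed from scratch.

	/* Sketch only: simplified types, no locking, not the kernel code. */
	struct entity {
		unsigned long long	vruntime;
		unsigned long long	deadline;
		unsigned char		rel_deadline;	/* deadline stored relative to vruntime */
	};

	static void dequeue_save_deadline(struct entity *e)
	{
		/* unsigned wrap-around is fine; the add below undoes it */
		e->deadline -= e->vruntime;
		e->rel_deadline = 1;
	}

	static void enqueue_restore_deadline(struct entity *e)
	{
		if (e->rel_deadline) {
			e->deadline += e->vruntime;	/* re-anchor, do not reset */
			e->rel_deadline = 0;
		}
	}

The point of the design is that only the anchor (vruntime) moves across the cycle; the remaining part of the current request is carried over, so nice changes, preferred-node changes and similar dequeue/enqueue churn no longer hand out a fresh virtual deadline.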
Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Valentin Schneider Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105030.625119246@infradead.org --- include/linux/sched.h | 6 ++++-- kernel/sched/fair.c | 23 ++++++++++++++++++----- kernel/sched/features.h | 4 ++++ 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 8a3a389bd623..d25e1cfd5766 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -544,8 +544,10 @@ struct sched_entity { u64 min_vruntime; struct list_head group_node; - unsigned int on_rq; - unsigned int sched_delayed; + unsigned char on_rq; + unsigned char sched_delayed; + unsigned char rel_deadline; + /* hole */ u64 exec_start; u64 sum_exec_runtime; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0eb1bbf7f269..fef0e1f26cd8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5270,6 +5270,12 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) se->vruntime = vruntime - lag; + if (sched_feat(PLACE_REL_DEADLINE) && se->rel_deadline) { + se->deadline += se->vruntime; + se->rel_deadline = 0; + return; + } + /* * When joining the competition; the existing tasks will be, * on average, halfway through their slice, as such start tasks @@ -5382,23 +5388,24 @@ static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); static bool dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { + bool sleep = flags & DEQUEUE_SLEEP; + update_curr(cfs_rq); if (flags & DEQUEUE_DELAYED) { SCHED_WARN_ON(!se->sched_delayed); } else { - bool sleep = flags & DEQUEUE_SLEEP; - + bool delay = sleep; /* * DELAY_DEQUEUE relies on spurious wakeups, special task * states must not suffer spurious wakeups, excempt them. */ if (flags & DEQUEUE_SPECIAL) - sleep = false; + delay = false; - SCHED_WARN_ON(sleep && se->sched_delayed); + SCHED_WARN_ON(delay && se->sched_delayed); - if (sched_feat(DELAY_DEQUEUE) && sleep && + if (sched_feat(DELAY_DEQUEUE) && delay && !entity_eligible(cfs_rq, se)) { if (cfs_rq->next == se) cfs_rq->next = NULL; @@ -5429,6 +5436,11 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) clear_buddies(cfs_rq, se); update_entity_lag(cfs_rq, se); + if (sched_feat(PLACE_REL_DEADLINE) && !sleep) { + se->deadline -= se->vruntime; + se->rel_deadline = 1; + } + if (se != cfs_rq->curr) __dequeue_entity(cfs_rq, se); se->on_rq = 0; @@ -12992,6 +13004,7 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p) if (p->se.sched_delayed) { dequeue_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SLEEP); p->se.sched_delayed = 0; + p->se.rel_deadline = 0; if (sched_feat(DELAY_ZERO) && p->se.vlag > 0) p->se.vlag = 0; } diff --git a/kernel/sched/features.h b/kernel/sched/features.h index 7fdeb5576188..caa4d7221d52 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -9,6 +9,10 @@ SCHED_FEAT(PLACE_LAG, true) * Give new tasks half a slice to ease into the competition. */ SCHED_FEAT(PLACE_DEADLINE_INITIAL, true) +/* + * Preserve relative virtual deadline on 'migration'. + */ +SCHED_FEAT(PLACE_REL_DEADLINE, true) /* * Inhibit (wakeup) preemption until the current task has either matched the * 0-lag point or until is has exhausted it's slice. 
-- cgit v1.2.3 From 85e511df3cec46021024176672a748008ed135bf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 26 Sep 2023 14:32:32 +0200 Subject: sched/eevdf: Allow shorter slices to wakeup-preempt Part of the reason to have shorter slices is to improve responsiveness. Allow shorter slices to preempt longer slices on wakeup. Task | Runtime ms | Switches | Avg delay ms | Max delay ms | Sum delay ms | 100ms massive_intr 500us cyclictest NO_PREEMPT_SHORT 1 massive_intr:(5) | 846018.956 ms | 779188 | avg: 0.273 ms | max: 58.337 ms | sum:212545.245 ms | 2 massive_intr:(5) | 853450.693 ms | 792269 | avg: 0.275 ms | max: 71.193 ms | sum:218263.588 ms | 3 massive_intr:(5) | 843888.920 ms | 771456 | avg: 0.277 ms | max: 92.405 ms | sum:213353.221 ms | 1 chromium-browse:(8) | 53015.889 ms | 131766 | avg: 0.463 ms | max: 36.341 ms | sum:60959.230 ms | 2 chromium-browse:(8) | 53864.088 ms | 136962 | avg: 0.480 ms | max: 27.091 ms | sum:65687.681 ms | 3 chromium-browse:(9) | 53637.904 ms | 132637 | avg: 0.481 ms | max: 24.756 ms | sum:63781.673 ms | 1 cyclictest:(5) | 12615.604 ms | 639689 | avg: 0.471 ms | max: 32.272 ms | sum:301351.094 ms | 2 cyclictest:(5) | 12511.583 ms | 642578 | avg: 0.448 ms | max: 44.243 ms | sum:287632.830 ms | 3 cyclictest:(5) | 12545.867 ms | 635953 | avg: 0.475 ms | max: 25.530 ms | sum:302374.658 ms | 100ms massive_intr 500us cyclictest PREEMPT_SHORT 1 massive_intr:(5) | 839843.919 ms | 837384 | avg: 0.264 ms | max: 74.366 ms | sum:221476.885 ms | 2 massive_intr:(5) | 852449.913 ms | 845086 | avg: 0.252 ms | max: 68.162 ms | sum:212595.968 ms | 3 massive_intr:(5) | 839180.725 ms | 836883 | avg: 0.266 ms | max: 69.742 ms | sum:222812.038 ms | 1 chromium-browse:(11) | 54591.481 ms | 138388 | avg: 0.458 ms | max: 35.427 ms | sum:63401.508 ms | 2 chromium-browse:(8) | 52034.541 ms | 132276 | avg: 0.436 ms | max: 31.826 ms | sum:57732.958 ms | 3 chromium-browse:(8) | 55231.771 ms | 141892 | avg: 0.469 ms | max: 27.607 ms | sum:66538.697 ms | 1 cyclictest:(5) | 13156.391 ms | 667412 | avg: 0.373 ms | max: 38.247 ms | sum:249174.502 ms | 2 cyclictest:(5) | 12688.939 ms | 665144 | avg: 0.374 ms | max: 33.548 ms | sum:248509.392 ms | 3 cyclictest:(5) | 13475.623 ms | 669110 | avg: 0.370 ms | max: 37.819 ms | sum:247673.390 ms | As per the numbers the, this makes cyclictest (short slice) it's max-delay more consistent and consistency drops the sum-delay. The trade-off is that the massive_intr (long slice) gets more context switches and a slight increase in sum-delay. Chunxin contributed did_preempt_short() where a task that lost slice protection from PREEMPT_SHORT gets rescheduled once it becomes in-eligible. [mike: numbers] Co-Developed-by: Chunxin Zang Signed-off-by: Chunxin Zang Signed-off-by: Peter Zijlstra (Intel) Tested-by: Valentin Schneider Tested-by: Mike Galbraith Link: https://lkml.kernel.org/r/20240727105030.735459544@infradead.org --- kernel/sched/fair.c | 64 ++++++++++++++++++++++++++++++++++++++++++------- kernel/sched/features.h | 5 ++++ 2 files changed, 61 insertions(+), 8 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fef0e1f26cd8..cc30ea3a84e2 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -973,10 +973,10 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se); * XXX: strictly: vd_i += N*r_i/w_i such that: vd_i > ve_i * this is probably good enough. 
*/ -static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) +static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) { if ((s64)(se->vruntime - se->deadline) < 0) - return; + return false; /* * For EEVDF the virtual time slope is determined by w_i (iow. @@ -993,10 +993,7 @@ static void update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) /* * The task has consumed its request, reschedule. */ - if (cfs_rq->nr_running > 1) { - resched_curr(rq_of(cfs_rq)); - clear_buddies(cfs_rq, se); - } + return true; } #include "pelt.h" @@ -1134,6 +1131,38 @@ static inline void update_curr_task(struct task_struct *p, s64 delta_exec) dl_server_update(p->dl_server, delta_exec); } +static inline bool did_preempt_short(struct cfs_rq *cfs_rq, struct sched_entity *curr) +{ + if (!sched_feat(PREEMPT_SHORT)) + return false; + + if (curr->vlag == curr->deadline) + return false; + + return !entity_eligible(cfs_rq, curr); +} + +static inline bool do_preempt_short(struct cfs_rq *cfs_rq, + struct sched_entity *pse, struct sched_entity *se) +{ + if (!sched_feat(PREEMPT_SHORT)) + return false; + + if (pse->slice >= se->slice) + return false; + + if (!entity_eligible(cfs_rq, pse)) + return false; + + if (entity_before(pse, se)) + return true; + + if (!entity_eligible(cfs_rq, se)) + return true; + + return false; +} + /* * Used by other classes to account runtime. */ @@ -1157,6 +1186,7 @@ static void update_curr(struct cfs_rq *cfs_rq) struct sched_entity *curr = cfs_rq->curr; struct rq *rq = rq_of(cfs_rq); s64 delta_exec; + bool resched; if (unlikely(!curr)) return; @@ -1166,7 +1196,7 @@ static void update_curr(struct cfs_rq *cfs_rq) return; curr->vruntime += calc_delta_fair(delta_exec, curr); - update_deadline(cfs_rq, curr); + resched = update_deadline(cfs_rq, curr); update_min_vruntime(cfs_rq); if (entity_is_task(curr)) { @@ -1184,6 +1214,14 @@ static void update_curr(struct cfs_rq *cfs_rq) } account_cfs_rq_runtime(cfs_rq, delta_exec); + + if (rq->nr_running == 1) + return; + + if (resched || did_preempt_short(cfs_rq, curr)) { + resched_curr(rq); + clear_buddies(cfs_rq, curr); + } } static void update_curr_fair(struct rq *rq) @@ -8605,7 +8643,17 @@ static void check_preempt_wakeup_fair(struct rq *rq, struct task_struct *p, int cfs_rq = cfs_rq_of(se); update_curr(cfs_rq); /* - * XXX pick_eevdf(cfs_rq) != se ? + * If @p has a shorter slice than current and @p is eligible, override + * current's slice protection in order to allow preemption. + * + * Note that even if @p does not turn out to be the most eligible + * task at this moment, current's slice protection will be lost. + */ + if (do_preempt_short(cfs_rq, pse, se) && se->vlag == se->deadline) + se->vlag = se->deadline + 1; + + /* + * If @p has become the most eligible task, force preemption. */ if (pick_eevdf(cfs_rq) == pse) goto preempt; diff --git a/kernel/sched/features.h b/kernel/sched/features.h index caa4d7221d52..290874079f60 100644 --- a/kernel/sched/features.h +++ b/kernel/sched/features.h @@ -18,6 +18,11 @@ SCHED_FEAT(PLACE_REL_DEADLINE, true) * 0-lag point or until is has exhausted it's slice. */ SCHED_FEAT(RUN_TO_PARITY, true) +/* + * Allow wakeup of tasks with a shorter slice to cancel RESPECT_SLICE for + * current. 
+ */ +SCHED_FEAT(PREEMPT_SHORT, true) /* * Prefer to schedule the task we woke last (assuming it failed -- cgit v1.2.3 From 857b158dc5e81c6de795ef6be006eed146098fc6 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 22 May 2023 13:46:30 +0200 Subject: sched/eevdf: Use sched_attr::sched_runtime to set request/slice suggestion Allow applications to directly set a suggested request/slice length using sched_attr::sched_runtime. The implementation clamps the value to: 0.1[ms] <= slice <= 100[ms] which is 1/10 the size of HZ=1000 and 10 times the size of HZ=100. Applications should strive to use their periodic runtime at a high confidence interval (95%+) as the target slice. Using a smaller slice will introduce undue preemptions, while using a larger value will increase latency. For all the following examples assume a scheduling quantum of 8, and for consistency all examples have W=4: {A,B,C,D}(w=1,r=8): ABCD... +---+---+---+--- t=0, V=1.5 t=1, V=3.5 A |------< A |------< B |------< B |------< C |------< C |------< D |------< D |------< ---+*------+-------+--- ---+--*----+-------+--- t=2, V=5.5 t=3, V=7.5 A |------< A |------< B |------< B |------< C |------< C |------< D |------< D |------< ---+----*--+-------+--- ---+------*+-------+--- Note: 4 identical tasks in FIFO order ~~~ {A,B}(w=1,r=16) C(w=2,r=16) AACCBBCC... +---+---+---+--- t=0, V=1.25 t=2, V=5.25 A |--------------< A |--------------< B |--------------< B |--------------< C |------< C |------< ---+*------+-------+--- ---+----*--+-------+--- t=4, V=8.25 t=6, V=12.25 A |--------------< A |--------------< B |--------------< B |--------------< C |------< C |------< ---+-------*-------+--- ---+-------+---*---+--- Note: 1 heavy task -- because q=8, double r such that the deadline of the w=2 task doesn't go below q. Note: observe the full schedule becomes: W*max(r_i/w_i) = 4*2q = 8q in length. Note: the period of the heavy task is half the full period at: W*(r_i/w_i) = 4*(2q/2) = 4q ~~~ {A,C,D}(w=1,r=16) B(w=1,r=8): BAACCBDD... +---+---+---+--- t=0, V=1.5 t=1, V=3.5 A |--------------< A |---------------< B |------< B |------< C |--------------< C |--------------< D |--------------< D |--------------< ---+*------+-------+--- ---+--*----+-------+--- t=3, V=7.5 t=5, V=11.5 A |---------------< A |---------------< B |------< B |------< C |--------------< C |--------------< D |--------------< D |--------------< ---+------*+-------+--- ---+-------+--*----+--- t=6, V=13.5 A |---------------< B |------< C |--------------< D |--------------< ---+-------+----*--+--- Note: 1 short task -- again double r so that the deadline of the short task won't be below q. Made B short because its not the leftmost task, but is eligible with the 0,1,2,3 spread. Note: like with the heavy task, the period of the short task observes: W*(r_i/w_i) = 4*(1q/1) = 4q ~~~ A(w=1,r=16) B(w=1,r=8) C(w=2,r=16) BCCAABCC... +---+---+---+--- t=0, V=1.25 t=1, V=3.25 A |--------------< A |--------------< B |------< B |------< C |------< C |------< ---+*------+-------+--- ---+--*----+-------+--- t=3, V=7.25 t=5, V=11.25 A |--------------< A |--------------< B |------< B |------< C |------< C |------< ---+------*+-------+--- ---+-------+--*----+--- t=6, V=13.25 A |--------------< B |------< C |------< ---+-------+----*--+--- Note: 1 heavy and 1 short task -- combine them all. Note: both the short and heavy task end up with a period of 4q ~~~ A(w=1,r=16) B(w=2,r=16) C(w=1,r=8) BBCAABBC... 
+---+---+---+--- t=0, V=1 t=2, V=5 A |--------------< A |--------------< B |------< B |------< C |------< C |------< ---+*------+-------+--- ---+----*--+-------+--- t=3, V=7 t=5, V=11 A |--------------< A |--------------< B |------< B |------< C |------< C |------< ---+------*+-------+--- ---+-------+--*----+--- t=7, V=15 A |--------------< B |------< C |------< ---+-------+------*+--- Note: as before but permuted ~~~ From all this it can be deduced that, for the steady state: - the total period (P) of a schedule is: W*max(r_i/w_i) - the average period of a task is: W*(r_i/w_i) - each task obtains the fair share: w_i/W of each full period P Signed-off-by: Peter Zijlstra (Intel) Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105030.842834421@infradead.org --- include/linux/sched.h | 1 + kernel/sched/core.c | 4 +++- kernel/sched/debug.c | 3 ++- kernel/sched/fair.c | 6 ++++-- kernel/sched/syscalls.c | 29 +++++++++++++++++++++++------ 5 files changed, 33 insertions(+), 10 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index d25e1cfd5766..89a3d8d94e96 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -547,6 +547,7 @@ struct sched_entity { unsigned char on_rq; unsigned char sched_delayed; unsigned char rel_deadline; + unsigned char custom_slice; /* hole */ u64 exec_start; diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 868b71b9f2e4..016581168cb8 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -4390,7 +4390,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p) p->se.nr_migrations = 0; p->se.vruntime = 0; p->se.vlag = 0; - p->se.slice = sysctl_sched_base_slice; INIT_LIST_HEAD(&p->se.group_node); /* A delayed task cannot be in clone(). */ @@ -4643,6 +4642,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) p->prio = p->normal_prio = p->static_prio; set_load_weight(p, false); + p->se.custom_slice = 0; + p->se.slice = sysctl_sched_base_slice; /* * We don't need the reset flag anymore after the fork. It has @@ -8412,6 +8413,7 @@ void __init sched_init(void) } set_load_weight(&init_task, false); + init_task.se.slice = sysctl_sched_base_slice, /* * The boot idle thread does lazy MMU switching as well: diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 831a77ab8466..01ce9a76164c 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -739,11 +739,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) else SEQ_printf(m, " %c", task_state_to_char(p)); - SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld %5d ", + SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld %5d ", p->comm, task_pid_nr(p), SPLIT_NS(p->se.vruntime), entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N', SPLIT_NS(p->se.deadline), + p->se.custom_slice ? 'S' : ' ', SPLIT_NS(p->se.slice), SPLIT_NS(p->se.sum_exec_runtime), (long long)(p->nvcsw + p->nivcsw), diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index cc30ea3a84e2..3284d3cb7147 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -983,7 +983,8 @@ static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se) * nice) while the request time r_i is determined by * sysctl_sched_base_slice. 
*/ - se->slice = sysctl_sched_base_slice; + if (!se->custom_slice) + se->slice = sysctl_sched_base_slice; /* * EEVDF: vd_i = ve_i + r_i / w_i @@ -5227,7 +5228,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) u64 vslice, vruntime = avg_vruntime(cfs_rq); s64 lag = 0; - se->slice = sysctl_sched_base_slice; + if (!se->custom_slice) + se->slice = sysctl_sched_base_slice; vslice = calc_delta_fair(se->slice, se); /* diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c index 60e70c889d91..4fae3cf25a3a 100644 --- a/kernel/sched/syscalls.c +++ b/kernel/sched/syscalls.c @@ -401,10 +401,20 @@ static void __setscheduler_params(struct task_struct *p, p->policy = policy; - if (dl_policy(policy)) + if (dl_policy(policy)) { __setparam_dl(p, attr); - else if (fair_policy(policy)) + } else if (fair_policy(policy)) { p->static_prio = NICE_TO_PRIO(attr->sched_nice); + if (attr->sched_runtime) { + p->se.custom_slice = 1; + p->se.slice = clamp_t(u64, attr->sched_runtime, + NSEC_PER_MSEC/10, /* HZ=1000 * 10 */ + NSEC_PER_MSEC*100); /* HZ=100 / 10 */ + } else { + p->se.custom_slice = 0; + p->se.slice = sysctl_sched_base_slice; + } + } /* * __sched_setscheduler() ensures attr->sched_priority == 0 when @@ -700,7 +710,9 @@ recheck: * but store a possible modification of reset_on_fork. */ if (unlikely(policy == p->policy)) { - if (fair_policy(policy) && attr->sched_nice != task_nice(p)) + if (fair_policy(policy) && + (attr->sched_nice != task_nice(p) || + (attr->sched_runtime != p->se.slice))) goto change; if (rt_policy(policy) && attr->sched_priority != p->rt_priority) goto change; @@ -846,6 +858,9 @@ static int _sched_setscheduler(struct task_struct *p, int policy, .sched_nice = PRIO_TO_NICE(p->static_prio), }; + if (p->se.custom_slice) + attr.sched_runtime = p->se.slice; + /* Fixup the legacy SCHED_RESET_ON_FORK hack. */ if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK; @@ -1012,12 +1027,14 @@ err_size: static void get_params(struct task_struct *p, struct sched_attr *attr) { - if (task_has_dl_policy(p)) + if (task_has_dl_policy(p)) { __getparam_dl(p, attr); - else if (task_has_rt_policy(p)) + } else if (task_has_rt_policy(p)) { attr->sched_priority = p->rt_priority; - else + } else { attr->sched_nice = task_nice(p); + attr->sched_runtime = p->se.slice; + } } /** -- cgit v1.2.3 From aef6987d89544d63a47753cf3741cabff0b5574c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 20 Jun 2024 13:16:49 +0200 Subject: sched/eevdf: Propagate min_slice up the cgroup hierarchy In the absence of an explicit cgroup slice configureation, make mixed slice length work with cgroups by propagating the min_slice up the hierarchy. This ensures the cgroup entity gets timely service to service its entities that have this timing constraint set on them. 
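In rough terms (an illustrative sketch with simplified standalone types, not the augmented-rbtree code in the patch below): when an entity with a small slice is queued somewhere in the hierarchy, every group level above it should advertise a slice no larger than that, so the group itself is scheduled often enough to serve it.

	/* Sketch only: a parent-pointer walk instead of the kernel's augmented rbtree. */
	struct group {
		struct group		*parent;
		unsigned long long	min_slice;	/* ns; smallest slice queued below */
	};

	static void propagate_min_slice(struct group *g, unsigned long long slice)
	{
		/* Only handles the "a smaller request arrived" direction. */
		for (; g; g = g->parent) {
			if (slice >= g->min_slice)
				break;			/* ancestors already small enough */
			g->min_slice = slice;
		}
	}

Recomputing the minimum when the short-slice entity leaves is the part the real implementation gets from the augmented rbtree (each node caches the minimum over its subtree); the sketch deliberately leaves that out.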
Signed-off-by: Peter Zijlstra (Intel) Tested-by: Valentin Schneider Link: https://lkml.kernel.org/r/20240727105030.948188417@infradead.org --- include/linux/sched.h | 1 + kernel/sched/fair.c | 57 ++++++++++++++++++++++++++++++++++++++++++++++++++- 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 89a3d8d94e96..3709dedbab59 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -542,6 +542,7 @@ struct sched_entity { struct rb_node run_node; u64 deadline; u64 min_vruntime; + u64 min_slice; struct list_head group_node; unsigned char on_rq; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3284d3cb7147..fea057b311f6 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -782,6 +782,21 @@ static void update_min_vruntime(struct cfs_rq *cfs_rq) cfs_rq->min_vruntime = __update_min_vruntime(cfs_rq, vruntime); } +static inline u64 cfs_rq_min_slice(struct cfs_rq *cfs_rq) +{ + struct sched_entity *root = __pick_root_entity(cfs_rq); + struct sched_entity *curr = cfs_rq->curr; + u64 min_slice = ~0ULL; + + if (curr && curr->on_rq) + min_slice = curr->slice; + + if (root) + min_slice = min(min_slice, root->min_slice); + + return min_slice; +} + static inline bool __entity_less(struct rb_node *a, const struct rb_node *b) { return entity_before(__node_2_se(a), __node_2_se(b)); @@ -798,19 +813,34 @@ static inline void __min_vruntime_update(struct sched_entity *se, struct rb_node } } +static inline void __min_slice_update(struct sched_entity *se, struct rb_node *node) +{ + if (node) { + struct sched_entity *rse = __node_2_se(node); + if (rse->min_slice < se->min_slice) + se->min_slice = rse->min_slice; + } +} + /* * se->min_vruntime = min(se->vruntime, {left,right}->min_vruntime) */ static inline bool min_vruntime_update(struct sched_entity *se, bool exit) { u64 old_min_vruntime = se->min_vruntime; + u64 old_min_slice = se->min_slice; struct rb_node *node = &se->run_node; se->min_vruntime = se->vruntime; __min_vruntime_update(se, node->rb_right); __min_vruntime_update(se, node->rb_left); - return se->min_vruntime == old_min_vruntime; + se->min_slice = se->slice; + __min_slice_update(se, node->rb_right); + __min_slice_update(se, node->rb_left); + + return se->min_vruntime == old_min_vruntime && + se->min_slice == old_min_slice; } RB_DECLARE_CALLBACKS(static, min_vruntime_cb, struct sched_entity, @@ -823,6 +853,7 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se) { avg_vruntime_add(cfs_rq, se); se->min_vruntime = se->vruntime; + se->min_slice = se->slice; rb_add_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline, __entity_less, &min_vruntime_cb); } @@ -6911,6 +6942,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) int idle_h_nr_running = task_has_idle_policy(p); int task_new = !(flags & ENQUEUE_WAKEUP); int rq_h_nr_running = rq->cfs.h_nr_running; + u64 slice = 0; if (flags & ENQUEUE_DELAYED) { requeue_delayed_entity(se); @@ -6940,7 +6972,18 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) break; } cfs_rq = cfs_rq_of(se); + + /* + * Basically set the slice of group entries to the min_slice of + * their respective cfs_rq. This ensures the group can service + * its entities in the desired time-frame. 
+ */ + if (slice) { + se->slice = slice; + se->custom_slice = 1; + } enqueue_entity(cfs_rq, se, flags); + slice = cfs_rq_min_slice(cfs_rq); cfs_rq->h_nr_running++; cfs_rq->idle_h_nr_running += idle_h_nr_running; @@ -6962,6 +7005,9 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) se_update_runnable(se); update_cfs_group(se); + se->slice = slice; + slice = cfs_rq_min_slice(cfs_rq); + cfs_rq->h_nr_running++; cfs_rq->idle_h_nr_running += idle_h_nr_running; @@ -7027,11 +7073,15 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) int idle_h_nr_running = 0; int h_nr_running = 0; struct cfs_rq *cfs_rq; + u64 slice = 0; if (entity_is_task(se)) { p = task_of(se); h_nr_running = 1; idle_h_nr_running = task_has_idle_policy(p); + } else { + cfs_rq = group_cfs_rq(se); + slice = cfs_rq_min_slice(cfs_rq); } for_each_sched_entity(se) { @@ -7056,6 +7106,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { + slice = cfs_rq_min_slice(cfs_rq); + /* Avoid re-evaluating load for this entity: */ se = parent_entity(se); /* @@ -7077,6 +7129,9 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) se_update_runnable(se); update_cfs_group(se); + se->slice = slice; + slice = cfs_rq_min_slice(cfs_rq); + cfs_rq->h_nr_running -= h_nr_running; cfs_rq->idle_h_nr_running -= idle_h_nr_running; -- cgit v1.2.3 From 9c602adb799e72ee537c0c7ca7e828c3fe2acad6 Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Thu, 29 Aug 2024 11:11:11 +0800 Subject: sched/deadline: Fix schedstats vs deadline servers In dl_server_start(), when schedstats is enabled, the following happens: dl_server_start() dl_se->dl_server = 1; enqueue_dl_entity() update_stats_enqueue_dl() __schedstats_from_dl_se() dl_task_of() BUG_ON(dl_server(dl_se)); Since only tasks have schedstats and internal entries do not, avoid trying to update stats in this case. 
Fixes: 63ba8422f876 ("sched/deadline: Introduce deadline servers") Signed-off-by: Huang Shijie Signed-off-by: Peter Zijlstra (Intel) Acked-by: Juri Lelli Link: https://lkml.kernel.org/r/20240829031111.12142-1-shijie@os.amperecomputing.com --- kernel/sched/deadline.c | 38 ++++++++++++++++---------------------- 1 file changed, 16 insertions(+), 22 deletions(-) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 0f2df67f710b..2e84037b634f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1896,46 +1896,40 @@ static inline bool __dl_less(struct rb_node *a, const struct rb_node *b) return dl_time_before(__node_2_dle(a)->deadline, __node_2_dle(b)->deadline); } -static inline struct sched_statistics * +static __always_inline struct sched_statistics * __schedstats_from_dl_se(struct sched_dl_entity *dl_se) { + if (!schedstat_enabled()) + return NULL; + + if (dl_server(dl_se)) + return NULL; + return &dl_task_of(dl_se)->stats; } static inline void update_stats_wait_start_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; - - if (!schedstat_enabled()) - return; - - stats = __schedstats_from_dl_se(dl_se); - __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); + struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); + if (stats) + __update_stats_wait_start(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); } static inline void update_stats_wait_end_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; - - if (!schedstat_enabled()) - return; - - stats = __schedstats_from_dl_se(dl_se); - __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); + struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); + if (stats) + __update_stats_wait_end(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); } static inline void update_stats_enqueue_sleeper_dl(struct dl_rq *dl_rq, struct sched_dl_entity *dl_se) { - struct sched_statistics *stats; - - if (!schedstat_enabled()) - return; - - stats = __schedstats_from_dl_se(dl_se); - __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); + struct sched_statistics *stats = __schedstats_from_dl_se(dl_se); + if (stats) + __update_stats_enqueue_sleeper(rq_of_dl_rq(dl_rq), dl_task_of(dl_se), stats); } static inline void -- cgit v1.2.3 From 75b6499024a6c1a4ef0288f280534a5c54269076 Mon Sep 17 00:00:00 2001 From: Valentin Schneider Date: Thu, 29 Aug 2024 15:53:53 +0200 Subject: sched/fair: Properly deactivate sched_delayed task upon class change __sched_setscheduler() goes through an enqueue/dequeue cycle like so: flags := DEQUEUE_SAVE | DEQUEUE_MOVE | DEQUEUE_NOCLOCK; prev_class->dequeue_task(rq, p, flags); new_class->enqueue_task(rq, p, flags); when prev_class := fair_sched_class, this is followed by: dequeue_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SLEEP); the idea being that since the task has switched classes, we need to drop the sched_delayed logic and have that task be deactivated per its previous dequeue_task(..., DEQUEUE_SLEEP). Unfortunately, this leaves the task on_rq. This is missing the tail end of dequeue_entities() that issues __block_task(), which __sched_setscheduler() won't have done due to not using DEQUEUE_DELAYED - not that it should, as it is pretty much a fair_sched_class specific thing. Make switched_from_fair() properly deactivate sched_delayed tasks upon class changes via __block_task(), as if a dequeue_task(..., DEQUEUE_DELAYED) had been issued. 
Fixes: 2e0199df252a ("sched/fair: Prepare exit/cleanup paths for delayed_dequeue") Reported-by: "Paul E. McKenney" Reported-by: Chen Yu Signed-off-by: Valentin Schneider Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20240829135353.1524260-1-vschneid@redhat.com --- kernel/sched/fair.c | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index fea057b311f6..3a3286df282f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -5456,6 +5456,13 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); +static inline void finish_delayed_dequeue_entity(struct sched_entity *se) +{ + se->sched_delayed = 0; + if (sched_feat(DELAY_ZERO) && se->vlag > 0) + se->vlag = 0; +} + static bool dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { @@ -5531,11 +5538,8 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) != DEQUEUE_SAVE) update_min_vruntime(cfs_rq); - if (flags & DEQUEUE_DELAYED) { - se->sched_delayed = 0; - if (sched_feat(DELAY_ZERO) && se->vlag > 0) - se->vlag = 0; - } + if (flags & DEQUEUE_DELAYED) + finish_delayed_dequeue_entity(se); if (cfs_rq->nr_running == 0) update_idle_cfs_rq_clock_pelt(cfs_rq); @@ -13107,11 +13111,16 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p) * and we cannot use DEQUEUE_DELAYED. */ if (p->se.sched_delayed) { + /* First, dequeue it from its new class' structures */ dequeue_task(rq, p, DEQUEUE_NOCLOCK | DEQUEUE_SLEEP); - p->se.sched_delayed = 0; + /* + * Now, clean up the fair_sched_class side of things + * related to sched_delayed being true and that wasn't done + * due to the generic dequeue not using DEQUEUE_DELAYED. + */ + finish_delayed_dequeue_entity(&p->se); p->se.rel_deadline = 0; - if (sched_feat(DELAY_ZERO) && p->se.vlag > 0) - p->se.vlag = 0; + __block_task(rq, p); } } -- cgit v1.2.3 From 7d2180d9d943d31491d77e336557f33670cfe7fd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:49 +0200 Subject: sched: Use set_next_task(.first) where required Turns out the core_sched bits forgot to use the set_next_task(.first=true) variant. 
Notably: pick_next_task() := pick_task() + set_next_task(.first = true) Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224015.614146342@infradead.org --- kernel/sched/core.c | 4 ++-- kernel/sched/sched.h | 4 ++++ 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 016581168cb8..406b794f8423 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6010,7 +6010,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) next = rq->core_pick; if (next != prev) { put_prev_task(rq, prev); - set_next_task(rq, next); + set_next_task_first(rq, next); } rq->core_pick = NULL; @@ -6184,7 +6184,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } out_set_next: - set_next_task(rq, next); + set_next_task_first(rq, next); out: if (rq->core->core_forceidle_count && next == rq->idle) queue_core_balance(rq); diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 2f5d658c0631..d33311d6a738 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2363,6 +2363,10 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next) next->sched_class->set_next_task(rq, next, false); } +static inline void set_next_task_first(struct rq *rq, struct task_struct *next) +{ + next->sched_class->set_next_task(rq, next, true); +} /* * Helper to define a sched_class instance; each one is placed in a separate -- cgit v1.2.3 From dae4320b29f0bbdae93f7c1f6f80b19f109ca0bc Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:50 +0200 Subject: sched: Fixup set_next_task() implementations The rule is that: pick_next_task() := pick_task() + set_next_task(.first = true) Turns out, there's still a few things in pick_next_task() that are missing from that combination. 
Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224015.724111109@infradead.org --- kernel/sched/deadline.c | 6 ++--- kernel/sched/fair.c | 62 ++++++++++++++++++++++++------------------------- 2 files changed, 34 insertions(+), 34 deletions(-) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 2e84037b634f..f7ac7cff9b5e 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2386,6 +2386,9 @@ static void set_next_task_dl(struct rq *rq, struct task_struct *p, bool first) update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 0); deadline_queue_push_tasks(rq); + + if (hrtick_enabled(rq)) + start_hrtick_dl(rq, &p->dl); } static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq) @@ -2452,9 +2455,6 @@ static struct task_struct *pick_next_task_dl(struct rq *rq) if (!p->dl_server) set_next_task_dl(rq, p, true); - if (hrtick_enabled(rq)) - start_hrtick_dl(rq, &p->dl); - return p; } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 3a3286df282f..eaeb8b2ad834 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8760,6 +8760,9 @@ again: return task_of(se); } +static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first); +static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first); + struct task_struct * pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) { @@ -8808,33 +8811,17 @@ again: put_prev_entity(cfs_rq, pse); set_next_entity(cfs_rq, se); + + __set_next_task_fair(rq, p, true); } - goto done; + return p; + simple: #endif if (prev) put_prev_task(rq, prev); - - for_each_sched_entity(se) - set_next_entity(cfs_rq_of(se), se); - -done: __maybe_unused; -#ifdef CONFIG_SMP - /* - * Move the next running task to the front of - * the list, so our cfs_tasks list becomes MRU - * one. - */ - list_move(&p->se.group_node, &rq->cfs_tasks); -#endif - - if (hrtick_enabled_fair(rq)) - hrtick_start_fair(rq, p); - - update_misfit_status(p, rq); - sched_fair_update_stop_tick(rq, p); - + set_next_task_fair(rq, p, true); return p; idle: @@ -13145,12 +13132,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p) } } -/* Account for a task changing its policy or group. - * - * This routine is mostly called to set cfs_rq->curr field when a task - * migrates between groups/classes. - */ -static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) +static void __set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) { struct sched_entity *se = &p->se; @@ -13163,6 +13145,27 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) list_move(&se->group_node, &rq->cfs_tasks); } #endif + if (!first) + return; + + SCHED_WARN_ON(se->sched_delayed); + + if (hrtick_enabled_fair(rq)) + hrtick_start_fair(rq, p); + + update_misfit_status(p, rq); + sched_fair_update_stop_tick(rq, p); +} + +/* + * Account for a task changing its policy or group. + * + * This routine is mostly called to set cfs_rq->curr field when a task + * migrates between groups/classes. 
+ */ +static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) +{ + struct sched_entity *se = &p->se; for_each_sched_entity(se) { struct cfs_rq *cfs_rq = cfs_rq_of(se); @@ -13172,10 +13175,7 @@ static void set_next_task_fair(struct rq *rq, struct task_struct *p, bool first) account_cfs_rq_runtime(cfs_rq, 0); } - if (!first) - return; - - SCHED_WARN_ON(se->sched_delayed); + __set_next_task_fair(rq, p, first); } void init_cfs_rq(struct cfs_rq *cfs_rq) -- cgit v1.2.3 From 4686cc598f669dea1b50dde1568e6c65c355bc67 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:51 +0200 Subject: sched: Clean up DL server vs core sched Abide by the simple rule: pick_next_task() := pick_task() + set_next_task(.first = true) This allows us to trivially get rid of server_pick_next() and things collapse nicely. Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224015.837303391@infradead.org --- include/linux/sched.h | 1 - kernel/sched/deadline.c | 18 +++++++----------- kernel/sched/fair.c | 13 +------------ kernel/sched/sched.h | 1 - 4 files changed, 8 insertions(+), 25 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 3709dedbab59..57cf27a3045c 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -692,7 +692,6 @@ struct sched_dl_entity { */ struct rq *rq; dl_server_has_tasks_f server_has_tasks; - dl_server_pick_f server_pick_next; dl_server_pick_f server_pick_task; #ifdef CONFIG_RT_MUTEXES diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index f7ac7cff9b5e..2ea929ce9e4f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1665,12 +1665,10 @@ void dl_server_stop(struct sched_dl_entity *dl_se) void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_server_has_tasks_f has_tasks, - dl_server_pick_f pick_next, dl_server_pick_f pick_task) { dl_se->rq = rq; dl_se->server_has_tasks = has_tasks; - dl_se->server_pick_next = pick_next; dl_se->server_pick_task = pick_task; } @@ -2404,9 +2402,8 @@ static struct sched_dl_entity *pick_next_dl_entity(struct dl_rq *dl_rq) /* * __pick_next_task_dl - Helper to pick the next -deadline task to run. * @rq: The runqueue to pick the next task from. - * @peek: If true, just peek at the next task. Only relevant for dlserver. 
*/ -static struct task_struct *__pick_next_task_dl(struct rq *rq, bool peek) +static struct task_struct *__pick_task_dl(struct rq *rq) { struct sched_dl_entity *dl_se; struct dl_rq *dl_rq = &rq->dl; @@ -2420,10 +2417,7 @@ again: WARN_ON_ONCE(!dl_se); if (dl_server(dl_se)) { - if (IS_ENABLED(CONFIG_SMP) && peek) - p = dl_se->server_pick_task(dl_se); - else - p = dl_se->server_pick_next(dl_se); + p = dl_se->server_pick_task(dl_se); if (!p) { dl_se->dl_yielded = 1; update_curr_dl_se(rq, dl_se, 0); @@ -2440,7 +2434,7 @@ again: #ifdef CONFIG_SMP static struct task_struct *pick_task_dl(struct rq *rq) { - return __pick_next_task_dl(rq, true); + return __pick_task_dl(rq); } #endif @@ -2448,11 +2442,13 @@ static struct task_struct *pick_next_task_dl(struct rq *rq) { struct task_struct *p; - p = __pick_next_task_dl(rq, false); + p = __pick_task_dl(rq); if (!p) return p; - if (!p->dl_server) + if (p->dl_server) + p->sched_class->set_next_task(rq, p, true); + else set_next_task_dl(rq, p, true); return p; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index eaeb8b2ad834..8379100905b8 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8862,16 +8862,7 @@ static bool fair_server_has_tasks(struct sched_dl_entity *dl_se) static struct task_struct *fair_server_pick_task(struct sched_dl_entity *dl_se) { -#ifdef CONFIG_SMP return pick_task_fair(dl_se->rq); -#else - return NULL; -#endif -} - -static struct task_struct *fair_server_pick_next(struct sched_dl_entity *dl_se) -{ - return pick_next_task_fair(dl_se->rq, NULL, NULL); } void fair_server_init(struct rq *rq) @@ -8880,9 +8871,7 @@ void fair_server_init(struct rq *rq) init_dl_entity(dl_se); - dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_next, - fair_server_pick_task); - + dl_server_init(dl_se, rq, fair_server_has_tasks, fair_server_pick_task); } /* diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index d33311d6a738..80605e2da483 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -362,7 +362,6 @@ extern void dl_server_start(struct sched_dl_entity *dl_se); extern void dl_server_stop(struct sched_dl_entity *dl_se); extern void dl_server_init(struct sched_dl_entity *dl_se, struct rq *rq, dl_server_has_tasks_f has_tasks, - dl_server_pick_f pick_next, dl_server_pick_f pick_task); extern void dl_server_update_idle_time(struct rq *rq, -- cgit v1.2.3 From 260598f142c34811d226fdde5ab0346b48181439 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:52 +0200 Subject: sched: Split up put_prev_task_balance() With the goal of pushing put_prev_task() after pick_task() / into pick_next_task(). 
Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224015.943143811@infradead.org --- kernel/sched/core.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 406b794f8423..36f9bc509ff2 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5841,8 +5841,8 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt) schedstat_inc(this_rq()->sched_count); } -static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, - struct rq_flags *rf) +static void prev_balance(struct rq *rq, struct task_struct *prev, + struct rq_flags *rf) { #ifdef CONFIG_SMP const struct sched_class *class; @@ -5860,8 +5860,6 @@ static void put_prev_task_balance(struct rq *rq, struct task_struct *prev, } #endif - put_prev_task(rq, prev); - /* * We've updated @prev and no longer need the server link, clear it. * Must be done before ->pick_next_task() because that can (re)set @@ -5917,7 +5915,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } restart: - put_prev_task_balance(rq, prev, rf); + prev_balance(rq, prev, rf); + put_prev_task(rq, prev); for_each_class(class) { p = class->pick_next_task(rq); @@ -6017,7 +6016,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) goto out; } - put_prev_task_balance(rq, prev, rf); + prev_balance(rq, prev, rf); + put_prev_task(rq, prev); smt_mask = cpu_smt_mask(cpu); need_sync = !!rq->core->core_cookie; -- cgit v1.2.3 From fd03c5b8585562d60f8b597b4332d28f48abfe7d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:53 +0200 Subject: sched: Rework pick_next_task() The current rule is that: pick_next_task() := pick_task() + set_next_task(.first = true) And many classes implement it directly as such. Change things around to make pick_next_task() optional while also changing the definition to: pick_next_task(prev) := pick_task() + put_prev_task() + set_next_task(.first = true) The reason is that sched_ext would like to have a 'final' call that knows the next task. By placing put_prev_task() right next to set_next_task() (as it already is for sched_core) this becomes trivial. As a bonus, this is a nice cleanup on its own. 
Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224016.051225657@infradead.org --- kernel/sched/core.c | 21 +++++++++++++++------ kernel/sched/deadline.c | 21 +-------------------- kernel/sched/fair.c | 11 +++++------ kernel/sched/idle.c | 16 ++-------------- kernel/sched/rt.c | 13 +------------ kernel/sched/sched.h | 16 ++++++++++++---- kernel/sched/stop_task.c | 13 +------------ 7 files changed, 37 insertions(+), 74 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 36f9bc509ff2..b9429eb5dbbe 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5893,8 +5893,9 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) /* Assume the next prioritized class is idle_sched_class */ if (!p) { + p = pick_task_idle(rq); put_prev_task(rq, prev); - p = pick_next_task_idle(rq); + set_next_task_first(rq, p); } /* @@ -5916,12 +5917,20 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) restart: prev_balance(rq, prev, rf); - put_prev_task(rq, prev); for_each_class(class) { - p = class->pick_next_task(rq); - if (p) - return p; + if (class->pick_next_task) { + p = class->pick_next_task(rq, prev); + if (p) + return p; + } else { + p = class->pick_task(rq); + if (p) { + put_prev_task(rq, prev); + set_next_task_first(rq, p); + return p; + } + } } BUG(); /* The idle class should always have a runnable task. */ @@ -6017,7 +6026,6 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } prev_balance(rq, prev, rf); - put_prev_task(rq, prev); smt_mask = cpu_smt_mask(cpu); need_sync = !!rq->core->core_cookie; @@ -6184,6 +6192,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } out_set_next: + put_prev_task(rq, prev); set_next_task_first(rq, next); out: if (rq->core->core_forceidle_count && next == rq->idle) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 2ea929ce9e4f..a1547e1cd96e 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2431,28 +2431,10 @@ again: return p; } -#ifdef CONFIG_SMP static struct task_struct *pick_task_dl(struct rq *rq) { return __pick_task_dl(rq); } -#endif - -static struct task_struct *pick_next_task_dl(struct rq *rq) -{ - struct task_struct *p; - - p = __pick_task_dl(rq); - if (!p) - return p; - - if (p->dl_server) - p->sched_class->set_next_task(rq, p, true); - else - set_next_task_dl(rq, p, true); - - return p; -} static void put_prev_task_dl(struct rq *rq, struct task_struct *p) { @@ -3146,13 +3128,12 @@ DEFINE_SCHED_CLASS(dl) = { .wakeup_preempt = wakeup_preempt_dl, - .pick_next_task = pick_next_task_dl, + .pick_task = pick_task_dl, .put_prev_task = put_prev_task_dl, .set_next_task = set_next_task_dl, #ifdef CONFIG_SMP .balance = balance_dl, - .pick_task = pick_task_dl, .select_task_rq = select_task_rq_dl, .migrate_task_rq = migrate_task_rq_dl, .set_cpus_allowed = set_cpus_allowed_dl, diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 8379100905b8..53556b08edef 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8777,7 +8777,7 @@ again: se = &p->se; #ifdef CONFIG_FAIR_GROUP_SCHED - if (!prev || prev->sched_class != &fair_sched_class) + if (prev->sched_class != &fair_sched_class) goto simple; /* @@ -8819,8 +8819,7 @@ again: simple: #endif - if (prev) - put_prev_task(rq, prev); + put_prev_task(rq, prev); set_next_task_fair(rq, p, true); return p; @@ -8850,9 +8849,9 @@ idle: return NULL; } -static struct task_struct *__pick_next_task_fair(struct rq *rq) 
+static struct task_struct *__pick_next_task_fair(struct rq *rq, struct task_struct *prev) { - return pick_next_task_fair(rq, NULL, NULL); + return pick_next_task_fair(rq, prev, NULL); } static bool fair_server_has_tasks(struct sched_dl_entity *dl_se) @@ -13490,13 +13489,13 @@ DEFINE_SCHED_CLASS(fair) = { .wakeup_preempt = check_preempt_wakeup_fair, + .pick_task = pick_task_fair, .pick_next_task = __pick_next_task_fair, .put_prev_task = put_prev_task_fair, .set_next_task = set_next_task_fair, #ifdef CONFIG_SMP .balance = balance_fair, - .pick_task = pick_task_fair, .select_task_rq = select_task_rq_fair, .migrate_task_rq = migrate_task_rq_fair, diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index 16074209ce26..a343e1c97f56 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -462,21 +462,10 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool fir next->se.exec_start = rq_clock_task(rq); } -#ifdef CONFIG_SMP -static struct task_struct *pick_task_idle(struct rq *rq) +struct task_struct *pick_task_idle(struct rq *rq) { return rq->idle; } -#endif - -struct task_struct *pick_next_task_idle(struct rq *rq) -{ - struct task_struct *next = rq->idle; - - set_next_task_idle(rq, next, true); - - return next; -} /* * It is not legal to sleep in the idle task - print a warning @@ -531,13 +520,12 @@ DEFINE_SCHED_CLASS(idle) = { .wakeup_preempt = wakeup_preempt_idle, - .pick_next_task = pick_next_task_idle, + .pick_task = pick_task_idle, .put_prev_task = put_prev_task_idle, .set_next_task = set_next_task_idle, #ifdef CONFIG_SMP .balance = balance_idle, - .pick_task = pick_task_idle, .select_task_rq = select_task_rq_idle, .set_cpus_allowed = set_cpus_allowed_common, #endif diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index fdc8e059cab0..8025f394b0e0 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1748,16 +1748,6 @@ static struct task_struct *pick_task_rt(struct rq *rq) return p; } -static struct task_struct *pick_next_task_rt(struct rq *rq) -{ - struct task_struct *p = pick_task_rt(rq); - - if (p) - set_next_task_rt(rq, p, true); - - return p; -} - static void put_prev_task_rt(struct rq *rq, struct task_struct *p) { struct sched_rt_entity *rt_se = &p->rt; @@ -2645,13 +2635,12 @@ DEFINE_SCHED_CLASS(rt) = { .wakeup_preempt = wakeup_preempt_rt, - .pick_next_task = pick_next_task_rt, + .pick_task = pick_task_rt, .put_prev_task = put_prev_task_rt, .set_next_task = set_next_task_rt, #ifdef CONFIG_SMP .balance = balance_rt, - .pick_task = pick_task_rt, .select_task_rq = select_task_rq_rt, .set_cpus_allowed = set_cpus_allowed_common, .rq_online = rq_online_rt, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 80605e2da483..64a4ed758ba1 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2300,7 +2300,17 @@ struct sched_class { void (*wakeup_preempt)(struct rq *rq, struct task_struct *p, int flags); - struct task_struct *(*pick_next_task)(struct rq *rq); + struct task_struct *(*pick_task)(struct rq *rq); + /* + * Optional! 
When implemented pick_next_task() should be equivalent to: + * + * next = pick_task(); + * if (next) { + * put_prev_task(prev); + * set_next_task_first(next); + * } + */ + struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev); void (*put_prev_task)(struct rq *rq, struct task_struct *p); void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); @@ -2309,8 +2319,6 @@ struct sched_class { int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); int (*select_task_rq)(struct task_struct *p, int task_cpu, int flags); - struct task_struct * (*pick_task)(struct rq *rq); - void (*migrate_task_rq)(struct task_struct *p, int new_cpu); void (*task_woken)(struct rq *this_rq, struct task_struct *task); @@ -2421,7 +2429,7 @@ static inline bool sched_fair_runnable(struct rq *rq) } extern struct task_struct *pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf); -extern struct task_struct *pick_next_task_idle(struct rq *rq); +extern struct task_struct *pick_task_idle(struct rq *rq); #define SCA_CHECK 0x01 #define SCA_MIGRATE_DISABLE 0x02 diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 4cf02074fa9e..0fd5352ff0ce 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -41,16 +41,6 @@ static struct task_struct *pick_task_stop(struct rq *rq) return rq->stop; } -static struct task_struct *pick_next_task_stop(struct rq *rq) -{ - struct task_struct *p = pick_task_stop(rq); - - if (p) - set_next_task_stop(rq, p, true); - - return p; -} - static void enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) { @@ -112,13 +102,12 @@ DEFINE_SCHED_CLASS(stop) = { .wakeup_preempt = wakeup_preempt_stop, - .pick_next_task = pick_next_task_stop, + .pick_task = pick_task_stop, .put_prev_task = put_prev_task_stop, .set_next_task = set_next_task_stop, #ifdef CONFIG_SMP .balance = balance_stop, - .pick_task = pick_task_stop, .select_task_rq = select_task_rq_stop, .set_cpus_allowed = set_cpus_allowed_common, #endif -- cgit v1.2.3 From 436f3eed5c69c1048a5754df6e3dbb291e5cccbd Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:54 +0200 Subject: sched: Combine the last put_prev_task() and the first set_next_task() Ensure the last put_prev_task() and the first set_next_task() always go together. 
Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224016.158454756@infradead.org --- kernel/sched/core.c | 17 ++++------------- kernel/sched/fair.c | 3 +-- kernel/sched/sched.h | 10 +++++++++- 3 files changed, 14 insertions(+), 16 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b9429eb5dbbe..8a1cf93da203 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -5894,8 +5894,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) /* Assume the next prioritized class is idle_sched_class */ if (!p) { p = pick_task_idle(rq); - put_prev_task(rq, prev); - set_next_task_first(rq, p); + put_prev_set_next_task(rq, prev, p); } /* @@ -5926,8 +5925,7 @@ restart: } else { p = class->pick_task(rq); if (p) { - put_prev_task(rq, prev); - set_next_task_first(rq, p); + put_prev_set_next_task(rq, prev, p); return p; } } @@ -6016,13 +6014,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); next = rq->core_pick; - if (next != prev) { - put_prev_task(rq, prev); - set_next_task_first(rq, next); - } - rq->core_pick = NULL; - goto out; + goto out_set_next; } prev_balance(rq, prev, rf); @@ -6192,9 +6185,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } out_set_next: - put_prev_task(rq, prev); - set_next_task_first(rq, next); -out: + put_prev_set_next_task(rq, prev, next); if (rq->core->core_forceidle_count && next == rq->idle) queue_core_balance(rq); diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 53556b08edef..c5b7873dcc30 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8819,8 +8819,7 @@ again: simple: #endif - put_prev_task(rq, prev); - set_next_task_fair(rq, p, true); + put_prev_set_next_task(rq, prev, p); return p; idle: diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 64a4ed758ba1..aae35818cca4 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2370,8 +2370,16 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next) next->sched_class->set_next_task(rq, next, false); } -static inline void set_next_task_first(struct rq *rq, struct task_struct *next) +static inline void put_prev_set_next_task(struct rq *rq, + struct task_struct *prev, + struct task_struct *next) { + WARN_ON_ONCE(rq->curr != prev); + + if (next == prev) + return; + + prev->sched_class->put_prev_task(rq, prev); next->sched_class->set_next_task(rq, next, true); } -- cgit v1.2.3 From bd9bbc96e8356886971317f57994247ca491dbf1 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:55 +0200 Subject: sched: Rework dl_server When a task is selected through a dl_server, it will have p->dl_server set, such that it can account runtime to the dl_server, see update_curr_task(). Currently p->dl_server is set in pick*task() whenever it goes through the dl_server, clearing it is a bit of a mess though. The trivial solution is clearing it on the final put (now that we have this location). However, this gives a problem when: p = pick_task(rq); if (p) put_prev_set_next_task(rq, prev, next); picks the same task but through a different path, notably when it goes from picking through the dl_server to a direct pick or vice-versa. In that case we cannot readily determine wether we should clear or preserve p->dl_server. An additional complication is pick_*task() setting p->dl_server for a remote pick, it might still need to update runtime before it schedules the core_pick. 
Close all these holes and remove all the random clearing of p->dl_server by: - having pick_*task() manage rq->dl_server - having the final put_prev_task() clear p->dl_server - having the first set_next_task() set p->dl_server = rq->dl_server - complicate the core_sched code to save/restore rq->dl_server where appropriate. Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224016.259853414@infradead.org --- kernel/sched/core.c | 40 +++++++++++++++------------------------- kernel/sched/deadline.c | 2 +- kernel/sched/fair.c | 10 ++-------- kernel/sched/sched.h | 14 ++++++++++++++ 4 files changed, 32 insertions(+), 34 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 8a1cf93da203..ffcd637dc8e4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -3668,8 +3668,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags, rq->idle_stamp = 0; } #endif - - p->dl_server = NULL; } /* @@ -5859,14 +5857,6 @@ static void prev_balance(struct rq *rq, struct task_struct *prev, break; } #endif - - /* - * We've updated @prev and no longer need the server link, clear it. - * Must be done before ->pick_next_task() because that can (re)set - * ->dl_server. - */ - if (prev->dl_server) - prev->dl_server = NULL; } /* @@ -5878,6 +5868,8 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) const struct sched_class *class; struct task_struct *p; + rq->dl_server = NULL; + /* * Optimization: we know that if all tasks are in the fair class we can * call that function directly, but only if the @prev task wasn't of a @@ -5897,20 +5889,6 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) put_prev_set_next_task(rq, prev, p); } - /* - * This is a normal CFS pick, but the previous could be a DL pick. - * Clear it as previous is no longer picked. - */ - if (prev->dl_server) - prev->dl_server = NULL; - - /* - * This is the fast path; it cannot be a DL server pick; - * therefore even if @p == @prev, ->dl_server must be NULL. - */ - if (p->dl_server) - p->dl_server = NULL; - return p; } @@ -5958,6 +5936,8 @@ static inline struct task_struct *pick_task(struct rq *rq) const struct sched_class *class; struct task_struct *p; + rq->dl_server = NULL; + for_each_class(class) { p = class->pick_task(rq); if (p) @@ -5996,6 +5976,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) * another cpu during offline. */ rq->core_pick = NULL; + rq->core_dl_server = NULL; return __pick_next_task(rq, prev, rf); } @@ -6014,7 +5995,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq); next = rq->core_pick; + rq->dl_server = rq->core_dl_server; rq->core_pick = NULL; + rq->core_dl_server = NULL; goto out_set_next; } @@ -6059,6 +6042,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) next = pick_task(rq); if (!next->core_cookie) { rq->core_pick = NULL; + rq->core_dl_server = NULL; /* * For robustness, update the min_vruntime_fi for * unconstrained picks as well. 
@@ -6086,7 +6070,9 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (i != cpu && (rq_i != rq->core || !core_clock_updated)) update_rq_clock(rq_i); - p = rq_i->core_pick = pick_task(rq_i); + rq_i->core_pick = p = pick_task(rq_i); + rq_i->core_dl_server = rq_i->dl_server; + if (!max || prio_less(max, p, fi_before)) max = p; } @@ -6110,6 +6096,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) } rq_i->core_pick = p; + rq_i->core_dl_server = NULL; if (p == rq_i->idle) { if (rq_i->nr_running) { @@ -6170,6 +6157,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (i == cpu) { rq_i->core_pick = NULL; + rq_i->core_dl_server = NULL; continue; } @@ -6178,6 +6166,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) if (rq_i->curr == rq_i->core_pick) { rq_i->core_pick = NULL; + rq_i->core_dl_server = NULL; continue; } @@ -8401,6 +8390,7 @@ void __init sched_init(void) #ifdef CONFIG_SCHED_CORE rq->core = rq; rq->core_pick = NULL; + rq->core_dl_server = NULL; rq->core_enabled = 0; rq->core_tree = RB_ROOT; rq->core_forceidle_count = 0; diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index a1547e1cd96e..e83b68430627 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2423,7 +2423,7 @@ again: update_curr_dl_se(rq, dl_se, 0); goto again; } - p->dl_server = dl_se; + rq->dl_server = dl_se; } else { p = dl_task_of(dl_se); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index c5b7873dcc30..f67311217063 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8749,14 +8749,6 @@ again: cfs_rq = group_cfs_rq(se); } while (cfs_rq); - /* - * This can be called from directly from CFS's ->pick_task() or indirectly - * from DL's ->pick_task when fair server is enabled. In the indirect case, - * DL will set ->dl_server just after this function is called, so its Ok to - * clear. In the direct case, we are picking directly so we must clear it. - */ - task_of(se)->dl_server = NULL; - return task_of(se); } @@ -8780,6 +8772,8 @@ again: if (prev->sched_class != &fair_sched_class) goto simple; + __put_prev_set_next_dl_server(rq, prev, p); + /* * Because of the set_next_buddy() in dequeue_task_fair() it is rather * likely that a next task is from the same cgroup as the current. 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index aae35818cca4..2a216c9153e9 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -1066,6 +1066,7 @@ struct rq { unsigned int nr_uninterruptible; struct task_struct __rcu *curr; + struct sched_dl_entity *dl_server; struct task_struct *idle; struct task_struct *stop; unsigned long next_balance; @@ -1193,6 +1194,7 @@ struct rq { /* per rq */ struct rq *core; struct task_struct *core_pick; + struct sched_dl_entity *core_dl_server; unsigned int core_enabled; unsigned int core_sched_seq; struct rb_root core_tree; @@ -2370,12 +2372,24 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next) next->sched_class->set_next_task(rq, next, false); } +static inline void +__put_prev_set_next_dl_server(struct rq *rq, + struct task_struct *prev, + struct task_struct *next) +{ + prev->dl_server = NULL; + next->dl_server = rq->dl_server; + rq->dl_server = NULL; +} + static inline void put_prev_set_next_task(struct rq *rq, struct task_struct *prev, struct task_struct *next) { WARN_ON_ONCE(rq->curr != prev); + __put_prev_set_next_dl_server(rq, prev, next); + if (next == prev) return; -- cgit v1.2.3 From b2d70222dbf2a2ff7a972a685d249a5d75afa87f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 14 Aug 2024 00:25:56 +0200 Subject: sched: Add put_prev_task(.next) In order to tell the previous sched_class what the next task is, add put_prev_task(.next). Notable SCX will use this to: 1) determine the next task will leave the SCX sched class and push the current task to another CPU if possible. 2) statistics on how often and which other classes preempt it Signed-off-by: Peter Zijlstra (Intel) Link: https://lore.kernel.org/r/20240813224016.367421076@infradead.org --- kernel/sched/deadline.c | 2 +- kernel/sched/fair.c | 2 +- kernel/sched/idle.c | 2 +- kernel/sched/rt.c | 2 +- kernel/sched/sched.h | 6 +++--- kernel/sched/stop_task.c | 2 +- 6 files changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index e83b68430627..9ce93d0bf452 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -2436,7 +2436,7 @@ static struct task_struct *pick_task_dl(struct rq *rq) return __pick_task_dl(rq); } -static void put_prev_task_dl(struct rq *rq, struct task_struct *p) +static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct task_struct *next) { struct sched_dl_entity *dl_se = &p->dl; struct dl_rq *dl_rq = &rq->dl; diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f67311217063..d697a0a3fc73 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8869,7 +8869,7 @@ void fair_server_init(struct rq *rq) /* * Account for a descheduled task: */ -static void put_prev_task_fair(struct rq *rq, struct task_struct *prev) +static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct task_struct *next) { struct sched_entity *se = &prev->se; struct cfs_rq *cfs_rq; diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index a343e1c97f56..7a105a0123aa 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c @@ -450,7 +450,7 @@ static void wakeup_preempt_idle(struct rq *rq, struct task_struct *p, int flags) resched_curr(rq); } -static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) +static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct task_struct *next) { dl_server_update_idle_time(rq, prev); } diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index 8025f394b0e0..172c588de542 100644 --- 
a/kernel/sched/rt.c +++ b/kernel/sched/rt.c @@ -1748,7 +1748,7 @@ static struct task_struct *pick_task_rt(struct rq *rq) return p; } -static void put_prev_task_rt(struct rq *rq, struct task_struct *p) +static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct task_struct *next) { struct sched_rt_entity *rt_se = &p->rt; struct rt_rq *rt_rq = &rq->rt; diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index 2a216c9153e9..3744f16a1293 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -2314,7 +2314,7 @@ struct sched_class { */ struct task_struct *(*pick_next_task)(struct rq *rq, struct task_struct *prev); - void (*put_prev_task)(struct rq *rq, struct task_struct *p); + void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct task_struct *next); void (*set_next_task)(struct rq *rq, struct task_struct *p, bool first); #ifdef CONFIG_SMP @@ -2364,7 +2364,7 @@ struct sched_class { static inline void put_prev_task(struct rq *rq, struct task_struct *prev) { WARN_ON_ONCE(rq->curr != prev); - prev->sched_class->put_prev_task(rq, prev); + prev->sched_class->put_prev_task(rq, prev, NULL); } static inline void set_next_task(struct rq *rq, struct task_struct *next) @@ -2393,7 +2393,7 @@ static inline void put_prev_set_next_task(struct rq *rq, if (next == prev) return; - prev->sched_class->put_prev_task(rq, prev); + prev->sched_class->put_prev_task(rq, prev, next); next->sched_class->set_next_task(rq, next, true); } diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c index 0fd5352ff0ce..058dd42e3d9b 100644 --- a/kernel/sched/stop_task.c +++ b/kernel/sched/stop_task.c @@ -59,7 +59,7 @@ static void yield_task_stop(struct rq *rq) BUG(); /* the stop task should never yield, its pointless. */ } -static void put_prev_task_stop(struct rq *rq, struct task_struct *prev) +static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct task_struct *next) { update_curr_common(rq); } -- cgit v1.2.3 From 3dcac251b066b60dba6d44c97d76faeb00bf19c5 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 9 Aug 2024 09:22:40 +0000 Subject: sched/core: Introduce SM_IDLE and an idle re-entry fast-path in __schedule() Since commit b2a02fc43a1f ("smp: Optimize send_call_function_single_ipi()") an idle CPU in TIF_POLLING_NRFLAG mode can be pulled out of idle by setting TIF_NEED_RESCHED flag to service an IPI without actually sending an interrupt. Even in cases where the IPI handler does not queue a task on the idle CPU, do_idle() will call __schedule() since need_resched() returns true in these cases. Introduce and use SM_IDLE to identify call to __schedule() from schedule_idle() and shorten the idle re-entry time by skipping pick_next_task() when nr_running is 0 and the previous task is the idle task. With the SM_IDLE fast-path, the time taken to complete a fixed set of IPIs using ipistorm improves noticeably. 
Following are the numbers from a dual socket Intel Ice Lake Xeon server (2 x 32C/64T) and 3rd Generation AMD EPYC system (2 x 64C/128T) (boost on, C2 disabled) running ipistorm between CPU8 and CPU16: cmdline: insmod ipistorm.ko numipi=100000 single=1 offset=8 cpulist=8 wait=1 ================================================================== Test : ipistorm (modified) Units : Normalized runtime Interpretation: Lower is better Statistic : AMean ======================= Intel Ice Lake Xeon ====================== kernel: time [pct imp] tip:sched/core 1.00 [baseline] tip:sched/core + SM_IDLE 0.80 [20.51%] ==================== 3rd Generation AMD EPYC ===================== kernel: time [pct imp] tip:sched/core 1.00 [baseline] tip:sched/core + SM_IDLE 0.90 [10.17%] ================================================================== [ kprateek: Commit message, SM_RTLOCK_WAIT fix ] Signed-off-by: Peter Zijlstra (Intel) Not-yet-signed-off-by: Peter Zijlstra Signed-off-by: K Prateek Nayak Signed-off-by: Peter Zijlstra (Intel) Acked-by: Vincent Guittot Link: https://lore.kernel.org/r/20240809092240.6921-1-kprateek.nayak@amd.com --- kernel/sched/core.c | 45 ++++++++++++++++++++++++++------------------- 1 file changed, 26 insertions(+), 19 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index ffcd637dc8e4..2922facebae7 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -6410,19 +6410,12 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) * Constants for the sched_mode argument of __schedule(). * * The mode argument allows RT enabled kernels to differentiate a - * preemption from blocking on an 'sleeping' spin/rwlock. Note that - * SM_MASK_PREEMPT for !RT has all bits set, which allows the compiler to - * optimize the AND operation out and just check for zero. + * preemption from blocking on an 'sleeping' spin/rwlock. */ -#define SM_NONE 0x0 -#define SM_PREEMPT 0x1 -#define SM_RTLOCK_WAIT 0x2 - -#ifndef CONFIG_PREEMPT_RT -# define SM_MASK_PREEMPT (~0U) -#else -# define SM_MASK_PREEMPT SM_PREEMPT -#endif +#define SM_IDLE (-1) +#define SM_NONE 0 +#define SM_PREEMPT 1 +#define SM_RTLOCK_WAIT 2 /* * __schedule() is the main scheduler function. @@ -6463,9 +6456,14 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf) * * WARNING: must be called with preemption disabled! */ -static void __sched notrace __schedule(unsigned int sched_mode) +static void __sched notrace __schedule(int sched_mode) { struct task_struct *prev, *next; + /* + * On PREEMPT_RT kernel, SM_RTLOCK_WAIT is noted + * as a preemption by schedule_debug() and RCU. + */ + bool preempt = sched_mode > SM_NONE; unsigned long *switch_count; unsigned long prev_state; struct rq_flags rf; @@ -6476,13 +6474,13 @@ static void __sched notrace __schedule(unsigned int sched_mode) rq = cpu_rq(cpu); prev = rq->curr; - schedule_debug(prev, !!sched_mode); + schedule_debug(prev, preempt); if (sched_feat(HRTICK) || sched_feat(HRTICK_DL)) hrtick_clear(rq); local_irq_disable(); - rcu_note_context_switch(!!sched_mode); + rcu_note_context_switch(preempt); /* * Make sure that signal_pending_state()->signal_pending() below @@ -6511,12 +6509,20 @@ static void __sched notrace __schedule(unsigned int sched_mode) switch_count = &prev->nivcsw; + /* Task state changes only considers SM_PREEMPT as preemption */ + preempt = sched_mode == SM_PREEMPT; + /* * We must load prev->state once (task_struct::state is volatile), such * that we form a control dependency vs deactivate_task() below. 
*/ prev_state = READ_ONCE(prev->__state); - if (!(sched_mode & SM_MASK_PREEMPT) && prev_state) { + if (sched_mode == SM_IDLE) { + if (!rq->nr_running) { + next = prev; + goto picked; + } + } else if (!preempt && prev_state) { if (signal_pending_state(prev_state, prev)) { WRITE_ONCE(prev->__state, TASK_RUNNING); } else { @@ -6547,6 +6553,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) } next = pick_next_task(rq, prev, &rf); +picked: clear_tsk_need_resched(prev); clear_preempt_need_resched(); #ifdef CONFIG_SCHED_DEBUG @@ -6588,7 +6595,7 @@ static void __sched notrace __schedule(unsigned int sched_mode) psi_account_irqtime(rq, prev, next); psi_sched_switch(prev, next, !task_on_rq_queued(prev)); - trace_sched_switch(sched_mode & SM_MASK_PREEMPT, prev, next, prev_state); + trace_sched_switch(preempt, prev, next, prev_state); /* Also unlocks the rq: */ rq = context_switch(rq, prev, next, &rf); @@ -6664,7 +6671,7 @@ static void sched_update_worker(struct task_struct *tsk) } } -static __always_inline void __schedule_loop(unsigned int sched_mode) +static __always_inline void __schedule_loop(int sched_mode) { do { preempt_disable(); @@ -6709,7 +6716,7 @@ void __sched schedule_idle(void) */ WARN_ON_ONCE(current->__state); do { - __schedule(SM_NONE); + __schedule(SM_IDLE); } while (need_resched()); } -- cgit v1.2.3 From 5d871a63997fa8bcf80adb49ea1f2f7840dff932 Mon Sep 17 00:00:00 2001 From: Vincent Guittot Date: Wed, 4 Sep 2024 11:24:17 +0200 Subject: sched/fair: Move effective_cpu_util() and effective_cpu_util() in fair.c Move effective_cpu_util() and sched_cpu_util() functions in fair.c file with others utilization related functions. No functional change. Signed-off-by: Vincent Guittot Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20240904092417.20660-1-vincent.guittot@linaro.org --- kernel/sched/fair.c | 99 +++++++++++++++++++++++++++++++++++++++++++++++ kernel/sched/syscalls.c | 101 ------------------------------------------------ 2 files changed, 99 insertions(+), 101 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index d697a0a3fc73..9e19009da48f 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -8084,6 +8084,105 @@ static unsigned long cpu_util_without(int cpu, struct task_struct *p) return cpu_util(cpu, p, -1, 0); } +/* + * This function computes an effective utilization for the given CPU, to be + * used for frequency selection given the linear relation: f = u * f_max. + * + * The scheduler tracks the following metrics: + * + * cpu_util_{cfs,rt,dl,irq}() + * cpu_bw_dl() + * + * Where the cfs,rt and dl util numbers are tracked with the same metric and + * synchronized windows and are thus directly comparable. + * + * The cfs,rt,dl utilization are the running times measured with rq->clock_task + * which excludes things like IRQ and steal-time. These latter are then accrued + * in the IRQ utilization. + * + * The DL bandwidth number OTOH is not a measured metric but a value computed + * based on the task model parameters and gives the minimal utilization + * required to meet deadlines. + */ +unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, + unsigned long *min, + unsigned long *max) +{ + unsigned long util, irq, scale; + struct rq *rq = cpu_rq(cpu); + + scale = arch_scale_cpu_capacity(cpu); + + /* + * Early check to see if IRQ/steal time saturates the CPU, can be + * because of inaccuracies in how we track these -- see + * update_irq_load_avg(). 
+ */ + irq = cpu_util_irq(rq); + if (unlikely(irq >= scale)) { + if (min) + *min = scale; + if (max) + *max = scale; + return scale; + } + + if (min) { + /* + * The minimum utilization returns the highest level between: + * - the computed DL bandwidth needed with the IRQ pressure which + * steals time to the deadline task. + * - The minimum performance requirement for CFS and/or RT. + */ + *min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN)); + + /* + * When an RT task is runnable and uclamp is not used, we must + * ensure that the task will run at maximum compute capacity. + */ + if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt)) + *min = max(*min, scale); + } + + /* + * Because the time spend on RT/DL tasks is visible as 'lost' time to + * CFS tasks and we use the same metric to track the effective + * utilization (PELT windows are synchronized) we can directly add them + * to obtain the CPU's actual utilization. + */ + util = util_cfs + cpu_util_rt(rq); + util += cpu_util_dl(rq); + + /* + * The maximum hint is a soft bandwidth requirement, which can be lower + * than the actual utilization because of uclamp_max requirements. + */ + if (max) + *max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX)); + + if (util >= scale) + return scale; + + /* + * There is still idle time; further improve the number by using the + * IRQ metric. Because IRQ/steal time is hidden from the task clock we + * need to scale the task numbers: + * + * max - irq + * U' = irq + --------- * U + * max + */ + util = scale_irq_capacity(util, irq, scale); + util += irq; + + return min(scale, util); +} + +unsigned long sched_cpu_util(int cpu) +{ + return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL); +} + /* * energy_env - Utilization landscape for energy estimation. * @task_busy_time: Utilization contribution by the task for which we test the diff --git a/kernel/sched/syscalls.c b/kernel/sched/syscalls.c index 4fae3cf25a3a..c62acf509b74 100644 --- a/kernel/sched/syscalls.c +++ b/kernel/sched/syscalls.c @@ -258,107 +258,6 @@ int sched_core_idle_cpu(int cpu) #endif -#ifdef CONFIG_SMP -/* - * This function computes an effective utilization for the given CPU, to be - * used for frequency selection given the linear relation: f = u * f_max. - * - * The scheduler tracks the following metrics: - * - * cpu_util_{cfs,rt,dl,irq}() - * cpu_bw_dl() - * - * Where the cfs,rt and dl util numbers are tracked with the same metric and - * synchronized windows and are thus directly comparable. - * - * The cfs,rt,dl utilization are the running times measured with rq->clock_task - * which excludes things like IRQ and steal-time. These latter are then accrued - * in the IRQ utilization. - * - * The DL bandwidth number OTOH is not a measured metric but a value computed - * based on the task model parameters and gives the minimal utilization - * required to meet deadlines. - */ -unsigned long effective_cpu_util(int cpu, unsigned long util_cfs, - unsigned long *min, - unsigned long *max) -{ - unsigned long util, irq, scale; - struct rq *rq = cpu_rq(cpu); - - scale = arch_scale_cpu_capacity(cpu); - - /* - * Early check to see if IRQ/steal time saturates the CPU, can be - * because of inaccuracies in how we track these -- see - * update_irq_load_avg(). 
-	 */
-	irq = cpu_util_irq(rq);
-	if (unlikely(irq >= scale)) {
-		if (min)
-			*min = scale;
-		if (max)
-			*max = scale;
-		return scale;
-	}
-
-	if (min) {
-		/*
-		 * The minimum utilization returns the highest level between:
-		 * - the computed DL bandwidth needed with the IRQ pressure which
-		 *   steals time to the deadline task.
-		 * - The minimum performance requirement for CFS and/or RT.
-		 */
-		*min = max(irq + cpu_bw_dl(rq), uclamp_rq_get(rq, UCLAMP_MIN));
-
-		/*
-		 * When an RT task is runnable and uclamp is not used, we must
-		 * ensure that the task will run at maximum compute capacity.
-		 */
-		if (!uclamp_is_used() && rt_rq_is_runnable(&rq->rt))
-			*min = max(*min, scale);
-	}
-
-	/*
-	 * Because the time spend on RT/DL tasks is visible as 'lost' time to
-	 * CFS tasks and we use the same metric to track the effective
-	 * utilization (PELT windows are synchronized) we can directly add them
-	 * to obtain the CPU's actual utilization.
-	 */
-	util = util_cfs + cpu_util_rt(rq);
-	util += cpu_util_dl(rq);
-
-	/*
-	 * The maximum hint is a soft bandwidth requirement, which can be lower
-	 * than the actual utilization because of uclamp_max requirements.
-	 */
-	if (max)
-		*max = min(scale, uclamp_rq_get(rq, UCLAMP_MAX));
-
-	if (util >= scale)
-		return scale;
-
-	/*
-	 * There is still idle time; further improve the number by using the
-	 * IRQ metric. Because IRQ/steal time is hidden from the task clock we
-	 * need to scale the task numbers:
-	 *
-	 *              max - irq
-	 *   U' = irq + --------- * U
-	 *                 max
-	 */
-	util = scale_irq_capacity(util, irq, scale);
-	util += irq;
-
-	return min(scale, util);
-}
-
-unsigned long sched_cpu_util(int cpu)
-{
-	return effective_cpu_util(cpu, cpu_util_cfs(cpu), NULL, NULL);
-}
-#endif /* CONFIG_SMP */
-
 /**
  * find_process_by_pid - find a process with a matching PID value.
  * @pid: the pid in question.
-- cgit v1.2.3

From 84d265281d6cea65353fc24146280e0d86ac50cb Mon Sep 17 00:00:00 2001
From: Chen Yu
Date: Tue, 27 Aug 2024 19:26:07 +0800
Subject: sched/pelt: Use rq_clock_task() for hw_pressure

commit 97450eb90965 ("sched/pelt: Remove shift of thermal clock") removed
the decay_shift for hw_pressure. That commit uses rq_clock_task() in
sched_tick() while it replaces rq_clock_task() with rq_clock_pelt() in
__update_blocked_others(). This could bring inconsistency. One possible
scenario I can think of is in ___update_load_sum():

	u64 delta = now - sa->last_update_time

'now' could be calculated by rq_clock_pelt() from
__update_blocked_others(), and last_update_time was calculated by
rq_clock_task() previously from sched_tick(). Usually the former lags
behind the latter, which causes a very large 'delta' and brings
unexpected behavior.
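To make the failure mode concrete, here is a minimal user-space sketch
(illustrative, made-up timestamps; not kernel code) of the same unsigned
subtraction: because rq_clock_pelt() normally lags behind rq_clock_task(),
mixing the two clocks makes the delta wrap around:

  #include <inttypes.h>
  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          /* hypothetical timestamps in nanoseconds */
          uint64_t last_update_time = 2000000000ULL; /* stamped via rq_clock_task()    */
          uint64_t now              = 1500000000ULL; /* read via rq_clock_pelt(), lags */

          /* same arithmetic as ___update_load_sum() */
          uint64_t delta = now - last_update_time;

          printf("delta = %" PRIu64 " ns\n", delta); /* wraps to ~1.8e19 ns */
          return 0;
  }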
Fixes: 97450eb90965 ("sched/pelt: Remove shift of thermal clock")
Signed-off-by: Chen Yu
Signed-off-by: Peter Zijlstra (Intel)
Reviewed-by: Hongyan Xia
Reviewed-by: Vincent Guittot
Link: https://lkml.kernel.org/r/20240827112607.181206-1-yu.c.chen@intel.com
---
 kernel/sched/fair.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 9e19009da48f..e946ca0b1ecd 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9719,9 +9719,10 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
 
 	hw_pressure = arch_scale_hw_pressure(cpu_of(rq));
 
+	/* hw_pressure doesn't care about invariance */
 	decayed = update_rt_rq_load_avg(now, rq, curr_class == &rt_sched_class) |
 		  update_dl_rq_load_avg(now, rq, curr_class == &dl_sched_class) |
-		  update_hw_load_avg(now, rq, hw_pressure) |
+		  update_hw_load_avg(rq_clock_task(rq), rq, hw_pressure) |
 		  update_irq_load_avg(rq, 0);
 
 	if (others_have_blocked(rq))
-- cgit v1.2.3

From 6b9ccbc033cf179956a37fef3ee415bdc3029d2f Mon Sep 17 00:00:00 2001
From: Chen Yu
Date: Tue, 27 Aug 2024 19:23:08 +0800
Subject: kthread: Fix task state in kthread worker if being frozen

When analyzing a kernel warning message, Peter pointed out that there is
a race condition when the kworker is being frozen and falls into
try_to_freeze() with TASK_INTERRUPTIBLE, which could trigger a
might_sleep() warning in try_to_freeze(). Although the root cause is not
related to freeze()[1], it is still worth fixing this issue ahead of
time.

One possible race scenario:

CPU 0                                          CPU 1
-----                                          -----
// kthread_worker_fn
set_current_state(TASK_INTERRUPTIBLE);
                                               suspend_freeze_processes()
                                                 freeze_processes
                                                   static_branch_inc(&freezer_active);
                                                 freeze_kernel_threads
                                                   pm_nosig_freezing = true;
if (work) { //false
  __set_current_state(TASK_RUNNING);

} else if (!freezing(current)) //false, been frozen
              freezing():
              if (static_branch_unlikely(&freezer_active))
                if (pm_nosig_freezing)
                  return true;
        schedule()
}

// state is still TASK_INTERRUPTIBLE
try_to_freeze()
  might_sleep() <--- warning

Fix this by explicitly setting TASK_RUNNING before entering
try_to_freeze().

Fixes: b56c0d8937e6 ("kthread: implement kthread_worker")
Suggested-by: Peter Zijlstra
Suggested-by: Andrew Morton
Signed-off-by: Chen Yu
Signed-off-by: Peter Zijlstra (Intel)
Link: https://lore.kernel.org/lkml/Zs2ZoAcUsZMX2B%2FI@chenyu5-mobl2/ [1]
---
 kernel/kthread.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/kernel/kthread.c b/kernel/kthread.c
index f7be976ff88a..db4ceb0f503c 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -845,8 +845,16 @@ repeat:
 		 * event only cares about the address.
 		 */
 		trace_sched_kthread_work_execute_end(work, func);
-	} else if (!freezing(current))
+	} else if (!freezing(current)) {
 		schedule();
+	} else {
+		/*
+		 * Handle the case where the current remains
+		 * TASK_INTERRUPTIBLE. try_to_freeze() expects
+		 * the current to be TASK_RUNNING.
+		 */
+		__set_current_state(TASK_RUNNING);
+	}
 
 	try_to_freeze();
 	cond_resched();
-- cgit v1.2.3

From 729288bc68560b4d5b094cb7a6f794c752ef22a2 Mon Sep 17 00:00:00 2001
From: Dietmar Eggemann
Date: Thu, 5 Sep 2024 00:05:23 +0200
Subject: kernel/sched: Fix util_est accounting for DELAY_DEQUEUE

Remove delayed tasks from util_est even if they are runnable. Exclude
delayed tasks which are (a) migrating between rq's or (b) in a
SAVE/RESTORE dequeue/enqueue.
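A small stand-alone model of the guard applied before touching util_est
(the helper name is made up; the patch below open-codes the condition in
enqueue_task_fair() and dequeue_task_fair()):

  #include <stdbool.h>
  #include <stdio.h>

  /*
   * util_est is updated unless the task is a delayed task that is either
   * migrating between runqueues or inside a SAVE/RESTORE pair.
   */
  static bool util_est_should_update(bool sched_delayed, bool migrating,
                                     bool save_restore)
  {
          return !(sched_delayed && (migrating || save_restore));
  }

  int main(void)
  {
          printf("delayed + migrating   : %d\n", util_est_should_update(true, true, false));   /* 0: skip   */
          printf("delayed + save/restore: %d\n", util_est_should_update(true, false, true));   /* 0: skip   */
          printf("ordinary task         : %d\n", util_est_should_update(false, false, false)); /* 1: update */
          return 0;
  }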
Signed-off-by: Dietmar Eggemann Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/c49ef5fe-a909-43f1-b02f-a765ab9cedbf@arm.com --- kernel/sched/fair.c | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index e946ca0b1ecd..922d69031661 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -6948,18 +6948,19 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) int rq_h_nr_running = rq->cfs.h_nr_running; u64 slice = 0; - if (flags & ENQUEUE_DELAYED) { - requeue_delayed_entity(se); - return; - } - /* * The code below (indirectly) updates schedutil which looks at * the cfs_rq utilization to select a frequency. * Let's add the task's estimated utilization to the cfs_rq's * estimated utilization, before we update schedutil. */ - util_est_enqueue(&rq->cfs, p); + if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & ENQUEUE_RESTORE)))) + util_est_enqueue(&rq->cfs, p); + + if (flags & ENQUEUE_DELAYED) { + requeue_delayed_entity(se); + return; + } /* * If in_iowait is set, the code below may not trigger any cpufreq @@ -7177,7 +7178,8 @@ static int dequeue_entities(struct rq *rq, struct sched_entity *se, int flags) */ static bool dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) { - util_est_dequeue(&rq->cfs, p); + if (!(p->se.sched_delayed && (task_on_rq_migrating(p) || (flags & DEQUEUE_SAVE)))) + util_est_dequeue(&rq->cfs, p); if (dequeue_entities(rq, &p->se, flags) < 0) { util_est_update(&rq->cfs, p, DEQUEUE_SLEEP); -- cgit v1.2.3 From c662e2b1e8cfc3b6329704dab06051f8c3ec2993 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 5 Sep 2024 17:02:24 +0200 Subject: sched: Fix sched_delayed vs sched_core Completely analogous to commit dfa0a574cbc4 ("sched/uclamg: Handle delayed dequeue"), avoid double dequeue for the sched_core entries. 
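As a rough illustration (a toy counter standing in for the core rb-tree;
names made up, not kernel code), gating both hooks on the same flag keeps
the accounting balanced, since an entry that was never enqueued is not
removed a second time:

  #include <stdbool.h>
  #include <stdio.h>

  static int core_tree_entries; /* stand-in for the cookie rb-tree population */

  static void core_enqueue(bool sched_delayed)
  {
          if (sched_delayed)
                  return;
          core_tree_entries++;
  }

  static void core_dequeue(bool sched_delayed)
  {
          if (sched_delayed)
                  return;
          core_tree_entries--;
  }

  int main(void)
  {
          core_enqueue(false); /* normal enqueue                     */
          core_dequeue(true);  /* dequeue while delayed: now a no-op */
          core_dequeue(false); /* the eventual real dequeue          */
          printf("entries = %d\n", core_tree_entries); /* 0, not -1 */
          return 0;
  }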
Fixes: 152e11f6df29 ("sched/fair: Implement delayed dequeue") Signed-off-by: Peter Zijlstra (Intel) --- kernel/sched/core.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 2922facebae7..b4c5d83e54d4 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -259,6 +259,9 @@ static inline int rb_sched_core_cmp(const void *key, const struct rb_node *node) void sched_core_enqueue(struct rq *rq, struct task_struct *p) { + if (p->se.sched_delayed) + return; + rq->core->core_task_seq++; if (!p->core_cookie) @@ -269,6 +272,9 @@ void sched_core_enqueue(struct rq *rq, struct task_struct *p) void sched_core_dequeue(struct rq *rq, struct task_struct *p, int flags) { + if (p->se.sched_delayed) + return; + rq->core->core_task_seq++; if (sched_core_enqueued(p)) { -- cgit v1.2.3 From 2cab4bd024d23f658e40dce209dfd012f4e8b19a Mon Sep 17 00:00:00 2001 From: Huang Shijie Date: Fri, 6 Sep 2024 13:30:19 +0800 Subject: sched/debug: Fix the runnable tasks output The current runnable tasks output looks like: runnable tasks: S task PID tree-key switches prio wait-time sum-exec sum-sleep ------------------------------------------------------------------------------------------------------------- Ikworker/R-rcu_g 4 0.129049 E 0.620179 0.750000 0.002920 2 100 0.000000 0.002920 0.000000 0.000000 0 0 / Ikworker/R-sync_ 5 0.125328 E 0.624147 0.750000 0.001840 2 100 0.000000 0.001840 0.000000 0.000000 0 0 / Ikworker/R-slub_ 6 0.120835 E 0.628680 0.750000 0.001800 2 100 0.000000 0.001800 0.000000 0.000000 0 0 / Ikworker/R-netns 7 0.114294 E 0.634701 0.750000 0.002400 2 100 0.000000 0.002400 0.000000 0.000000 0 0 / I kworker/0:1 9 508.781746 E 511.754666 3.000000 151.575240 224 120 0.000000 151.575240 0.000000 0.000000 0 0 / Which is messy. 
Remove the duplicate printing of sum_exec_runtime and tidy up the layout to make it look like: runnable tasks: S task PID vruntime eligible deadline slice sum-exec switches prio wait-time sum-sleep sum-block node group-id group-path ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- I kworker/0:3 1698 295.001459 E 297.977619 3.000000 38.862920 9 120 0.000000 0.000000 0.000000 0 0 / I kworker/0:4 1702 278.026303 E 281.026303 3.000000 9.918760 3 120 0.000000 0.000000 0.000000 0 0 / S NetworkManager 2646 0.377936 E 2.598104 3.000000 98.535880 314 120 0.000000 0.000000 0.000000 0 0 /system.slice/NetworkManager.service S virtqemud 2689 0.541016 E 2.440104 3.000000 50.967960 80 120 0.000000 0.000000 0.000000 0 0 /system.slice/virtqemud.service S gsd-smartcard 3058 73.604144 E 76.475904 3.000000 74.033320 88 120 0.000000 0.000000 0.000000 0 0 /user.slice/user-42.slice/session-c1.scope Reviewed-by: Christoph Lameter (Ampere) Signed-off-by: Huang Shijie Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20240906053019.7874-1-shijie@os.amperecomputing.com --- kernel/sched/debug.c | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c index 01ce9a76164c..de1dc5264b3f 100644 --- a/kernel/sched/debug.c +++ b/kernel/sched/debug.c @@ -739,7 +739,7 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) else SEQ_printf(m, " %c", task_state_to_char(p)); - SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld %5d ", + SEQ_printf(m, " %15s %5d %9Ld.%06ld %c %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld %5d ", p->comm, task_pid_nr(p), SPLIT_NS(p->se.vruntime), entity_eligible(cfs_rq_of(&p->se), &p->se) ? 
'E' : 'N', @@ -750,17 +750,16 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) (long long)(p->nvcsw + p->nivcsw), p->prio); - SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld %9lld.%06ld", + SEQ_printf(m, "%9lld.%06ld %9lld.%06ld %9lld.%06ld", SPLIT_NS(schedstat_val_or_zero(p->stats.wait_sum)), - SPLIT_NS(p->se.sum_exec_runtime), SPLIT_NS(schedstat_val_or_zero(p->stats.sum_sleep_runtime)), SPLIT_NS(schedstat_val_or_zero(p->stats.sum_block_runtime))); #ifdef CONFIG_NUMA_BALANCING - SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); + SEQ_printf(m, " %d %d", task_node(p), task_numa_group_id(p)); #endif #ifdef CONFIG_CGROUP_SCHED - SEQ_printf_task_group_path(m, task_group(p), " %s") + SEQ_printf_task_group_path(m, task_group(p), " %s") #endif SEQ_printf(m, "\n"); @@ -772,10 +771,26 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) SEQ_printf(m, "\n"); SEQ_printf(m, "runnable tasks:\n"); - SEQ_printf(m, " S task PID tree-key switches prio" - " wait-time sum-exec sum-sleep\n"); + SEQ_printf(m, " S task PID vruntime eligible " + "deadline slice sum-exec switches " + "prio wait-time sum-sleep sum-block" +#ifdef CONFIG_NUMA_BALANCING + " node group-id" +#endif +#ifdef CONFIG_CGROUP_SCHED + " group-path" +#endif + "\n"); SEQ_printf(m, "-------------------------------------------------------" - "------------------------------------------------------\n"); + "------------------------------------------------------" + "------------------------------------------------------" +#ifdef CONFIG_NUMA_BALANCING + "--------------" +#endif +#ifdef CONFIG_CGROUP_SCHED + "--------------" +#endif + "\n"); rcu_read_lock(); for_each_process_thread(g, p) { -- cgit v1.2.3 From 8bee4ca5bd64afa6a08c40db621d3c6336685bcf Mon Sep 17 00:00:00 2001 From: Christian Loehle Date: Tue, 13 Aug 2024 15:43:45 +0100 Subject: sched/deadline: Convert schedtool example to chrt chrt has SCHED_DEADLINE support so convert the example instead of relying on a schedtool fork. While at it fix the wrong mentioning of microseconds, it was nanoseconds for both schedtool and chrt. Signed-off-by: Christian Loehle Signed-off-by: Peter Zijlstra (Intel) Acked-by: Juri Lelli Acked-by: Rafael J. Wysocki Link: https://lore.kernel.org/r/20240813144348.1180344-2-christian.loehle@arm.com --- Documentation/scheduler/sched-deadline.rst | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/Documentation/scheduler/sched-deadline.rst b/Documentation/scheduler/sched-deadline.rst index 9fe4846079bb..22838ed8e13a 100644 --- a/Documentation/scheduler/sched-deadline.rst +++ b/Documentation/scheduler/sched-deadline.rst @@ -749,21 +749,19 @@ Appendix A. Test suite of the command line options. Please refer to rt-app documentation for more details (`/doc/*.json`). - The second testing application is a modification of schedtool, called - schedtool-dl, which can be used to setup SCHED_DEADLINE parameters for a - certain pid/application. schedtool-dl is available at: - https://github.com/scheduler-tools/schedtool-dl.git. + The second testing application is done using chrt which has support + for SCHED_DEADLINE. The usage is straightforward:: - # schedtool -E -t 10000000:100000000 -e ./my_cpuhog_app + # chrt -d -T 10000000 -D 100000000 0 ./my_cpuhog_app With this, my_cpuhog_app is put to run inside a SCHED_DEADLINE reservation - of 10ms every 100ms (note that parameters are expressed in microseconds). 
- You can also use schedtool to create a reservation for an already running + of 10ms every 100ms (note that parameters are expressed in nanoseconds). + You can also use chrt to create a reservation for an already running application, given that you know its pid:: - # schedtool -E -t 10000000:100000000 my_app_pid + # chrt -d -T 10000000 -D 100000000 -p 0 my_app_pid Appendix B. Minimal main() ========================== -- cgit v1.2.3 From 6ebf2d021a13a77b495b4de15c6834e26b80d08e Mon Sep 17 00:00:00 2001 From: Christian Loehle Date: Tue, 13 Aug 2024 15:43:46 +0100 Subject: sched/deadline: Clarify nanoseconds in uapi Specify the time values of the deadline parameters of deadline, runtime, and period as being in nanoseconds explicitly as they always have been. Signed-off-by: Christian Loehle Signed-off-by: Peter Zijlstra (Intel) Acked-by: Juri Lelli Acked-by: Rafael J. Wysocki Link: https://lore.kernel.org/r/20240813144348.1180344-3-christian.loehle@arm.com --- include/uapi/linux/sched/types.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/sched/types.h b/include/uapi/linux/sched/types.h index 90662385689b..bf6e9ae031c1 100644 --- a/include/uapi/linux/sched/types.h +++ b/include/uapi/linux/sched/types.h @@ -58,9 +58,9 @@ * * This is reflected by the following fields of the sched_attr structure: * - * @sched_deadline representative of the task's deadline - * @sched_runtime representative of the task's runtime - * @sched_period representative of the task's period + * @sched_deadline representative of the task's deadline in nanoseconds + * @sched_runtime representative of the task's runtime in nanoseconds + * @sched_period representative of the task's period in nanoseconds * * Given this task model, there are a multiplicity of scheduling algorithms * and policies, that can be used to ensure all the tasks will make their -- cgit v1.2.3 From 4eb71e3b4550593d9a862cc0aa754fad98679783 Mon Sep 17 00:00:00 2001 From: Christian Loehle Date: Tue, 13 Aug 2024 15:43:47 +0100 Subject: cpufreq/cppc: Use NSEC_PER_MSEC for deadline task Convert the cppc deadline task attributes to use the available definitions to make them more readable. No functional change. Signed-off-by: Christian Loehle Signed-off-by: Peter Zijlstra (Intel) Acked-by: Juri Lelli Acked-by: Rafael J. Wysocki Link: https://lore.kernel.org/r/20240813144348.1180344-4-christian.loehle@arm.com --- drivers/cpufreq/cppc_cpufreq.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/cpufreq/cppc_cpufreq.c b/drivers/cpufreq/cppc_cpufreq.c index bafa32dd375d..1a5ad184d28f 100644 --- a/drivers/cpufreq/cppc_cpufreq.c +++ b/drivers/cpufreq/cppc_cpufreq.c @@ -224,9 +224,9 @@ static void __init cppc_freq_invariance_init(void) * Fake (unused) bandwidth; workaround to "fix" * priority inheritance. */ - .sched_runtime = 1000000, - .sched_deadline = 10000000, - .sched_period = 10000000, + .sched_runtime = NSEC_PER_MSEC, + .sched_deadline = 10 * NSEC_PER_MSEC, + .sched_period = 10 * NSEC_PER_MSEC, }; int ret; -- cgit v1.2.3 From bc9057da1a220ff2cb6c8885fd5352558aceba2c Mon Sep 17 00:00:00 2001 From: Christian Loehle Date: Tue, 13 Aug 2024 15:43:48 +0100 Subject: sched/cpufreq: Use NSEC_PER_MSEC for deadline task Convert the sugov deadline task attributes to use the available definitions to make them more readable. No functional change. Signed-off-by: Christian Loehle Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Vincent Guittot Acked-by: Juri Lelli Acked-by: Rafael J. 
Wysocki Link: https://lore.kernel.org/r/20240813144348.1180344-5-christian.loehle@arm.com --- kernel/sched/cpufreq_schedutil.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c index eece6244f9d2..43111a515a28 100644 --- a/kernel/sched/cpufreq_schedutil.c +++ b/kernel/sched/cpufreq_schedutil.c @@ -654,9 +654,9 @@ static int sugov_kthread_create(struct sugov_policy *sg_policy) * Fake (unused) bandwidth; workaround to "fix" * priority inheritance. */ - .sched_runtime = 1000000, - .sched_deadline = 10000000, - .sched_period = 10000000, + .sched_runtime = NSEC_PER_MSEC, + .sched_deadline = 10 * NSEC_PER_MSEC, + .sched_period = 10 * NSEC_PER_MSEC, }; struct cpufreq_policy *policy = sg_policy->policy; int ret; -- cgit v1.2.3
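For reference, a minimal user-space sketch of the same 10ms/100ms
SCHED_DEADLINE reservation that the chrt example above sets up, with all
three parameters in nanoseconds as the uapi comment now spells out. It
assumes a libc without a sched_setattr() wrapper, so the attribute struct
is copied locally and the raw syscall is used:

  #define _GNU_SOURCE
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>
  #include <sys/syscall.h>
  #include <linux/types.h>

  #define NSEC_PER_MSEC   1000000ULL  /* same value as the kernel's NSEC_PER_MSEC */
  #define SCHED_DEADLINE  6           /* from include/uapi/linux/sched.h */

  /* Local copy of the layout documented in include/uapi/linux/sched/types.h. */
  struct sched_attr {
          __u32 size;
          __u32 sched_policy;
          __u64 sched_flags;
          __s32 sched_nice;
          __u32 sched_priority;
          __u64 sched_runtime;
          __u64 sched_deadline;
          __u64 sched_period;
  };

  int main(void)
  {
          struct sched_attr attr;

          memset(&attr, 0, sizeof(attr));
          attr.size           = sizeof(attr);
          attr.sched_policy   = SCHED_DEADLINE;
          attr.sched_runtime  = 10 * NSEC_PER_MSEC;   /*  10 ms */
          attr.sched_deadline = 100 * NSEC_PER_MSEC;  /* 100 ms */
          attr.sched_period   = 100 * NSEC_PER_MSEC;  /* 100 ms */

          if (syscall(SYS_sched_setattr, 0, &attr, 0)) {
                  perror("sched_setattr");
                  return 1;
          }

          /* ... the CPU hog work would run here under the reservation ... */
          return 0;
  }

The call needs CAP_SYS_NICE and can fail with EBUSY when the requested
bandwidth does not pass admission control.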