author     Linus Torvalds <torvalds@linux-foundation.org>  2022-01-23 18:35:27 +0300
committer  Linus Torvalds <torvalds@linux-foundation.org>  2022-01-23 18:35:27 +0300
commit     10c64a0f280636652ec63bb1ddd34b6c8e2f5584 (patch)
tree       00a69b7b5b5ffc4812c914982f4b82c4b0382d7d /include/linux/sched.h
parent     0f9e04221227302d665bf64c69e5308360f4f88a (diff)
parent     0e3872499de1a1230cef5221607d71aa09264bd5 (diff)
Merge tag 'sched_urgent_for_v5.17_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Borislav Petkov:
 "A bunch of fixes: forced idle time accounting, utilization values
  propagation in the sched hierarchies and other minor cleanups and
  improvements"

* tag 'sched_urgent_for_v5.17_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  kernel/sched: Remove dl_boosted flag comment
  sched: Avoid double preemption in __cond_resched_*lock*()
  sched/fair: Fix all kernel-doc warnings
  sched/core: Accounting forceidle time for all tasks except idle task
  sched/pelt: Relax the sync of load_sum with load_avg
  sched/pelt: Relax the sync of runnable_sum with runnable_avg
  sched/pelt: Continue to relax the sync of util_sum with util_avg
  sched/pelt: Relax the sync of util_sum with util_avg
  psi: Fix uaf issue when psi trigger is destroyed while being polled
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h  |  4 ----
1 file changed, 0 insertions, 4 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 508b91d57470..f5b2be39a78c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -619,10 +619,6 @@ struct sched_dl_entity {
* task has to wait for a replenishment to be performed at the
* next firing of dl_timer.
*
- * @dl_boosted tells if we are boosted due to DI. If so we are
- * outside bandwidth enforcement mechanism (but only until we
- * exit the critical section);
- *
* @dl_yielded tells if task gave up the CPU before consuming
* all its available runtime during the last job.
*
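
The hunk above only removes the stale @dl_boosted paragraph from the kernel-doc comment: the flag it described was dropped from struct sched_dl_entity in an earlier deadline/PI rework, which detects boosting by comparing the entity against its priority-inheritance donor instead. As a rough sketch only (the helper name is invented here, and the pi_se field is recalled from the scheduler sources rather than shown in this diff), the replacement check amounts to:

    /* Illustrative sketch, not verbatim kernel code: with dl_boosted gone,
     * a deadline entity counts as boosted while it runs with a PI donor's
     * parameters, i.e. its pi_se pointer no longer refers back to itself.
     */
    static inline bool dl_entity_is_boosted(const struct sched_dl_entity *dl_se)
    {
            return dl_se->pi_se != dl_se;
    }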