author    Aruna Ramakrishna <aruna.ramakrishna@oracle.com>  2025-07-09 20:33:28 +0300
committer Peter Zijlstra <peterz@infradead.org>             2025-07-14 11:59:31 +0300
commit    36569780b0d64de283f9d6c2195fd1a43e221ee8
tree      94292b3c406e76d645dbc9e2bd0118240ed9c00d
parent    347e9f5043c89695b01e66b3ed111755afcf1911
sched: Change nr_uninterruptible type to unsigned long
The commit e6fe3f422be1 ("sched: Make multiple runqueue task counters 32-bit") changed nr_uninterruptible to an unsigned int. But the nr_uninterruptible values for each of the CPU runqueues can grow to large numbers, sometimes exceeding INT_MAX. This is valid if, over time, a large number of tasks are migrated off of one CPU after going into an uninterruptible state. Only the sum of all nr_uninterruptible values across all CPUs yields the correct result, as explained in a comment in kernel/sched/loadavg.c.

Change the type of nr_uninterruptible back to unsigned long to prevent overflows, and thus the miscalculation of load average.

Fixes: e6fe3f422be1 ("sched: Make multiple runqueue task counters 32-bit")
Signed-off-by: Aruna Ramakrishna <aruna.ramakrishna@oracle.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20250709173328.606794-1-aruna.ramakrishna@oracle.com
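To make the overflow concrete, here is a minimal userspace sketch (not kernel code; the variable names and the value are invented for illustration, and a 64-bit long with two's-complement wrap-around is assumed) of what the old unsigned int storage plus the (int) cast in calc_load_fold_active() do once a per-CPU counter creeps past INT_MAX:

/*
 * Userspace sketch only -- not kernel code. Field names loosely mirror
 * struct rq; the value is invented and a 64-bit 'long' plus
 * two's-complement wrap-around is assumed.
 */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	/*
	 * True per-CPU count just past INT_MAX, e.g. because many tasks
	 * went uninterruptible on this CPU and were woken on other CPUs.
	 */
	unsigned long true_count = (unsigned long)INT_MAX + 1;

	unsigned int  nr_uninterruptible_old = (unsigned int)true_count; /* pre-patch field  */
	unsigned long nr_uninterruptible_new = true_count;               /* post-patch field */

	/* Per-CPU contribution as calc_load_fold_active() would see it. */
	long nr_active_old = (int)nr_uninterruptible_old;   /* old (int) cast  */
	long nr_active_new = (long)nr_uninterruptible_new;  /* new (long) cast */

	printf("old cast: %ld\n", nr_active_old);  /* -2147483648: sign flipped   */
	printf("new cast: %ld\n", nr_active_new);  /*  2147483648: the true value */
	return 0;
}

With the old types the per-CPU contribution is off by 2^32 as soon as the counter crosses INT_MAX, and that error propagates into the folded calc_load_tasks sum, which is what miscalculates the load average.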
-rw-r--r--  kernel/sched/loadavg.c | 2 +-
-rw-r--r--  kernel/sched/sched.h   | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index c48900b856a2..52ca8e268cfc 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -80,7 +80,7 @@ long calc_load_fold_active(struct rq *this_rq, long adjust)
 	long nr_active, delta = 0;
 
 	nr_active = this_rq->nr_running - adjust;
-	nr_active += (int)this_rq->nr_uninterruptible;
+	nr_active += (long)this_rq->nr_uninterruptible;
 
 	if (nr_active != this_rq->calc_load_active) {
 		delta = nr_active - this_rq->calc_load_active;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 475bb5998295..83e3aa917142 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1149,7 +1149,7 @@ struct rq {
 	 * one CPU and if it got migrated afterwards it may decrease
 	 * it on another CPU. Always updated under the runqueue lock:
 	 */
-	unsigned int		nr_uninterruptible;
+	unsigned long		nr_uninterruptible;
 
 	union {
 		struct task_struct __rcu *donor;	/* Scheduler context */