author | Peter Zijlstra <peterz@infradead.org> | 2024-08-14 01:25:54 +0300
---|---|---
committer | Peter Zijlstra <peterz@infradead.org> | 2024-09-03 16:26:31 +0300
commit | 436f3eed5c69c1048a5754df6e3dbb291e5cccbd (patch) |
tree | bec07e3613426cc839f683bd77be69b3ef86e4af |
parent | fd03c5b8585562d60f8b597b4332d28f48abfe7d (diff) |
download | linux-436f3eed5c69c1048a5754df6e3dbb291e5cccbd.tar.xz |
sched: Combine the last put_prev_task() and the first set_next_task()
Ensure the last put_prev_task() and the first set_next_task() always
go together.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20240813224016.158454756@infradead.org
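For illustration, here is a minimal user-space sketch of the invariant the commit message describes: the "last" put_prev_task() and the "first" set_next_task() are issued back to back from one helper, which also no-ops when the picked task did not change. The types and names below (struct task, demo_class, put_prev, set_next) are stand-ins invented for the sketch, not kernel APIs, and assert() stands in for WARN_ON_ONCE().

```c
#include <assert.h>
#include <stdio.h>

struct task;

/* Per-class callbacks, mirroring the kernel's sched_class methods. */
struct sched_class {
	void (*put_prev_task)(struct task *prev);
	void (*set_next_task)(struct task *next, int first);
};

struct task {
	const char *name;
	const struct sched_class *sched_class;
};

static void put_prev(struct task *prev)
{
	printf("put_prev_task(%s)\n", prev->name);
}

static void set_next(struct task *next, int first)
{
	printf("set_next_task(%s, first=%d)\n", next->name, first);
}

static const struct sched_class demo_class = {
	.put_prev_task = put_prev,
	.set_next_task = set_next,
};

/*
 * The fused helper: callers can no longer forget one half of the
 * pair or reorder the calls, and the next == prev short-circuit
 * lives in exactly one place.
 */
static void put_prev_set_next_task(struct task *curr,
				   struct task *prev,
				   struct task *next)
{
	assert(curr == prev);	/* stands in for WARN_ON_ONCE() */

	if (next == prev)
		return;

	prev->sched_class->put_prev_task(prev);
	next->sched_class->set_next_task(next, 1);
}

int main(void)
{
	struct task a = { "prev", &demo_class };
	struct task b = { "next", &demo_class };

	put_prev_set_next_task(&a, &a, &b);	/* switch: both callbacks fire */
	put_prev_set_next_task(&b, &b, &b);	/* same task picked again: no-op */
	return 0;
}
```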
-rw-r--r-- | kernel/sched/core.c | 17
-rw-r--r-- | kernel/sched/fair.c | 3
-rw-r--r-- | kernel/sched/sched.h | 10
3 files changed, 14 insertions, 16 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b9429eb5dbbe..8a1cf93da203 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5894,8 +5894,7 @@ __pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	/* Assume the next prioritized class is idle_sched_class */
 	if (!p) {
 		p = pick_task_idle(rq);
-		put_prev_task(rq, prev);
-		set_next_task_first(rq, p);
+		put_prev_set_next_task(rq, prev, p);
 	}
 
 	/*
@@ -5926,8 +5925,7 @@ restart:
 	} else {
 		p = class->pick_task(rq);
 		if (p) {
-			put_prev_task(rq, prev);
-			set_next_task_first(rq, p);
+			put_prev_set_next_task(rq, prev, p);
 			return p;
 		}
 	}
@@ -6016,13 +6014,8 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 		WRITE_ONCE(rq->core_sched_seq, rq->core->core_pick_seq);
 
 		next = rq->core_pick;
-		if (next != prev) {
-			put_prev_task(rq, prev);
-			set_next_task_first(rq, next);
-		}
-
 		rq->core_pick = NULL;
-		goto out;
+		goto out_set_next;
 	}
 
 	prev_balance(rq, prev, rf);
@@ -6192,9 +6185,7 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 out_set_next:
-	put_prev_task(rq, prev);
-	set_next_task_first(rq, next);
-out:
+	put_prev_set_next_task(rq, prev, next);
 	if (rq->core->core_forceidle_count && next == rq->idle)
 		queue_core_balance(rq);
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 53556b08edef..c5b7873dcc30 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8819,8 +8819,7 @@ again:
 simple:
 #endif
-	put_prev_task(rq, prev);
-	set_next_task_fair(rq, p, true);
+	put_prev_set_next_task(rq, prev, p);
 
 	return p;
 
 idle:
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 64a4ed758ba1..aae35818cca4 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2370,8 +2370,16 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
 	next->sched_class->set_next_task(rq, next, false);
 }
 
-static inline void set_next_task_first(struct rq *rq, struct task_struct *next)
+static inline void put_prev_set_next_task(struct rq *rq,
+					  struct task_struct *prev,
+					  struct task_struct *next)
 {
+	WARN_ON_ONCE(rq->curr != prev);
+
+	if (next == prev)
+		return;
+
+	prev->sched_class->put_prev_task(rq, prev);
 	next->sched_class->set_next_task(rq, next, true);
 }
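A note on the design visible in the sched.h hunk: folding the pair into put_prev_set_next_task() also centralizes the next == prev short-circuit, which is why the core-scheduling path in pick_next_task() can drop its open-coded `if (next != prev)` block and the separate `out:` label collapses into `out_set_next:`. The WARN_ON_ONCE(rq->curr != prev) documents the calling convention the helper assumes: it must only run while prev is still the runqueue's current task.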