author | Vincent Guittot <vincent.guittot@linaro.org> | 2021-02-24 16:30:07 +0300
committer | Ingo Molnar <mingo@kernel.org> | 2021-03-06 14:40:22 +0300
commit | 39b6a429c30482c349f1bb3746470fe473cbdb0f (patch)
tree | 5abc6053e719b9429dd5390c483651ca4703cca4 /kernel/sched
parent | c6f886546cb8a38617cdbe755fe50d3acd2463e4 (diff)
download | linux-39b6a429c30482c349f1bb3746470fe473cbdb0f.tar.xz
sched/fair: Reduce the window for duplicated update
Update last_blocked_load_update_tick at the start of update_blocked_averages()
to reduce the window in which another CPU can start a duplicated update.
Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Valentin Schneider <valentin.schneider@arm.com>
Link: https://lkml.kernel.org/r/20210224133007.28644-8-vincent.guittot@linaro.org
Diffstat (limited to 'kernel/sched')
-rw-r--r-- | kernel/sched/fair.c | 11
1 file changed, 8 insertions, 3 deletions
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e87e1b3bcdca..f1b55f9a085d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7852,16 +7852,20 @@ static inline bool others_have_blocked(struct rq *rq)
 	return false;
 }
 
-static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
+static inline void update_blocked_load_tick(struct rq *rq)
 {
-	rq->last_blocked_load_update_tick = jiffies;
+	WRITE_ONCE(rq->last_blocked_load_update_tick, jiffies);
+}
 
+static inline void update_blocked_load_status(struct rq *rq, bool has_blocked)
+{
 	if (!has_blocked)
 		rq->has_blocked_load = 0;
 }
 #else
 static inline bool cfs_rq_has_blocked(struct cfs_rq *cfs_rq) { return false; }
 static inline bool others_have_blocked(struct rq *rq) { return false; }
+static inline void update_blocked_load_tick(struct rq *rq) {}
 static inline void update_blocked_load_status(struct rq *rq, bool has_blocked) {}
 #endif
 
@@ -8022,6 +8026,7 @@ static void update_blocked_averages(int cpu)
 	struct rq_flags rf;
 
 	rq_lock_irqsave(rq, &rf);
+	update_blocked_load_tick(rq);
 	update_rq_clock(rq);
 
 	decayed |= __update_blocked_others(rq, &done);
@@ -8363,7 +8368,7 @@ static bool update_nohz_stats(struct rq *rq)
 	if (!cpumask_test_cpu(cpu, nohz.idle_cpus_mask))
 		return false;
 
-	if (!time_after(jiffies, rq->last_blocked_load_update_tick))
+	if (!time_after(jiffies, READ_ONCE(rq->last_blocked_load_update_tick)))
 		return true;
 
 	update_blocked_averages(cpu);
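The pattern is easy to demonstrate outside the kernel. Below is a minimal userspace sketch, not kernel code: all names (fake_rq, now_ms(), do_update(), maybe_update()) are hypothetical, and C11 atomics stand in for WRITE_ONCE()/READ_ONCE(). The updater stamps a shared "last update" time at the start of the work, so a second caller that checks the stamp is more likely to see it already refreshed and skip a duplicated update.

```c
/*
 * Userspace sketch of the pattern in this patch (hypothetical names,
 * not kernel code): stamp "last update" at the START of the work so a
 * concurrent checker is more likely to see a fresh stamp and skip a
 * duplicated update.  C11 atomics stand in for WRITE_ONCE()/READ_ONCE().
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct fake_rq {
	_Atomic long last_update_ms;	/* analogue of last_blocked_load_update_tick */
};

static long now_ms(void)		/* analogue of jiffies, in milliseconds */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Analogue of update_blocked_averages(): stamp first, then do the work. */
static void do_update(struct fake_rq *rq)
{
	/* stamping before the work shrinks the duplicated-update window */
	atomic_store_explicit(&rq->last_update_ms, now_ms(),
			      memory_order_relaxed);

	/* ... expensive load-tracking update would go here ... */
}

/* Analogue of the update_nohz_stats() check: only update if the stamp is stale. */
static bool maybe_update(struct fake_rq *rq)
{
	long last = atomic_load_explicit(&rq->last_update_ms,
					 memory_order_relaxed);

	/* analogue of "if (!time_after(jiffies, last_blocked_load_update_tick))" */
	if (now_ms() <= last)
		return false;	/* updated within the current "tick": skip */

	do_update(rq);
	return true;
}

int main(void)
{
	struct fake_rq rq = { .last_update_ms = 0 };

	printf("first caller updated:  %d\n", maybe_update(&rq));	/* 1 */
	printf("second caller updated: %d\n", maybe_update(&rq));	/* usually 0 */
	return 0;
}
```

The kernel change is the same idea applied to rq->last_blocked_load_update_tick: update_blocked_averages() now stamps the tick before doing the update (under the rq lock), and update_nohz_stats() reads it with READ_ONCE(), so a racing CPU that already sees the new tick skips its own call to update_blocked_averages().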