| author | Peter Zijlstra <peterz@infradead.org> | 2017-12-21 12:01:24 +0300 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2018-03-09 09:59:16 +0300 |
| commit | a22e47a4e3f5a9e50a827c5d94705ace3b1eac0b (patch) | |
| tree | 907cfb61b5233db6935d234e9de615796d137acf /kernel/sched/fair.c | |
| parent | 8f111bc357aa811e0bb5fdfe34c4c9efdafc15b9 (diff) | |
| download | linux-a22e47a4e3f5a9e50a827c5d94705ace3b1eac0b.tar.xz | |
sched/core: Convert nohz_flags to atomic_t
Using atomic_t allows us to use the more flexible bitops provided
there. Also, it's smaller: an atomic_t is 32 bits, whereas the old-style
bitops operate on a full unsigned long.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
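
To see the resulting pattern in isolation, here is a minimal user-space sketch using C11 `<stdatomic.h>` as a stand-in for the kernel's atomic_t API. The NOHZ_* values are made up for the example, and note that the kernel's atomic_fetch_or() takes its arguments value-then-pointer, the reverse of C11's pointer-then-value:

```c
/*
 * Illustrative user-space sketch of the flag handling this patch adopts.
 * C11 <stdatomic.h> stands in for the kernel's atomic_t API; the NOHZ_*
 * mask values below are made up for the example.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NOHZ_BALANCE_KICK  0x1   /* illustrative mask values */
#define NOHZ_TICK_STOPPED  0x2

static atomic_uint nohz_flags;

int main(void)
{
	/* nohz_balancer_kick(): set the bit and fetch the old value in one RMW */
	unsigned int flags = atomic_fetch_or(&nohz_flags, NOHZ_BALANCE_KICK);
	if (flags & NOHZ_BALANCE_KICK)
		puts("kick already pending, nothing to do");
	else
		puts("kick sent");

	/* test_bit() becomes a plain atomic read plus a mask test */
	if (atomic_load(&nohz_flags) & NOHZ_TICK_STOPPED)
		puts("tick stopped");

	/* clear_bit() becomes atomic_andnot(); C11 spells it fetch_and with ~mask */
	atomic_fetch_and(&nohz_flags, ~NOHZ_BALANCE_KICK);
	return 0;
}
```

The payoff over test_and_set_bit()/clear_bit() is the richer set of read-modify-write operations (fetch_or, or, andnot) on a type that is also half the size of an unsigned long on 64-bit.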
Diffstat (limited to 'kernel/sched/fair.c')
| -rw-r--r-- | kernel/sched/fair.c | 23 |
1 file changed, 15 insertions(+), 8 deletions(-)
```diff
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 097db34d5ba2..5d150478dd58 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -9072,6 +9072,7 @@ static inline int find_new_ilb(void)
  */
 static void nohz_balancer_kick(void)
 {
+	unsigned int flags;
 	int ilb_cpu;
 
 	nohz.next_balance++;
@@ -9081,7 +9082,8 @@ static void nohz_balancer_kick(void)
 	if (ilb_cpu >= nr_cpu_ids)
 		return;
 
-	if (test_and_set_bit(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu)))
+	flags = atomic_fetch_or(NOHZ_BALANCE_KICK, nohz_flags(ilb_cpu));
+	if (flags & NOHZ_BALANCE_KICK)
 		return;
 	/*
 	 * Use smp_send_reschedule() instead of resched_cpu().
@@ -9095,7 +9097,9 @@ static void nohz_balancer_kick(void)
 
 void nohz_balance_exit_idle(unsigned int cpu)
 {
-	if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+	unsigned int flags = atomic_read(nohz_flags(cpu));
+
+	if (unlikely(flags & NOHZ_TICK_STOPPED)) {
 		/*
 		 * Completely isolated CPUs don't ever set, so we must test.
 		 */
@@ -9103,7 +9107,8 @@ void nohz_balance_exit_idle(unsigned int cpu)
 			cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
 			atomic_dec(&nohz.nr_cpus);
 		}
-		clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+
+		atomic_andnot(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 	}
 }
 
@@ -9155,7 +9160,7 @@ void nohz_balance_enter_idle(int cpu)
 	if (!housekeeping_cpu(cpu, HK_FLAG_SCHED))
 		return;
 
-	if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
+	if (atomic_read(nohz_flags(cpu)) & NOHZ_TICK_STOPPED)
 		return;
 
 	/* If we're a completely isolated CPU, we don't play: */
@@ -9164,7 +9169,7 @@ void nohz_balance_enter_idle(int cpu)
 
 	cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
 	atomic_inc(&nohz.nr_cpus);
-	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+	atomic_or(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
 #endif
 
@@ -9302,8 +9307,10 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	unsigned long next_balance = jiffies + 60*HZ;
 	int update_next_balance = 0;
 
-	if (idle != CPU_IDLE ||
-	    !test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
+	if (!(atomic_read(nohz_flags(this_cpu)) & NOHZ_BALANCE_KICK))
+		return;
+
+	if (idle != CPU_IDLE)
 		goto end;
 
 	for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
@@ -9349,7 +9356,7 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
 	if (likely(update_next_balance))
 		nohz.next_balance = next_balance;
 end:
-	clear_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
+	atomic_andnot(NOHZ_BALANCE_KICK, nohz_flags(this_cpu));
 }
 
 /*
```
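
A side note on the two APIs being swapped, with shapes simplified from the kernel headers (exact prototypes vary by architecture and kernel version): the bitops address a bit by *number* inside an unsigned long array, while the atomic_t ops take a *mask*, which is why the NOHZ_* constants must be mask-valued after this conversion.

```c
/* Simplified shapes, not verbatim kernel prototypes. */

/* Old: bitops take a bit number and an unsigned long pointer. */
int  test_and_set_bit(long nr, volatile unsigned long *addr);
int  test_bit(long nr, const volatile unsigned long *addr);
void set_bit(long nr, volatile unsigned long *addr);
void clear_bit(long nr, volatile unsigned long *addr);

/* New: atomic_t ops take a mask and an atomic_t pointer. */
int  atomic_fetch_or(int i, atomic_t *v);
void atomic_or(int i, atomic_t *v);
void atomic_andnot(int i, atomic_t *v);
int  atomic_read(const atomic_t *v);
```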