author    Pavel Tatashin <pasha.tatashin@oracle.com>	2018-07-19 23:55:43 +0300
committer Thomas Gleixner <tglx@linutronix.de>	2018-07-20 01:02:43 +0300
commit    46457ea464f5341d1f9dad8dd213805d45f7f117
tree      316f05b91ec512ef269e8bdd0cc89e7ea062e0f3
parent    857baa87b6422bcfb84ed3631d6839920cb5b09d
download  linux-46457ea464f5341d1f9dad8dd213805d45f7f117.tar.xz
sched/clock: Use static key for sched_clock_running
sched_clock_running may be read every time sched_clock_cpu() is called. Yet,
this variable is updated only twice during boot, and never changes again;
therefore it is better to make it a static key.

Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: steven.sistare@oracle.com
Cc: daniel.m.jordan@oracle.com
Cc: linux@armlinux.org.uk
Cc: schwidefsky@de.ibm.com
Cc: heiko.carstens@de.ibm.com
Cc: john.stultz@linaro.org
Cc: sboyd@codeaurora.org
Cc: hpa@zytor.com
Cc: douly.fnst@cn.fujitsu.com
Cc: prarit@redhat.com
Cc: feng.tang@intel.com
Cc: pmladek@suse.com
Cc: gnomes@lxorguk.ukuu.org.uk
Cc: linux-s390@vger.kernel.org
Cc: boris.ostrovsky@oracle.com
Cc: jgross@suse.com
Cc: pbonzini@redhat.com
Link: https://lkml.kernel.org/r/20180719205545.16512-25-pasha.tatashin@oracle.com
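For background on the pattern being applied: a static key (the jump-label
API from <linux/jump_label.h>) compiles the branch site to straight-line
code containing a no-op, and enabling the key patches that no-op into a
jump at runtime, so the hot path pays neither a memory load nor a
conditional branch. Below is a minimal sketch of the before/after shape of
such a conversion; the names flag_running, key_running and hot_path are
hypothetical, for illustration only, not taken from the patch.

#include <linux/jump_label.h>
#include <linux/types.h>

/* Before: a plain flag that every caller must load and test. */
static int flag_running;

/*
 * After: a static key, false by default; its branch sites are
 * patched in place when the key is incremented or enabled.
 */
static DEFINE_STATIC_KEY_FALSE(key_running);

static u64 hot_path(void)
{
	/*
	 * While key_running is disabled this evaluates to false via a
	 * patched no-op, so the early return is taken. After
	 * static_branch_inc(&key_running), the site is patched into a
	 * jump and execution falls through to the real work.
	 */
	if (!static_branch_unlikely(&key_running))
		return 0;

	return 42; /* real work would go here */
}

Note that the patch below increments the key twice, once in
sched_clock_init() and once in sched_clock_init_late(), so the key's count
stands in for the old 0/1/2 boot-phase values; that is why
clear_sched_clock_stable() tests static_key_count(&sched_clock_running.key) == 2
rather than a plain boolean.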
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/clock.c  16
-rw-r--r--  kernel/sched/debug.c   2
2 files changed, 8 insertions, 10 deletions
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 422cd63f8f17..c5c47ad3f386 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -67,7 +67,7 @@ unsigned long long __weak sched_clock(void)
 }
 EXPORT_SYMBOL_GPL(sched_clock);
 
-__read_mostly int sched_clock_running;
+static DEFINE_STATIC_KEY_FALSE(sched_clock_running);
 
 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
 /*
@@ -191,7 +191,7 @@ void clear_sched_clock_stable(void)
 
 	smp_mb(); /* matches sched_clock_init_late() */
 
-	if (sched_clock_running == 2)
+	if (static_key_count(&sched_clock_running.key) == 2)
 		__clear_sched_clock_stable();
 }
 
@@ -215,7 +215,7 @@ void __init sched_clock_init(void)
 	__sched_clock_gtod_offset();
 	local_irq_restore(flags);
 
-	sched_clock_running = 1;
+	static_branch_inc(&sched_clock_running);
 
 	/* Now that sched_clock_running is set adjust scd */
 	local_irq_save(flags);
@@ -228,7 +228,7 @@ void __init sched_clock_init(void)
  */
 static int __init sched_clock_init_late(void)
 {
-	sched_clock_running = 2;
+	static_branch_inc(&sched_clock_running);
 	/*
 	 * Ensure that it is impossible to not do a static_key update.
 	 *
@@ -373,7 +373,7 @@ u64 sched_clock_cpu(int cpu)
 	if (sched_clock_stable())
 		return sched_clock() + __sched_clock_offset;
 
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return sched_clock();
 
 	preempt_disable_notrace();
@@ -396,7 +396,7 @@ void sched_clock_tick(void)
 	if (sched_clock_stable())
 		return;
 
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return;
 
 	lockdep_assert_irqs_disabled();
@@ -455,13 +455,13 @@ EXPORT_SYMBOL_GPL(sched_clock_idle_wakeup_event);
 
 void __init sched_clock_init(void)
 {
-	sched_clock_running = 1;
+	static_branch_inc(&sched_clock_running);
 	generic_sched_clock_init();
 }
 
 u64 sched_clock_cpu(int cpu)
 {
-	if (unlikely(!sched_clock_running))
+	if (!static_branch_unlikely(&sched_clock_running))
 		return 0;
 
 	return sched_clock();
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index e593b4118578..b0212f489a33 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -623,8 +623,6 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
 #undef PU
 }
 
-extern __read_mostly int sched_clock_running;
-
 static void print_cpu(struct seq_file *m, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);