author     Tianyu Lan <Tianyu.Lan@microsoft.com>   2019-08-14 15:32:16 +0300
committer  Thomas Gleixner <tglx@linutronix.de>    2019-08-23 17:59:54 +0300
commit     bd00cd52d5be655a2f217e2ed74b91a71cb2b14f (patch)
tree       90b8fe8d71a849f6066d1c2ba7500dba0fca9819 /drivers/clocksource
parent     adb87ff4f96c9700718e09c97a804124d5cd61ff (diff)
clocksource/drivers/hyperv: Add Hyper-V specific sched clock function
Hyper-V guests use the default native_sched_clock() in pv_ops.time.sched_clock on x86. But native_sched_clock() directly uses the raw TSC value, which can be discontinuous in a Hyper-V VM.

Add the generic hv_setup_sched_clock() to set the sched clock function appropriately. On x86, this sets pv_ops.time.sched_clock to read the Hyper-V reference TSC value that is scaled and adjusted to be continuous.

Also move the Hyper-V reference TSC initialization much earlier in the boot process so no discontinuity is observed when pv_ops.time.sched_clock calculates its offset.

[ tglx: Folded build fix ]

Signed-off-by: Tianyu Lan <Tianyu.Lan@microsoft.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Michael Kelley <mikelley@microsoft.com>
Link: https://lkml.kernel.org/r/20190814123216.32245-3-Tianyu.Lan@microsoft.com
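For reference, the generic hv_setup_sched_clock() hook named above is wired up on the x86 side, outside this diff (which is limited to drivers/clocksource). A minimal sketch of what that companion change might look like, inferred from the commit message; the real code lives under arch/x86 and may differ:

/* Hypothetical sketch of the x86 hook described in the commit message;
 * not part of this diff. It installs the Hyper-V read function as the
 * paravirt sched clock. */
static inline void hv_setup_sched_clock(void *sched_clock)
{
#ifdef CONFIG_PARAVIRT
	pv_ops.time.sched_clock = sched_clock;
#endif
}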
Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/hyperv_timer.c | 22 ++++++++++++----------
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c
index 432aa331df04..c322ab4d3689 100644
--- a/drivers/clocksource/hyperv_timer.c
+++ b/drivers/clocksource/hyperv_timer.c
@@ -22,6 +22,7 @@
#include <asm/mshyperv.h>
static struct clock_event_device __percpu *hv_clock_event;
+static u64 hv_sched_clock_offset __ro_after_init;
/*
* If false, we're using the old mechanism for stimer0 interrupts
@@ -222,7 +223,7 @@ struct ms_hyperv_tsc_page *hv_get_tsc_page(void)
}
EXPORT_SYMBOL_GPL(hv_get_tsc_page);
-static u64 notrace read_hv_sched_clock_tsc(void)
+static u64 notrace read_hv_clock_tsc(struct clocksource *arg)
{
u64 current_tick = hv_read_tsc_page(&tsc_pg);
@@ -232,9 +233,9 @@ static u64 notrace read_hv_sched_clock_tsc(void)
return current_tick;
}
-static u64 read_hv_clock_tsc(struct clocksource *arg)
+static u64 read_hv_sched_clock_tsc(void)
{
- return read_hv_sched_clock_tsc();
+ return read_hv_clock_tsc(NULL) - hv_sched_clock_offset;
}
static struct clocksource hyperv_cs_tsc = {
@@ -246,7 +247,7 @@ static struct clocksource hyperv_cs_tsc = {
};
#endif
-static u64 notrace read_hv_sched_clock_msr(void)
+static u64 notrace read_hv_clock_msr(struct clocksource *arg)
{
u64 current_tick;
/*
@@ -258,9 +259,9 @@ static u64 notrace read_hv_sched_clock_msr(void)
return current_tick;
}
-static u64 read_hv_clock_msr(struct clocksource *arg)
+static u64 read_hv_sched_clock_msr(void)
{
- return read_hv_sched_clock_msr();
+ return read_hv_clock_msr(NULL) - hv_sched_clock_offset;
}
static struct clocksource hyperv_cs_msr = {
@@ -298,8 +299,9 @@ static bool __init hv_init_tsc_clocksource(void)
hv_set_clocksource_vdso(hyperv_cs_tsc);
clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100);
- /* sched_clock_register is needed on ARM64 but is a no-op on x86 */
- sched_clock_register(read_hv_sched_clock_tsc, 64, HV_CLOCK_HZ);
+ hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+ hv_setup_sched_clock(read_hv_sched_clock_tsc);
+
return true;
}
#else
@@ -329,7 +331,7 @@ void __init hv_init_clocksource(void)
hyperv_cs = &hyperv_cs_msr;
clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100);
- /* sched_clock_register is needed on ARM64 but is a no-op on x86 */
- sched_clock_register(read_hv_sched_clock_msr, 64, HV_CLOCK_HZ);
+ hv_sched_clock_offset = hyperv_cs->read(hyperv_cs);
+ hv_setup_sched_clock(read_hv_sched_clock_msr);
}
EXPORT_SYMBOL_GPL(hv_init_clocksource);
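
The net effect of the new read_hv_sched_clock_*() helpers is simple offset arithmetic: capture the scaled reference count once at init, subtract it on every read, and sched_clock starts near zero and stays continuous even when the raw TSC jumps. A self-contained userspace sketch of the same pattern, where fake_ref_count, read_ref_count(), and main() are hypothetical stand-ins for the Hyper-V reference TSC page read:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for hv_read_tsc_page(): a monotonic, scaled
 * reference count that the hypervisor keeps continuous. */
static uint64_t fake_ref_count = 5000000000ULL;

static uint64_t read_ref_count(void)
{
	return fake_ref_count;
}

/* Mirrors the patch: the offset is captured once at init... */
static uint64_t hv_sched_clock_offset;

static void init_sched_clock(void)
{
	hv_sched_clock_offset = read_ref_count();
}

/* ...and subtracted on every read, so the clock starts near zero. */
static uint64_t read_sched_clock(void)
{
	return read_ref_count() - hv_sched_clock_offset;
}

int main(void)
{
	init_sched_clock();
	printf("at init: %llu\n", (unsigned long long)read_sched_clock());
	fake_ref_count += 12345;        /* time passes */
	printf("later:   %llu\n", (unsigned long long)read_sched_clock());
	return 0;
}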