author		Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 04:46:42 +0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-08-06 04:46:42 +0400
commit		e7fda6c4c3c1a7d6996dd75fd84670fa0b5d448f (patch)
tree		daa51c16462c318b890acf7f01fba5827275dd74 /arch
parent		08d69a25714429850cf9ef71f22d8cdc9189d93f (diff)
parent		953dec21aed4038464fec02f96a2f1b8701a5bce (diff)
download	linux-e7fda6c4c3c1a7d6996dd75fd84670fa0b5d448f.tar.xz
Merge branch 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer and time updates from Thomas Gleixner:
"A rather large update of timers, timekeeping & co
- Core timekeeping code is now year-2038 safe for 32-bit machines.
Now we just need to fix all in-kernel users and the gazillion of
user space interfaces which rely on timespec/timeval :)
- Better cache layout for the timekeeping internal data structures.
- Proper nanosecond-based interfaces for in-kernel users.
- Tree-wide cleanup of code which wants nanoseconds but jumps through
hoops to convert back and forth from timespecs. Some of it
definitely belongs in the ugly code museum.
- Consolidation of the timekeeping interface zoo.
- A fast NMI-safe accessor to clock monotonic for tracing. This is a
long-standing request to support correlated user/kernel space
traces. With proper NTP frequency correction it's also suitable
for correlation of traces across separate machines.
- Checkpoint/restart support for timerfd.
- A few NOHZ[_FULL] improvements in the [hr]timer code.
- Code move from kernel to kernel/time of all time* related code.
- New clocksource/event drivers from the ARM universe. I'm really
impressed that, despite an architected timer in the newer chips,
SoC manufacturers insist on inventing new and differently broken
SoC-specific timers.
[ Ed. "Impressed"? I don't think that word means what you think it means ]
- Another round of code move from arch to drivers. Looks like most
of the legacy mess in ARM regarding timers is sorted out except for
a few obnoxious strongholds.
- The usual updates and fixlets all over the place"
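[ Ed. the year-2038 bullet above is easy to see in miniature. Below is a
minimal, hedged sketch, not code from this merge: year_2038_example() is
an illustrative name, while struct timespec64 and timespec64_to_timespec()
are interfaces this series works with (see the "timespec64->timespec
assignment" commit in the list below). ]

#include <linux/time64.h>

/*
 * Illustrative only: on 32-bit builds the old struct timespec carries
 * tv_sec in a 32-bit long, which overflows 2^31 - 1 seconds after the
 * epoch, i.e. at 2038-01-19 03:14:07 UTC. struct timespec64 stores
 * time64_t seconds, so the reworked timekeeping core no longer wraps.
 */
static void year_2038_example(void)
{
	struct timespec64 ts64 = {
		.tv_sec  = 0x80000000LL,	/* first post-overflow second */
		.tv_nsec = 0,
	};
	struct timespec ts = timespec64_to_timespec(ts64);

	/*
	 * On a 32-bit machine ts.tv_sec has wrapped negative here,
	 * while ts64.tv_sec still holds the correct value.
	 */
	(void)ts;
}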
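[ Ed. the nanosecond-interface and conversion-cleanup bullets describe a
pattern that recurs throughout the diff below (spu_base.c, bL_switcher.c,
kvm/x86.c). A minimal before/after sketch: sample_delta_old/new and
stats_tstamp are illustrative names; ktime_get_ts(), timespec_to_ns() and
ktime_get_ns() are the real interfaces, and ktime_get_real_ns() /
ktime_get_boot_ns() are the wall-clock and boot-based siblings the diff
uses. ]

#include <linux/ktime.h>
#include <linux/time.h>

static u64 stats_tstamp;	/* illustrative timestamp, in ns */

/* Old pattern: round-trip through a timespec just to get nanoseconds */
static u64 sample_delta_old(void)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	return timespec_to_ns(&ts) - stats_tstamp;
}

/* New pattern: ask the timekeeping core for nanoseconds directly */
static u64 sample_delta_new(void)
{
	return ktime_get_ns() - stats_tstamp;
}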
* 'timers-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (114 commits)
timekeeping: Fixup typo in update_vsyscall_old definition
clocksource: document some basic timekeeping concepts
timekeeping: Use cached ntp_tick_length when accumulating error
timekeeping: Rework frequency adjustments to work better w/ nohz
timekeeping: Minor fixup for timespec64->timespec assignment
ftrace: Provide trace clocks monotonic
timekeeping: Provide fast and NMI safe access to CLOCK_MONOTONIC
seqcount: Add raw_write_seqcount_latch()
seqcount: Provide raw_read_seqcount()
timekeeping: Use tk_read_base as argument for timekeeping_get_ns()
timekeeping: Create struct tk_read_base and use it in struct timekeeper
timekeeping: Restructure the timekeeper some more
clocksource: Get rid of cycle_last
clocksource: Move cycle_last validation to core code
clocksource: Make delta calculation a function
wireless: ath9k: Get rid of timespec conversions
drm: vmwgfx: Use nsec based interfaces
drm: i915: Use nsec based interfaces
timekeeping: Provide ktime_get_raw()
hangcheck-timer: Use ktime_get_ns()
...
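[ Ed. several commits in this list (raw_write_seqcount_latch(),
raw_read_seqcount(), the fast NMI-safe CLOCK_MONOTONIC accessor) build on
the seqcount latch idea: keep two copies of the data and have the writer
flip readers between them, so a reader, even one running in NMI context
after interrupting the writer, never spins. A hedged sketch of the
pattern, modeled on but not identical to the merged timekeeping code;
struct snapshot, snap_update() and snap_read_ns() are illustrative names. ]

#include <linux/seqlock.h>

struct snapshot {
	u64 base_ns;			/* illustrative payload */
};

static seqcount_t snap_seq = SEQCNT_ZERO(snap_seq);
static struct snapshot snap[2];		/* seq & 1 selects the stable copy */

/* Writer: flips readers to the copy it is not currently updating */
static void snap_update(const struct snapshot *val)
{
	raw_write_seqcount_latch(&snap_seq);	/* readers now use snap[1] */
	snap[0] = *val;
	raw_write_seqcount_latch(&snap_seq);	/* readers now use snap[0] */
	snap[1] = *val;
}

/*
 * Reader: NMI safe. It retries only if the writer raced with it; it
 * never waits on an odd sequence count the way a plain seqlock
 * reader would, which is what makes it usable from NMI context.
 */
static u64 snap_read_ns(void)
{
	struct snapshot *s;
	unsigned int seq;
	u64 ns;

	do {
		seq = raw_read_seqcount(&snap_seq);
		s = &snap[seq & 0x01];
		ns = s->base_ns;
	} while (read_seqcount_retry(&snap_seq, seq));

	return ns;
}

[ Ed. the merged fast accessor (ktime_get_mono_fast_ns(), assuming that is
the name the "fast and NMI safe access to CLOCK_MONOTONIC" commit
introduces) applies this scheme to two tk_read_base copies. ]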
Diffstat (limited to 'arch')
-rw-r--r--	arch/arm/Kconfig	2
-rw-r--r--	arch/arm/common/bL_switcher.c	16
-rw-r--r--	arch/arm/mach-pxa/Makefile	2
-rw-r--r--	arch/arm/mach-pxa/generic.c	11
-rw-r--r--	arch/arm/mach-pxa/time.c	162
-rw-r--r--	arch/arm64/kernel/vdso.c	10
-rw-r--r--	arch/hexagon/Kconfig	1
-rw-r--r--	arch/ia64/kernel/time.c	4
-rw-r--r--	arch/powerpc/kernel/time.c	4
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c	11
-rw-r--r--	arch/powerpc/platforms/cell/spufs/context.c	4
-rw-r--r--	arch/powerpc/platforms/cell/spufs/file.c	4
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c	4
-rw-r--r--	arch/s390/Kconfig	1
-rw-r--r--	arch/s390/kernel/time.c	16
-rw-r--r--	arch/tile/kernel/time.c	13
-rw-r--r--	arch/tile/kernel/vdso/vgettimeofday.c	7
-rw-r--r--	arch/x86/Kconfig	2
-rw-r--r--	arch/x86/kernel/tsc.c	21
-rw-r--r--	arch/x86/kernel/vsyscall_gtod.c	23
-rw-r--r--	arch/x86/kvm/x86.c	62
21 files changed, 92 insertions, 288 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 1e14b9068a39..d31c500653a2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -65,7 +65,6 @@ config ARM
 	select HAVE_UID16
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_FORCED_THREADING
-	select KTIME_SCALAR
 	select MODULES_USE_ELF_REL
 	select NO_BOOTMEM
 	select OLD_SIGACTION
@@ -648,6 +647,7 @@ config ARCH_PXA
 	select AUTO_ZRELADDR
 	select CLKDEV_LOOKUP
 	select CLKSRC_MMIO
+	select CLKSRC_OF
 	select GENERIC_CLOCKEVENTS
 	select GPIO_PXA
 	select HAVE_IDE
diff --git a/arch/arm/common/bL_switcher.c b/arch/arm/common/bL_switcher.c
index 490f3dced749..6eaddc47c43d 100644
--- a/arch/arm/common/bL_switcher.c
+++ b/arch/arm/common/bL_switcher.c
@@ -58,16 +58,6 @@ static int read_mpidr(void)
 }
 
 /*
- * Get a global nanosecond time stamp for tracing.
- */
-static s64 get_ns(void)
-{
-	struct timespec ts;
-	getnstimeofday(&ts);
-	return timespec_to_ns(&ts);
-}
-
-/*
  * bL switcher core code.
  */
 
@@ -224,7 +214,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
 	 */
 	local_irq_disable();
 	local_fiq_disable();
-	trace_cpu_migrate_begin(get_ns(), ob_mpidr);
+	trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);
 
 	/* redirect GIC's SGIs to our counterpart */
 	gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);
@@ -267,7 +257,7 @@ static int bL_switch_to(unsigned int new_cluster_id)
 				tdev->evtdev->next_event, 1);
 	}
 
-	trace_cpu_migrate_finish(get_ns(), ib_mpidr);
+	trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
 	local_fiq_enable();
 	local_irq_enable();
 
@@ -558,7 +548,7 @@ int bL_switcher_get_logical_index(u32 mpidr)
 
 static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
 {
-	trace_cpu_migrate_current(get_ns(), read_mpidr());
+	trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
 }
 
 int bL_switcher_trace_trigger(void)
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 648867a8caa8..2fe1824c6dcb 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -4,7 +4,7 @@
 
 # Common support (must be linked before board specific support)
 obj-y				+= clock.o devices.o generic.o irq.o \
-				   time.o reset.o
+				   reset.o
 obj-$(CONFIG_PM)		+= pm.o sleep.o standby.o
 
 # Generic drivers that other drivers may depend upon
diff --git a/arch/arm/mach-pxa/generic.c b/arch/arm/mach-pxa/generic.c
index 42254175fcf4..6f38e1af45af 100644
--- a/arch/arm/mach-pxa/generic.c
+++ b/arch/arm/mach-pxa/generic.c
@@ -25,11 +25,13 @@
 #include <asm/mach/map.h>
 #include <asm/mach-types.h>
 
+#include <mach/irqs.h>
 #include <mach/reset.h>
 #include <mach/smemc.h>
 #include <mach/pxa3xx-regs.h>
 
 #include "generic.h"
+#include <clocksource/pxa.h>
 
 void clear_reset_status(unsigned int mask)
 {
@@ -57,6 +59,15 @@ unsigned long get_clock_tick_rate(void)
 EXPORT_SYMBOL(get_clock_tick_rate);
 
 /*
+ * For non device-tree builds, keep legacy timer init
+ */
+void pxa_timer_init(void)
+{
+	pxa_timer_nodt_init(IRQ_OST0, io_p2v(0x40a00000),
+			    get_clock_tick_rate());
+}
+
+/*
  * Get the clock frequency as reflected by CCCR and the turbo flag.
  * We assume these values have been applied via a fcs.
  * If info is not 0 we also display the current settings.
diff --git a/arch/arm/mach-pxa/time.c b/arch/arm/mach-pxa/time.c
deleted file mode 100644
index fca174e3865d..000000000000
--- a/arch/arm/mach-pxa/time.c
+++ /dev/null
@@ -1,162 +0,0 @@
-/*
- * arch/arm/mach-pxa/time.c
- *
- * PXA clocksource, clockevents, and OST interrupt handlers.
- * Copyright (c) 2007 by Bill Gatliff <bgat@billgatliff.com>.
- *
- * Derived from Nicolas Pitre's PXA timer handler Copyright (c) 2001
- * by MontaVista Software, Inc.  (Nico, your code rocks!)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/clockchips.h>
-#include <linux/sched_clock.h>
-
-#include <asm/div64.h>
-#include <asm/mach/irq.h>
-#include <asm/mach/time.h>
-#include <mach/regs-ost.h>
-#include <mach/irqs.h>
-
-/*
- * This is PXA's sched_clock implementation. This has a resolution
- * of at least 308 ns and a maximum value of 208 days.
- *
- * The return value is guaranteed to be monotonic in that range as
- * long as there is always less than 582 seconds between successive
- * calls to sched_clock() which should always be the case in practice.
- */
-
-static u64 notrace pxa_read_sched_clock(void)
-{
-	return readl_relaxed(OSCR);
-}
-
-
-#define MIN_OSCR_DELTA 16
-
-static irqreturn_t
-pxa_ost0_interrupt(int irq, void *dev_id)
-{
-	struct clock_event_device *c = dev_id;
-
-	/* Disarm the compare/match, signal the event. */
-	writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
-	writel_relaxed(OSSR_M0, OSSR);
-	c->event_handler(c);
-
-	return IRQ_HANDLED;
-}
-
-static int
-pxa_osmr0_set_next_event(unsigned long delta, struct clock_event_device *dev)
-{
-	unsigned long next, oscr;
-
-	writel_relaxed(readl_relaxed(OIER) | OIER_E0, OIER);
-	next = readl_relaxed(OSCR) + delta;
-	writel_relaxed(next, OSMR0);
-	oscr = readl_relaxed(OSCR);
-
-	return (signed)(next - oscr) <= MIN_OSCR_DELTA ? -ETIME : 0;
-}
-
-static void
-pxa_osmr0_set_mode(enum clock_event_mode mode, struct clock_event_device *dev)
-{
-	switch (mode) {
-	case CLOCK_EVT_MODE_ONESHOT:
-		writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
-		writel_relaxed(OSSR_M0, OSSR);
-		break;
-
-	case CLOCK_EVT_MODE_UNUSED:
-	case CLOCK_EVT_MODE_SHUTDOWN:
-		/* initializing, released, or preparing for suspend */
-		writel_relaxed(readl_relaxed(OIER) & ~OIER_E0, OIER);
-		writel_relaxed(OSSR_M0, OSSR);
-		break;
-
-	case CLOCK_EVT_MODE_RESUME:
-	case CLOCK_EVT_MODE_PERIODIC:
-		break;
-	}
-}
-
-#ifdef CONFIG_PM
-static unsigned long osmr[4], oier, oscr;
-
-static void pxa_timer_suspend(struct clock_event_device *cedev)
-{
-	osmr[0] = readl_relaxed(OSMR0);
-	osmr[1] = readl_relaxed(OSMR1);
-	osmr[2] = readl_relaxed(OSMR2);
-	osmr[3] = readl_relaxed(OSMR3);
-	oier = readl_relaxed(OIER);
-	oscr = readl_relaxed(OSCR);
-}
-
-static void pxa_timer_resume(struct clock_event_device *cedev)
-{
-	/*
-	 * Ensure that we have at least MIN_OSCR_DELTA between match
-	 * register 0 and the OSCR, to guarantee that we will receive
-	 * the one-shot timer interrupt.  We adjust OSMR0 in preference
-	 * to OSCR to guarantee that OSCR is monotonically incrementing.
-	 */
-	if (osmr[0] - oscr < MIN_OSCR_DELTA)
-		osmr[0] += MIN_OSCR_DELTA;
-
-	writel_relaxed(osmr[0], OSMR0);
-	writel_relaxed(osmr[1], OSMR1);
-	writel_relaxed(osmr[2], OSMR2);
-	writel_relaxed(osmr[3], OSMR3);
-	writel_relaxed(oier, OIER);
-	writel_relaxed(oscr, OSCR);
-}
-#else
-#define pxa_timer_suspend NULL
-#define pxa_timer_resume NULL
-#endif
-
-static struct clock_event_device ckevt_pxa_osmr0 = {
-	.name		= "osmr0",
-	.features	= CLOCK_EVT_FEAT_ONESHOT,
-	.rating		= 200,
-	.set_next_event	= pxa_osmr0_set_next_event,
-	.set_mode	= pxa_osmr0_set_mode,
-	.suspend	= pxa_timer_suspend,
-	.resume		= pxa_timer_resume,
-};
-
-static struct irqaction pxa_ost0_irq = {
-	.name		= "ost0",
-	.flags		= IRQF_TIMER | IRQF_IRQPOLL,
-	.handler	= pxa_ost0_interrupt,
-	.dev_id		= &ckevt_pxa_osmr0,
-};
-
-void __init pxa_timer_init(void)
-{
-	unsigned long clock_tick_rate = get_clock_tick_rate();
-
-	writel_relaxed(0, OIER);
-	writel_relaxed(OSSR_M0 | OSSR_M1 | OSSR_M2 | OSSR_M3, OSSR);
-
-	sched_clock_register(pxa_read_sched_clock, 32, clock_tick_rate);
-
-	ckevt_pxa_osmr0.cpumask = cpumask_of(0);
-
-	setup_irq(IRQ_OST0, &pxa_ost0_irq);
-
-	clocksource_mmio_init(OSCR, "oscr0", clock_tick_rate, 200, 32,
-			      clocksource_mmio_readl_up);
-	clockevents_config_and_register(&ckevt_pxa_osmr0, clock_tick_rate,
-					MIN_OSCR_DELTA * 2, 0x7fffffff);
-}
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 24f2e8c62479..a81a446a5786 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -219,7 +219,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
 void update_vsyscall(struct timekeeper *tk)
 {
 	struct timespec xtime_coarse;
-	u32 use_syscall = strcmp(tk->clock->name, "arch_sys_counter");
+	u32 use_syscall = strcmp(tk->tkr.clock->name, "arch_sys_counter");
 
 	++vdso_data->tb_seq_count;
 	smp_wmb();
@@ -232,11 +232,11 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->wtm_clock_nsec		= tk->wall_to_monotonic.tv_nsec;
 
 	if (!use_syscall) {
-		vdso_data->cs_cycle_last	= tk->clock->cycle_last;
+		vdso_data->cs_cycle_last	= tk->tkr.cycle_last;
 		vdso_data->xtime_clock_sec	= tk->xtime_sec;
-		vdso_data->xtime_clock_nsec	= tk->xtime_nsec;
-		vdso_data->cs_mult		= tk->mult;
-		vdso_data->cs_shift		= tk->shift;
+		vdso_data->xtime_clock_nsec	= tk->tkr.xtime_nsec;
+		vdso_data->cs_mult		= tk->tkr.mult;
+		vdso_data->cs_shift		= tk->tkr.shift;
 	}
 
 	smp_wmb();
diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig
index 0fd6138f6203..4dc89d1f9c48 100644
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -23,7 +23,6 @@ config HEXAGON
 	select GENERIC_IOMAP
 	select GENERIC_SMP_IDLE_THREAD
 	select STACKTRACE_SUPPORT
-	select KTIME_SCALAR
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_CLOCKEVENTS_BROADCAST
 	select MODULES_USE_ELF_RELA
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 71c52bc7c28d..3e71ef85e439 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -441,7 +441,7 @@ void update_vsyscall_tz(void)
 }
 
 void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
-			 struct clocksource *c, u32 mult)
+			 struct clocksource *c, u32 mult, cycle_t cycle_last)
 {
 	write_seqcount_begin(&fsyscall_gtod_data.seq);
 
@@ -450,7 +450,7 @@ void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
 	fsyscall_gtod_data.clk_mult = mult;
 	fsyscall_gtod_data.clk_shift = c->shift;
 	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
-	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
+	fsyscall_gtod_data.clk_cycle_last = cycle_last;
 
 	/* copy kernel time structures */
 	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 9fff9cdcc519..368ab374d33c 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -741,7 +741,7 @@ static cycle_t timebase_read(struct clocksource *cs)
 }
 
 void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-			 struct clocksource *clock, u32 mult)
+			 struct clocksource *clock, u32 mult, cycle_t cycle_last)
 {
 	u64 new_tb_to_xs, new_stamp_xsec;
 	u32 frac_sec;
@@ -774,7 +774,7 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
 	 * We expect the caller to have done the first increment of
 	 * vdso_data->tb_update_count already.
 	 */
-	vdso_data->tb_orig_stamp = clock->cycle_last;
+	vdso_data->tb_orig_stamp = cycle_last;
 	vdso_data->stamp_xsec = new_stamp_xsec;
 	vdso_data->tb_to_xs = new_tb_to_xs;
 	vdso_data->wtom_clock_sec = wtm->tv_sec;
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index f85db3a69b4a..2930d1e81a05 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -611,7 +611,6 @@ static int __init create_spu(void *data)
 	int ret;
 	static int number;
 	unsigned long flags;
-	struct timespec ts;
 
 	ret = -ENOMEM;
 	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
@@ -652,8 +651,7 @@ static int __init create_spu(void *data)
 	mutex_unlock(&spu_full_list_mutex);
 
 	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
-	ktime_get_ts(&ts);
-	spu->stats.tstamp = timespec_to_ns(&ts);
+	spu->stats.tstamp = ktime_get_ns();
 
 	INIT_LIST_HEAD(&spu->aff_list);
 
@@ -676,7 +674,6 @@ static const char *spu_state_names[] = {
 static unsigned long long spu_acct_time(struct spu *spu,
 		enum spu_utilization_state state)
 {
-	struct timespec ts;
 	unsigned long long time = spu->stats.times[state];
 
 	/*
@@ -684,10 +681,8 @@ static unsigned long long spu_acct_time(struct spu *spu,
 	 * statistics are not updated. Apply the time delta from the
 	 * last recorded state of the spu.
 	 */
-	if (spu->stats.util_state == state) {
-		ktime_get_ts(&ts);
-		time += timespec_to_ns(&ts) - spu->stats.tstamp;
-	}
+	if (spu->stats.util_state == state)
+		time += ktime_get_ns() - spu->stats.tstamp;
 
 	return time / NSEC_PER_MSEC;
 }
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 9c6790d17eda..3b4152faeb1f 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -36,7 +36,6 @@ atomic_t nr_spu_contexts = ATOMIC_INIT(0);
 struct spu_context *alloc_spu_context(struct spu_gang *gang)
 {
 	struct spu_context *ctx;
-	struct timespec ts;
 
 	ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
 	if (!ctx)
@@ -67,8 +66,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 	__spu_update_sched_info(ctx);
 	spu_set_timeslice(ctx);
 	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
-	ktime_get_ts(&ts);
-	ctx->stats.tstamp = timespec_to_ns(&ts);
+	ctx->stats.tstamp = ktime_get_ns();
 
 	atomic_inc(&nr_spu_contexts);
 	goto out;
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index 90986923a53a..d966bbe58b8f 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -2338,7 +2338,6 @@ static const char *ctx_state_names[] = {
 static unsigned long long spufs_acct_time(struct spu_context *ctx,
 		enum spu_utilization_state state)
 {
-	struct timespec ts;
 	unsigned long long time = ctx->stats.times[state];
 
 	/*
@@ -2351,8 +2350,7 @@ static unsigned long long spufs_acct_time(struct spu_context *ctx,
 	 * of the spu context.
 	 */
 	if (ctx->spu && ctx->stats.util_state == state) {
-		ktime_get_ts(&ts);
-		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
+		time += ktime_get_ns() - ctx->stats.tstamp;
 	}
 
 	return time / NSEC_PER_MSEC;
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 4a0a64fe25df..998f632e7cce 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -1039,13 +1039,11 @@ void spuctx_switch_state(struct spu_context *ctx,
 {
 	unsigned long long curtime;
 	signed long long delta;
-	struct timespec ts;
 	struct spu *spu;
 	enum spu_utilization_state old_state;
 	int node;
 
-	ktime_get_ts(&ts);
-	curtime = timespec_to_ns(&ts);
+	curtime = ktime_get_ns();
 	delta = curtime - ctx->stats.tstamp;
 
 	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index f5af5f6ef0f4..720a11d339eb 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -136,7 +136,6 @@ config S390
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_UID16 if 32BIT
 	select HAVE_VIRT_CPU_ACCOUNTING
-	select KTIME_SCALAR if 32BIT
 	select MODULES_USE_ELF_RELA
 	select NO_BOOTMEM
 	select OLD_SIGACTION
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 0931b110c826..4cef607f3711 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -214,26 +214,26 @@ void update_vsyscall(struct timekeeper *tk)
 {
 	u64 nsecps;
 
-	if (tk->clock != &clocksource_tod)
+	if (tk->tkr.clock != &clocksource_tod)
 		return;
 
 	/* Make userspace gettimeofday spin until we're done. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+	vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
 	vdso_data->xtime_clock_sec = tk->xtime_sec;
-	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
+	vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
 	vdso_data->wtom_clock_sec =
 		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
-	vdso_data->wtom_clock_nsec = tk->xtime_nsec +
-		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->shift);
-	nsecps = (u64) NSEC_PER_SEC << tk->shift;
+	vdso_data->wtom_clock_nsec = tk->tkr.xtime_nsec +
+		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr.shift);
+	nsecps = (u64) NSEC_PER_SEC << tk->tkr.shift;
 	while (vdso_data->wtom_clock_nsec >= nsecps) {
 		vdso_data->wtom_clock_nsec -= nsecps;
 		vdso_data->wtom_clock_sec++;
 	}
-	vdso_data->tk_mult = tk->mult;
-	vdso_data->tk_shift = tk->shift;
+	vdso_data->tk_mult = tk->tkr.mult;
+	vdso_data->tk_shift = tk->tkr.shift;
 	smp_wmb();
 	++vdso_data->tb_update_count;
 }
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 462dcd0c1700..d8fbc289e680 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -260,9 +260,8 @@ void update_vsyscall_tz(void)
 
 void update_vsyscall(struct timekeeper *tk)
 {
-	struct timespec wall_time = tk_xtime(tk);
 	struct timespec *wtm = &tk->wall_to_monotonic;
-	struct clocksource *clock = tk->clock;
+	struct clocksource *clock = tk->tkr.clock;
 
 	if (clock != &cycle_counter_cs)
 		return;
@@ -270,13 +269,13 @@ void update_vsyscall(struct timekeeper *tk)
 	/* Userspace gettimeofday will spin while this value is odd. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = clock->cycle_last;
-	vdso_data->xtime_clock_sec = wall_time.tv_sec;
-	vdso_data->xtime_clock_nsec = wall_time.tv_nsec;
+	vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
+	vdso_data->xtime_clock_sec = tk->xtime_sec;
+	vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
 	vdso_data->wtom_clock_sec = wtm->tv_sec;
 	vdso_data->wtom_clock_nsec = wtm->tv_nsec;
-	vdso_data->mult = clock->mult;
-	vdso_data->shift = clock->shift;
+	vdso_data->mult = tk->tkr.mult;
+	vdso_data->shift = tk->tkr.shift;
 	smp_wmb();
 	++vdso_data->tb_update_count;
 }
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
index 51ec8e46f5f9..e933fb9fbf5c 100644
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ b/arch/tile/kernel/vdso/vgettimeofday.c
@@ -83,10 +83,11 @@ int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
 		if (count & 1)
 			continue;
 
-		cycles = (get_cycles() - vdso_data->xtime_tod_stamp);
-		ns = (cycles * vdso_data->mult) >> vdso_data->shift;
 		sec = vdso_data->xtime_clock_sec;
-		ns += vdso_data->xtime_clock_nsec;
+		cycles = get_cycles() - vdso_data->xtime_tod_stamp;
+		ns = (cycles * vdso_data->mult) + vdso_data->xtime_clock_nsec;
+		ns >>= vdso_data->shift;
+
 		if (ns >= NSEC_PER_SEC) {
 			ns -= NSEC_PER_SEC;
 			sec += 1;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 6b71f0417293..6cfeb082a422 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -108,9 +108,9 @@ config X86
 	select CLOCKSOURCE_WATCHDOG
 	select GENERIC_CLOCKEVENTS
 	select ARCH_CLOCKSOURCE_DATA
+	select CLOCKSOURCE_VALIDATE_LAST_CYCLE
 	select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
 	select GENERIC_TIME_VSYSCALL
-	select KTIME_SCALAR if X86_32
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select HAVE_CONTEXT_TRACKING if X86_64
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 56b0c338061e..b6025f9e36c6 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -950,7 +950,7 @@ core_initcall(cpufreq_tsc);
 static struct clocksource clocksource_tsc;
 
 /*
- * We compare the TSC to the cycle_last value in the clocksource
+ * We used to compare the TSC to the cycle_last value in the clocksource
  * structure to avoid a nasty time-warp. This can be observed in a
  * very small window right after one CPU updated cycle_last under
  * xtime/vsyscall_gtod lock and the other CPU reads a TSC value which
@@ -960,26 +960,23 @@ static struct clocksource clocksource_tsc;
  * due to the unsigned delta calculation of the time keeping core
  * code, which is necessary to support wrapping clocksources like pm
  * timer.
+ *
+ * This sanity check is now done in the core timekeeping code.
+ * checking the result of read_tsc() - cycle_last for being negative.
+ * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
  */
 static cycle_t read_tsc(struct clocksource *cs)
 {
-	cycle_t ret = (cycle_t)get_cycles();
-
-	return ret >= clocksource_tsc.cycle_last ?
-		ret : clocksource_tsc.cycle_last;
-}
-
-static void resume_tsc(struct clocksource *cs)
-{
-	if (!boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
-		clocksource_tsc.cycle_last = 0;
+	return (cycle_t)get_cycles();
 }
 
+/*
+ * .mask MUST be CLOCKSOURCE_MASK(64). See comment above read_tsc()
+ */
 static struct clocksource clocksource_tsc = {
 	.name			= "tsc",
 	.rating			= 300,
 	.read			= read_tsc,
-	.resume			= resume_tsc,
 	.mask			= CLOCKSOURCE_MASK(64),
 	.flags			= CLOCK_SOURCE_IS_CONTINUOUS |
 				  CLOCK_SOURCE_MUST_VERIFY,
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
index 9531fbb123ba..c7d791f32b98 100644
--- a/arch/x86/kernel/vsyscall_gtod.c
+++ b/arch/x86/kernel/vsyscall_gtod.c
@@ -31,29 +31,30 @@ void update_vsyscall(struct timekeeper *tk)
 	gtod_write_begin(vdata);
 
 	/* copy vsyscall data */
-	vdata->vclock_mode	= tk->clock->archdata.vclock_mode;
-	vdata->cycle_last	= tk->clock->cycle_last;
-	vdata->mask		= tk->clock->mask;
-	vdata->mult		= tk->mult;
-	vdata->shift		= tk->shift;
+	vdata->vclock_mode	= tk->tkr.clock->archdata.vclock_mode;
+	vdata->cycle_last	= tk->tkr.cycle_last;
+	vdata->mask		= tk->tkr.mask;
+	vdata->mult		= tk->tkr.mult;
+	vdata->shift		= tk->tkr.shift;
 
 	vdata->wall_time_sec		= tk->xtime_sec;
-	vdata->wall_time_snsec		= tk->xtime_nsec;
+	vdata->wall_time_snsec		= tk->tkr.xtime_nsec;
 
 	vdata->monotonic_time_sec	= tk->xtime_sec
 					+ tk->wall_to_monotonic.tv_sec;
-	vdata->monotonic_time_snsec	= tk->xtime_nsec
+	vdata->monotonic_time_snsec	= tk->tkr.xtime_nsec
 					+ ((u64)tk->wall_to_monotonic.tv_nsec
-						<< tk->shift);
+						<< tk->tkr.shift);
 	while (vdata->monotonic_time_snsec >=
-					(((u64)NSEC_PER_SEC) << tk->shift)) {
+					(((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
 		vdata->monotonic_time_snsec -=
-					((u64)NSEC_PER_SEC) << tk->shift;
+					((u64)NSEC_PER_SEC) << tk->tkr.shift;
 		vdata->monotonic_time_sec++;
 	}
 
 	vdata->wall_time_coarse_sec	= tk->xtime_sec;
-	vdata->wall_time_coarse_nsec	= (long)(tk->xtime_nsec >> tk->shift);
+	vdata->wall_time_coarse_nsec	= (long)(tk->tkr.xtime_nsec >>
+						 tk->tkr.shift);
 
 	vdata->monotonic_time_coarse_sec =
 		vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b86d329b953a..ca3d760dd581 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1020,9 +1020,8 @@ struct pvclock_gtod_data {
 		u32	shift;
 	} clock;
 
-	/* open coded 'struct timespec' */
-	u64		monotonic_time_snsec;
-	time_t		monotonic_time_sec;
+	u64		boot_ns;
+	u64		nsec_base;
 };
 
 static struct pvclock_gtod_data pvclock_gtod_data;
 
@@ -1030,27 +1029,21 @@ static struct pvclock_gtod_data pvclock_gtod_data;
 static void update_pvclock_gtod(struct timekeeper *tk)
 {
 	struct pvclock_gtod_data *vdata = &pvclock_gtod_data;
+	u64 boot_ns;
+
+	boot_ns = ktime_to_ns(ktime_add(tk->tkr.base_mono, tk->offs_boot));
 
 	write_seqcount_begin(&vdata->seq);
 
 	/* copy pvclock gtod data */
-	vdata->clock.vclock_mode	= tk->clock->archdata.vclock_mode;
-	vdata->clock.cycle_last		= tk->clock->cycle_last;
-	vdata->clock.mask		= tk->clock->mask;
-	vdata->clock.mult		= tk->mult;
-	vdata->clock.shift		= tk->shift;
-
-	vdata->monotonic_time_sec	= tk->xtime_sec
-					+ tk->wall_to_monotonic.tv_sec;
-	vdata->monotonic_time_snsec	= tk->xtime_nsec
-					+ (tk->wall_to_monotonic.tv_nsec
-						<< tk->shift);
-	while (vdata->monotonic_time_snsec >=
-					(((u64)NSEC_PER_SEC) << tk->shift)) {
-		vdata->monotonic_time_snsec -=
-					((u64)NSEC_PER_SEC) << tk->shift;
-		vdata->monotonic_time_sec++;
-	}
+	vdata->clock.vclock_mode	= tk->tkr.clock->archdata.vclock_mode;
+	vdata->clock.cycle_last		= tk->tkr.cycle_last;
+	vdata->clock.mask		= tk->tkr.mask;
+	vdata->clock.mult		= tk->tkr.mult;
+	vdata->clock.shift		= tk->tkr.shift;
+
+	vdata->boot_ns			= boot_ns;
+	vdata->nsec_base		= tk->tkr.xtime_nsec;
 
 	write_seqcount_end(&vdata->seq);
 }
@@ -1145,11 +1138,7 @@ static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
 
 static inline u64 get_kernel_ns(void)
 {
-	struct timespec ts;
-
-	ktime_get_ts(&ts);
-	monotonic_to_bootbased(&ts);
-	return timespec_to_ns(&ts);
+	return ktime_get_boot_ns();
 }
 
 #ifdef CONFIG_X86_64
@@ -1414,23 +1403,22 @@ static inline u64 vgettsc(cycle_t *cycle_now)
 	return v * gtod->clock.mult;
 }
 
-static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
+static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
 {
+	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
 	unsigned long seq;
-	u64 ns;
 	int mode;
-	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
+	u64 ns;
 
-	ts->tv_nsec = 0;
 	do {
 		seq = read_seqcount_begin(&gtod->seq);
 		mode = gtod->clock.vclock_mode;
-		ts->tv_sec = gtod->monotonic_time_sec;
-		ns = gtod->monotonic_time_snsec;
+		ns = gtod->nsec_base;
 		ns += vgettsc(cycle_now);
 		ns >>= gtod->clock.shift;
+		ns += gtod->boot_ns;
 	} while (unlikely(read_seqcount_retry(&gtod->seq, seq)));
-	timespec_add_ns(ts, ns);
+	*t = ns;
 
 	return mode;
 }
@@ -1438,19 +1426,11 @@ static int do_monotonic(struct timespec *ts, cycle_t *cycle_now)
 /* returns true if host is using tsc clocksource */
 static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
 {
-	struct timespec ts;
-
 	/* checked again under seqlock below */
 	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
 		return false;
 
-	if (do_monotonic(&ts, cycle_now) != VCLOCK_TSC)
-		return false;
-
-	monotonic_to_bootbased(&ts);
-	*kernel_ns = timespec_to_ns(&ts);
-
-	return true;
+	return do_monotonic_boot(kernel_ns, cycle_now) == VCLOCK_TSC;
 }
 #endif