author		Linus Torvalds <torvalds@linux-foundation.org>	2016-12-26 01:30:04 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-26 01:30:04 +0300
commit		3ddc76dfc786cc6f87852693227fb0b1f124f807
tree		8192b4721e05cf6823087f9696db8c0c8f144b02 /arch
parent		b272f732f888d4cf43c943a40c9aaa836f9b7431
parent		1f3a8e49d8f28f498b8694464623ac20aebfe62a
download	linux-3ddc76dfc786cc6f87852693227fb0b1f124f807.tar.xz
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer type cleanups from Thomas Gleixner:
"This series does a tree wide cleanup of types related to
timers/timekeeping.
- Get rid of cycles_t and use a plain u64. The type is not really
helpful and caused more confusion than clarity
- Get rid of the ktime union. The union has become useless as we use
the scalar nanoseconds storage unconditionally now. The 32bit
timespec alike storage got removed due to the Y2038 limitations
some time ago.
That leaves the odd union access around for no reason. Clean it up.
Both changes have been done with coccinelle and a small amount of
manual mopping up"
* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
ktime: Get rid of ktime_equal()
ktime: Cleanup ktime_set() usage
ktime: Get rid of the union
clocksource: Use a plain u64 instead of cycle_t
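For context, cycle_t was nothing more than "typedef u64 cycle_t;", so the clocksource half of this series is a pure type rename in every clocksource read callback. The sketch below is illustrative only; the names example_cs_read(), hw_read_counter() and example_clocksource are invented for this sketch and do not appear in the diff.

/*
 * Illustrative sketch only -- not code from these patches.
 * hw_read_counter() stands in for whatever hardware register a
 * given platform reads.
 */
#include <linux/clocksource.h>
#include <linux/types.h>

static u32 hw_read_counter(void)
{
	return 0;	/* stand-in for a real timer register read */
}

/*
 * Before the series the callback returned cycle_t (== u64):
 *
 *	static cycle_t example_cs_read(struct clocksource *cs)
 *	{
 *		return (cycle_t)hw_read_counter();
 *	}
 *
 * After the series the typedef is gone and u64 is used directly:
 */
static u64 example_cs_read(struct clocksource *cs)
{
	return hw_read_counter();
}

static struct clocksource example_clocksource = {
	.name	= "example",
	.rating	= 100,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(32),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};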
Diffstat (limited to 'arch')
67 files changed, 129 insertions, 131 deletions
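The ktime changes that show up in the arch diff below (the hrtimer_start() and KVM lapic hunks) follow from the second cleanup: once the old 32-bit timespec-like member is gone, ktime_t is roughly a plain signed 64-bit nanosecond count, so wrapping a nanosecond value in ktime_set(0, nsec) becomes pointless. A rough sketch of the idea, simplified and not the literal kernel definitions:

/* Rough sketch of the ktime_t simplification; names are illustrative. */
#include <linux/types.h>

/* Before: ktime_t was a union whose only remaining member was the scalar. */
union ktime_before {
	s64	tv64;			/* nanoseconds */
};

/* After: the union wrapper is dropped; ktime_t becomes a bare s64. */
typedef s64 ktime_after_t;

/*
 * Consequence visible throughout the diff below: a relative timeout of
 * less than a second no longer needs to be packaged with ktime_set(), e.g.
 *
 *	- hrtimer_start(&timer, ktime_set(0, nsec), HRTIMER_MODE_REL);
 *	+ hrtimer_start(&timer, nsec, HRTIMER_MODE_REL);
 */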
diff --git a/arch/alpha/kernel/time.c b/arch/alpha/kernel/time.c
index 992000e3d9e4..3bfe058d75d9 100644
--- a/arch/alpha/kernel/time.c
+++ b/arch/alpha/kernel/time.c
@@ -133,7 +133,7 @@ init_rtc_clockevent(void)
  * The QEMU clock as a clocksource primitive.
  */
-static cycle_t
+static u64
 qemu_cs_read(struct clocksource *cs)
 {
 	return qemu_get_vmtime();
@@ -260,7 +260,7 @@ common_init_rtc(void)
  * use this method when WTINT is in use.
  */
-static cycle_t read_rpcc(struct clocksource *cs)
+static u64 read_rpcc(struct clocksource *cs)
 {
 	return rpcc();
 }
diff --git a/arch/arm/mach-davinci/time.c b/arch/arm/mach-davinci/time.c
index 6c18445a4639..034f865fe78e 100644
--- a/arch/arm/mach-davinci/time.c
+++ b/arch/arm/mach-davinci/time.c
@@ -268,7 +268,7 @@ static void __init timer_init(void)
 /*
  * clocksource
  */
-static cycle_t read_cycles(struct clocksource *cs)
+static u64 read_cycles(struct clocksource *cs)
 {
 	struct timer_s *t = &timers[TID_CLOCKSOURCE];
diff --git a/arch/arm/mach-ep93xx/timer-ep93xx.c b/arch/arm/mach-ep93xx/timer-ep93xx.c
index e5f791145bd0..874cbc91b669 100644
--- a/arch/arm/mach-ep93xx/timer-ep93xx.c
+++ b/arch/arm/mach-ep93xx/timer-ep93xx.c
@@ -59,13 +59,13 @@ static u64 notrace ep93xx_read_sched_clock(void)
 	return ret;
 }
 
-cycle_t ep93xx_clocksource_read(struct clocksource *c)
+u64 ep93xx_clocksource_read(struct clocksource *c)
 {
 	u64 ret;
 
 	ret = readl(EP93XX_TIMER4_VALUE_LOW);
 	ret |= ((u64) (readl(EP93XX_TIMER4_VALUE_HIGH) & 0xff) << 32);
-	return (cycle_t) ret;
+	return (u64) ret;
 }
 
 static int ep93xx_clkevt_set_next_event(unsigned long next,
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 810edc78c817..75395a720e63 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -19,7 +19,7 @@
 
 #include "common.h"
 
-static cycle_t cksrc_dc21285_read(struct clocksource *cs)
+static u64 cksrc_dc21285_read(struct clocksource *cs)
 {
 	return cs->mask - *CSR_TIMER2_VALUE;
 }
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index 0f08f611c1a6..846e033c56fa 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -493,7 +493,7 @@ static u64 notrace ixp4xx_read_sched_clock(void)
 /*
  * clocksource
  */
-static cycle_t ixp4xx_clocksource_read(struct clocksource *c)
+static u64 ixp4xx_clocksource_read(struct clocksource *c)
 {
 	return *IXP4XX_OSTS;
 }
diff --git a/arch/arm/mach-mmp/time.c b/arch/arm/mach-mmp/time.c
index 3c2c92aaa0ae..96ad1db0b04b 100644
--- a/arch/arm/mach-mmp/time.c
+++ b/arch/arm/mach-mmp/time.c
@@ -144,7 +144,7 @@ static struct clock_event_device ckevt = {
 	.set_state_oneshot	= timer_set_shutdown,
 };
 
-static cycle_t clksrc_read(struct clocksource *cs)
+static u64 clksrc_read(struct clocksource *cs)
 {
 	return timer_read();
 }
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 5e2e2218a402..56128da23c3a 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -369,9 +369,9 @@ static bool use_gptimer_clksrc __initdata;
 /*
  * clocksource
  */
-static cycle_t clocksource_read_cycles(struct clocksource *cs)
+static u64 clocksource_read_cycles(struct clocksource *cs)
 {
-	return (cycle_t)__omap_dm_timer_read_counter(&clksrc,
+	return (u64)__omap_dm_timer_read_counter(&clksrc,
 						     OMAP_TIMER_NONPOSTED);
 }
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
index ed8d129d4bea..2cff0010f677 100644
--- a/arch/arm/plat-iop/time.c
+++ b/arch/arm/plat-iop/time.c
@@ -38,7 +38,7 @@
 /*
  * IOP clocksource (free-running timer 1).
  */
-static cycle_t notrace iop_clocksource_read(struct clocksource *unused)
+static u64 notrace iop_clocksource_read(struct clocksource *unused)
 {
 	return 0xffffffffu - read_tcr1();
 }
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index a124c55733db..4d9b69615979 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -20,9 +20,9 @@
 
 static bool disable_cpu_idle_poll;
 
-static cycle_t read_cycle_count(struct clocksource *cs)
+static u64 read_cycle_count(struct clocksource *cs)
 {
-	return (cycle_t)sysreg_read(COUNT);
+	return (u64)sysreg_read(COUNT);
 }
 
 /*
diff --git a/arch/blackfin/kernel/time-ts.c b/arch/blackfin/kernel/time-ts.c
index fb9e95f1b719..0e9fcf841d67 100644
--- a/arch/blackfin/kernel/time-ts.c
+++ b/arch/blackfin/kernel/time-ts.c
@@ -26,7 +26,7 @@
 
 #if defined(CONFIG_CYCLES_CLOCKSOURCE)
 
-static notrace cycle_t bfin_read_cycles(struct clocksource *cs)
+static notrace u64 bfin_read_cycles(struct clocksource *cs)
 {
 #ifdef CONFIG_CPU_FREQ
 	return __bfin_cycles_off + (get_cycles() << __bfin_cycles_mod);
@@ -80,7 +80,7 @@ void __init setup_gptimer0(void)
 	enable_gptimers(TIMER0bit);
 }
 
-static cycle_t bfin_read_gptimer0(struct clocksource *cs)
+static u64 bfin_read_gptimer0(struct clocksource *cs)
 {
 	return bfin_read_TIMER0_COUNTER();
 }
diff --git a/arch/c6x/kernel/time.c b/arch/c6x/kernel/time.c
index 04845aaf5985..6a8e00a1f6d5 100644
--- a/arch/c6x/kernel/time.c
+++ b/arch/c6x/kernel/time.c
@@ -26,7 +26,7 @@ static u32 sched_clock_multiplier;
 #define SCHED_CLOCK_SHIFT	16
 
-static cycle_t tsc_read(struct clocksource *cs)
+static u64 tsc_read(struct clocksource *cs)
 {
 	return get_cycles();
 }
diff --git a/arch/hexagon/kernel/time.c b/arch/hexagon/kernel/time.c
index a6a1d1f8309a..ff4e9bf995e9 100644
--- a/arch/hexagon/kernel/time.c
+++ b/arch/hexagon/kernel/time.c
@@ -72,9 +72,9 @@ struct adsp_hw_timer_struct {
 /*  Look for "TCX0" for related constants.  */
 static __iomem struct adsp_hw_timer_struct *rtos_timer;
 
-static cycle_t timer_get_cycles(struct clocksource *cs)
+static u64 timer_get_cycles(struct clocksource *cs)
 {
-	return (cycle_t) __vmgettime();
+	return (u64) __vmgettime();
 }
 
 static struct clocksource hexagon_clocksource = {
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 5fa3848ba224..ee1a4afbf9da 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -21,9 +21,9 @@ void __init cyclone_setup(void)
 
 static void __iomem *cyclone_mc;
 
-static cycle_t read_cyclone(struct clocksource *cs)
+static u64 read_cyclone(struct clocksource *cs)
 {
-	return (cycle_t)readq((void __iomem *)cyclone_mc);
+	return (u64)readq((void __iomem *)cyclone_mc);
 }
 
 static struct clocksource clocksource_cyclone = {
diff --git a/arch/ia64/kernel/fsyscall_gtod_data.h b/arch/ia64/kernel/fsyscall_gtod_data.h
index 146b15b5fec3..dcc514917731 100644
--- a/arch/ia64/kernel/fsyscall_gtod_data.h
+++ b/arch/ia64/kernel/fsyscall_gtod_data.h
@@ -9,15 +9,15 @@
 struct fsyscall_gtod_data_t {
 	seqcount_t	seq;
 	struct timespec	wall_time;
 	struct timespec	monotonic_time;
-	cycle_t		clk_mask;
+	u64		clk_mask;
 	u32		clk_mult;
 	u32		clk_shift;
 	void		*clk_fsys_mmio;
-	cycle_t		clk_cycle_last;
+	u64		clk_cycle_last;
 } ____cacheline_aligned;
 
 struct itc_jitter_data_t {
 	int		itc_jitter;
-	cycle_t		itc_lastcycle;
+	u64		itc_lastcycle;
 } ____cacheline_aligned;
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 021f44ab4bfb..71775b95d6cc 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -31,7 +31,7 @@
 
 #include "fsyscall_gtod_data.h"
 
-static cycle_t itc_get_cycles(struct clocksource *cs);
+static u64 itc_get_cycles(struct clocksource *cs);
 
 struct fsyscall_gtod_data_t fsyscall_gtod_data;
 
@@ -323,7 +323,7 @@ void ia64_init_itm(void)
 	}
 }
 
-static cycle_t itc_get_cycles(struct clocksource *cs)
+static u64 itc_get_cycles(struct clocksource *cs)
 {
 	unsigned long lcycle, now, ret;
 
@@ -397,7 +397,7 @@ void update_vsyscall_tz(void)
 }
 
 void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
-			 struct clocksource *c, u32 mult, cycle_t cycle_last)
+			 struct clocksource *c, u32 mult, u64 cycle_last)
 {
 	write_seqcount_begin(&fsyscall_gtod_data.seq);
diff --git a/arch/ia64/sn/kernel/sn2/timer.c b/arch/ia64/sn/kernel/sn2/timer.c
index abab8f99e913..66edc36426ed 100644
--- a/arch/ia64/sn/kernel/sn2/timer.c
+++ b/arch/ia64/sn/kernel/sn2/timer.c
@@ -22,9 +22,9 @@
 
 extern unsigned long sn_rtc_cycles_per_second;
 
-static cycle_t read_sn2(struct clocksource *cs)
+static u64 read_sn2(struct clocksource *cs)
 {
-	return (cycle_t)readq(RTC_COUNTER_ADDR);
+	return (u64)readq(RTC_COUNTER_ADDR);
 }
 
 static struct clocksource clocksource_sn2 = {
diff --git a/arch/m68k/68000/timers.c b/arch/m68k/68000/timers.c
index 99a98698bc95..252455bce144 100644
--- a/arch/m68k/68000/timers.c
+++ b/arch/m68k/68000/timers.c
@@ -76,7 +76,7 @@ static struct irqaction m68328_timer_irq = {
 
 /***************************************************************************/
 
-static cycle_t m68328_read_clk(struct clocksource *cs)
+static u64 m68328_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 cycles;
diff --git a/arch/m68k/coldfire/dma_timer.c b/arch/m68k/coldfire/dma_timer.c
index 235ad57c4707..8273eea57874 100644
--- a/arch/m68k/coldfire/dma_timer.c
+++ b/arch/m68k/coldfire/dma_timer.c
@@ -34,7 +34,7 @@
 #define DMA_DTMR_CLK_DIV_16	(2 << 1)
 #define DMA_DTMR_ENABLE		(1 << 0)
 
-static cycle_t cf_dt_get_cycles(struct clocksource *cs)
+static u64 cf_dt_get_cycles(struct clocksource *cs)
 {
 	return __raw_readl(DTCN0);
 }
diff --git a/arch/m68k/coldfire/pit.c b/arch/m68k/coldfire/pit.c
index d86a9ffb3f13..175553d5b8ed 100644
--- a/arch/m68k/coldfire/pit.c
+++ b/arch/m68k/coldfire/pit.c
@@ -118,7 +118,7 @@ static struct irqaction pit_irq = {
 
 /***************************************************************************/
 
-static cycle_t pit_read_clk(struct clocksource *cs)
+static u64 pit_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 cycles;
diff --git a/arch/m68k/coldfire/sltimers.c b/arch/m68k/coldfire/sltimers.c
index 831a08cf6f40..3292c0d68b18 100644
--- a/arch/m68k/coldfire/sltimers.c
+++ b/arch/m68k/coldfire/sltimers.c
@@ -97,7 +97,7 @@ static struct irqaction mcfslt_timer_irq = {
 	.handler = mcfslt_tick,
 };
 
-static cycle_t mcfslt_read_clk(struct clocksource *cs)
+static u64 mcfslt_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 cycles, scnt;
diff --git a/arch/m68k/coldfire/timers.c b/arch/m68k/coldfire/timers.c
index cd496a20fcc7..2dc7a58204f6 100644
--- a/arch/m68k/coldfire/timers.c
+++ b/arch/m68k/coldfire/timers.c
@@ -89,7 +89,7 @@ static struct irqaction mcftmr_timer_irq = {
 
 /***************************************************************************/
 
-static cycle_t mcftmr_read_clk(struct clocksource *cs)
+static u64 mcftmr_read_clk(struct clocksource *cs)
 {
 	unsigned long flags;
 	u32 cycles;
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 9e954959f605..1d6fad50fa76 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -190,17 +190,17 @@ static u64 xilinx_clock_read(void)
 	return read_fn(timer_baseaddr + TCR1);
 }
 
-static cycle_t xilinx_read(struct clocksource *cs)
+static u64 xilinx_read(struct clocksource *cs)
 {
 	/* reading actual value of timer 1 */
-	return (cycle_t)xilinx_clock_read();
+	return (u64)xilinx_clock_read();
 }
 
 static struct timecounter xilinx_tc = {
 	.cc = NULL,
 };
 
-static cycle_t xilinx_cc_read(const struct cyclecounter *cc)
+static u64 xilinx_cc_read(const struct cyclecounter *cc)
 {
 	return xilinx_read(NULL);
 }
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index f99d3ec17a45..e1bec5a77c39 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -44,7 +44,7 @@
 /* 32kHz clock enabled and detected */
 #define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)
 
-static cycle_t au1x_counter1_read(struct clocksource *cs)
+static u64 au1x_counter1_read(struct clocksource *cs)
 {
 	return alchemy_rdsys(AU1000_SYS_RTCREAD);
 }
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index 23c2344a3552..39f153fe0022 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -98,7 +98,7 @@ void octeon_init_cvmcount(void)
 	local_irq_restore(flags);
 }
 
-static cycle_t octeon_cvmcount_read(struct clocksource *cs)
+static u64 octeon_cvmcount_read(struct clocksource *cs)
 {
 	return read_c0_cvmcount();
 }
diff --git a/arch/mips/jz4740/time.c b/arch/mips/jz4740/time.c
index 1f7ca2c9f262..bcf8f8c62737 100644
--- a/arch/mips/jz4740/time.c
+++ b/arch/mips/jz4740/time.c
@@ -34,7 +34,7 @@
 
 static uint16_t jz4740_jiffies_per_tick;
 
-static cycle_t jz4740_clocksource_read(struct clocksource *cs)
+static u64 jz4740_clocksource_read(struct clocksource *cs)
 {
 	return jz4740_timer_get_count(TIMER_CLOCKSOURCE);
 }
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
index 537eefdf838f..aaca60d6ffc3 100644
--- a/arch/mips/kernel/cevt-txx9.c
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -27,7 +27,7 @@ struct txx9_clocksource {
 	struct txx9_tmr_reg __iomem *tmrptr;
 };
 
-static cycle_t txx9_cs_read(struct clocksource *cs)
+static u64 txx9_cs_read(struct clocksource *cs)
 {
 	struct txx9_clocksource *txx9_cs =
 		container_of(cs, struct txx9_clocksource, cs);
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
index 7f65b53d1b24..f011261e9506 100644
--- a/arch/mips/kernel/csrc-bcm1480.c
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -25,9 +25,9 @@
 
 #include <asm/sibyte/sb1250.h>
 
-static cycle_t bcm1480_hpt_read(struct clocksource *cs)
+static u64 bcm1480_hpt_read(struct clocksource *cs)
 {
-	return (cycle_t) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
+	return (u64) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
 }
 
 struct clocksource bcm1480_clocksource = {
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
index 722f5589cd1d..f6acd1e58c26 100644
--- a/arch/mips/kernel/csrc-ioasic.c
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -22,7 +22,7 @@
 #include <asm/dec/ioasic.h>
 #include <asm/dec/ioasic_addrs.h>
 
-static cycle_t dec_ioasic_hpt_read(struct clocksource *cs)
+static u64 dec_ioasic_hpt_read(struct clocksource *cs)
 {
 	return ioasic_read(IO_REG_FCTR);
 }
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index d76275da54cb..eed099f35bf1 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -11,7 +11,7 @@
 
 #include <asm/time.h>
 
-static cycle_t c0_hpt_read(struct clocksource *cs)
+static u64 c0_hpt_read(struct clocksource *cs)
 {
 	return read_c0_count();
 }
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
index d915652b4d56..b07b7310d3f4 100644
--- a/arch/mips/kernel/csrc-sb1250.c
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -30,7 +30,7 @@
  * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
  * again.
  */
-static inline cycle_t sb1250_hpt_get_cycles(void)
+static inline u64 sb1250_hpt_get_cycles(void)
 {
 	unsigned int count;
 	void __iomem *addr;
@@ -41,7 +41,7 @@ static inline cycle_t sb1250_hpt_get_cycles(void)
 	return SB1250_HPT_VALUE - count;
 }
 
-static cycle_t sb1250_hpt_read(struct clocksource *cs)
+static u64 sb1250_hpt_read(struct clocksource *cs)
 {
 	return sb1250_hpt_get_cycles();
 }
diff --git a/arch/mips/loongson32/common/time.c b/arch/mips/loongson32/common/time.c
index ff224f0020e5..e6f972d35252 100644
--- a/arch/mips/loongson32/common/time.c
+++ b/arch/mips/loongson32/common/time.c
@@ -63,7 +63,7 @@ void __init ls1x_pwmtimer_init(void)
 	ls1x_pwmtimer_restart();
 }
 
-static cycle_t ls1x_clocksource_read(struct clocksource *cs)
+static u64 ls1x_clocksource_read(struct clocksource *cs)
 {
 	unsigned long flags;
 	int count;
@@ -107,7 +107,7 @@ static cycle_t ls1x_clocksource_read(struct clocksource *cs)
 
 	raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
 
-	return (cycle_t) (jifs * ls1x_jiffies_per_tick) + count;
+	return (u64) (jifs * ls1x_jiffies_per_tick) + count;
 }
 
 static struct clocksource ls1x_clocksource = {
diff --git a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
index da77d412514c..9edfa55a0e78 100644
--- a/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
+++ b/arch/mips/loongson64/common/cs5536/cs5536_mfgpt.c
@@ -144,7 +144,7 @@ void __init setup_mfgpt0_timer(void)
  * to just read by itself. So use jiffies to emulate a free
 * running counter:
 */
-static cycle_t mfgpt_read(struct clocksource *cs)
+static u64 mfgpt_read(struct clocksource *cs)
 {
 	unsigned long flags;
 	int count;
@@ -188,7 +188,7 @@ static cycle_t mfgpt_read(struct clocksource *cs)
 
 	raw_spin_unlock_irqrestore(&mfgpt_lock, flags);
 
-	return (cycle_t) (jifs * COMPARE) + count;
+	return (u64) (jifs * COMPARE) + count;
 }
 
 static struct clocksource clocksource_mfgpt = {
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index 4788bea62a6a..24afe364637b 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -248,9 +248,9 @@ void __init setup_hpet_timer(void)
 	pr_info("hpet clock event device register\n");
 }
 
-static cycle_t hpet_read_counter(struct clocksource *cs)
+static u64 hpet_read_counter(struct clocksource *cs)
 {
-	return (cycle_t)hpet_read(HPET_COUNTER);
+	return (u64)hpet_read(HPET_COUNTER);
 }
 
 static void hpet_suspend(struct clocksource *cs)
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 7407da04f8d6..1829a9031eec 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -75,7 +75,7 @@ static void __init estimate_frequencies(void)
 	unsigned int count, start;
 	unsigned char secs1, secs2, ctrl;
 	int secs;
-	cycle_t giccount = 0, gicstart = 0;
+	u64 giccount = 0, gicstart = 0;
 
 #if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
 	mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
index 5873c83e65be..cbbf0d48216b 100644
--- a/arch/mips/netlogic/common/time.c
+++ b/arch/mips/netlogic/common/time.c
@@ -59,14 +59,14 @@ unsigned int get_c0_compare_int(void)
 	return IRQ_TIMER;
 }
 
-static cycle_t nlm_get_pic_timer(struct clocksource *cs)
+static u64 nlm_get_pic_timer(struct clocksource *cs)
 {
 	uint64_t picbase = nlm_get_node(0)->picbase;
 
 	return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
 }
 
-static cycle_t nlm_get_pic_timer32(struct clocksource *cs)
+static u64 nlm_get_pic_timer32(struct clocksource *cs)
 {
 	uint64_t picbase = nlm_get_node(0)->picbase;
diff --git a/arch/mips/sgi-ip27/ip27-timer.c b/arch/mips/sgi-ip27/ip27-timer.c
index 42d6cb9f956e..695c51bdd7dc 100644
--- a/arch/mips/sgi-ip27/ip27-timer.c
+++ b/arch/mips/sgi-ip27/ip27-timer.c
@@ -140,7 +140,7 @@ static void __init hub_rt_clock_event_global_init(void)
 	setup_irq(irq, &hub_rt_irqaction);
 }
 
-static cycle_t hub_rt_read(struct clocksource *cs)
+static u64 hub_rt_read(struct clocksource *cs)
 {
 	return REMOTE_HUB_L(cputonasid(0), PI_RT_COUNT);
 }
diff --git a/arch/mn10300/kernel/csrc-mn10300.c b/arch/mn10300/kernel/csrc-mn10300.c
index 45644cf18c41..6b74df3661f2 100644
--- a/arch/mn10300/kernel/csrc-mn10300.c
+++ b/arch/mn10300/kernel/csrc-mn10300.c
@@ -13,7 +13,7 @@
 #include <asm/timex.h>
 #include "internal.h"
 
-static cycle_t mn10300_read(struct clocksource *cs)
+static u64 mn10300_read(struct clocksource *cs)
 {
 	return read_timestamp_counter();
 }
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
index 746bf5caaffc..6e2bdc9b8530 100644
--- a/arch/nios2/kernel/time.c
+++ b/arch/nios2/kernel/time.c
@@ -81,7 +81,7 @@ static inline unsigned long read_timersnapshot(struct nios2_timer *timer)
 	return count;
 }
 
-static cycle_t nios2_timer_read(struct clocksource *cs)
+static u64 nios2_timer_read(struct clocksource *cs)
 {
 	struct nios2_clocksource *nios2_cs = to_nios2_clksource(cs);
 	unsigned long flags;
diff --git a/arch/openrisc/kernel/time.c b/arch/openrisc/kernel/time.c
index 50e970183dcd..687c11d048d7 100644
--- a/arch/openrisc/kernel/time.c
+++ b/arch/openrisc/kernel/time.c
@@ -117,9 +117,9 @@ static __init void openrisc_clockevent_init(void)
 * is 32 bits wide and runs at the CPU clock frequency.
 */
 
-static cycle_t openrisc_timer_read(struct clocksource *cs)
+static u64 openrisc_timer_read(struct clocksource *cs)
 {
-	return (cycle_t) mfspr(SPR_TTCR);
+	return (u64) mfspr(SPR_TTCR);
 }
 
 static struct clocksource openrisc_timer = {
diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c
index 037d81f00520..da0d9cb63403 100644
--- a/arch/parisc/kernel/time.c
+++ b/arch/parisc/kernel/time.c
@@ -137,7 +137,7 @@ EXPORT_SYMBOL(profile_pc);
 
 /* clock source code */
 
-static cycle_t notrace read_cr16(struct clocksource *cs)
+static u64 notrace read_cr16(struct clocksource *cs)
 {
 	return get_cycles();
 }
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 19397e2a8bf5..bc2e08d415fa 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -80,7 +80,7 @@
 #include <linux/clockchips.h>
 #include <linux/timekeeper_internal.h>
 
-static cycle_t rtc_read(struct clocksource *);
+static u64 rtc_read(struct clocksource *);
 static struct clocksource clocksource_rtc = {
 	.name         = "rtc",
 	.rating       = 400,
@@ -89,7 +89,7 @@ static struct clocksource clocksource_rtc = {
 	.read         = rtc_read,
 };
 
-static cycle_t timebase_read(struct clocksource *);
+static u64 timebase_read(struct clocksource *);
 static struct clocksource clocksource_timebase = {
 	.name         = "timebase",
 	.rating       = 400,
@@ -802,18 +802,18 @@ void read_persistent_clock(struct timespec *ts)
 }
 
 /* clocksource code */
-static cycle_t rtc_read(struct clocksource *cs)
+static u64 rtc_read(struct clocksource *cs)
 {
-	return (cycle_t)get_rtc();
+	return (u64)get_rtc();
 }
 
-static cycle_t timebase_read(struct clocksource *cs)
+static u64 timebase_read(struct clocksource *cs)
 {
-	return (cycle_t)get_tb();
+	return (u64)get_tb();
 }
 
 void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-			 struct clocksource *clock, u32 mult, cycle_t cycle_last)
+			 struct clocksource *clock, u32 mult, u64 cycle_last)
 {
 	u64 new_tb_to_xs, new_stamp_xsec;
 	u32 frac_sec;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 66b2a35be424..ec34e39471a7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -1872,8 +1872,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
 	}
 	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
 		   / tb_ticks_per_sec;
-	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
-		      HRTIMER_MODE_REL);
+	hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
 	vcpu->arch.timer_running = 1;
 }
diff --git a/arch/powerpc/oprofile/cell/spu_profiler.c b/arch/powerpc/oprofile/cell/spu_profiler.c
index b19265de9178..5182f2936af2 100644
--- a/arch/powerpc/oprofile/cell/spu_profiler.c
+++ b/arch/powerpc/oprofile/cell/spu_profiler.c
@@ -180,7 +180,7 @@ static enum hrtimer_restart profile_spus(struct hrtimer *timer)
 	smp_wmb();	/* insure spu event buffer updates are written */
 			/* don't want events intermingled... */
 
-	kt = ktime_set(0, profiling_interval);
+	kt = profiling_interval;
 	if (!spu_prof_running)
 		goto stop;
 	hrtimer_forward(timer, timer->base->get_time(), kt);
@@ -204,7 +204,7 @@ int start_spu_profiling_cycles(unsigned int cycles_reset)
 	ktime_t kt;
 
 	pr_debug("timer resolution: %lu\n", TICK_NSEC);
-	kt = ktime_set(0, profiling_interval);
+	kt = profiling_interval;
 	hrtimer_init(&timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer_set_expires(&timer, kt);
 	timer.function = profile_spus;
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index ec76315c9ee5..52949df88529 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -209,7 +209,7 @@ void read_boot_clock64(struct timespec64 *ts)
 	tod_to_timeval(clock - TOD_UNIX_EPOCH, ts);
 }
 
-static cycle_t read_tod_clock(struct clocksource *cs)
+static u64 read_tod_clock(struct clocksource *cs)
 {
 	unsigned long long now, adj;
diff --git a/arch/s390/kvm/interrupt.c b/arch/s390/kvm/interrupt.c
index 6843dd5a1cba..0f8f14199734 100644
--- a/arch/s390/kvm/interrupt.c
+++ b/arch/s390/kvm/interrupt.c
@@ -1019,7 +1019,7 @@ int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
 		return 0;
 
 	__set_cpu_idle(vcpu);
-	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
+	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
 	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
 no_timer:
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 1affabc96b08..244062bdaa56 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -148,7 +148,7 @@ static unsigned int sbus_cycles_offset(void)
 	return offset;
 }
 
-static cycle_t timer_cs_read(struct clocksource *cs)
+static u64 timer_cs_read(struct clocksource *cs)
 {
 	unsigned int seq, offset;
 	u64 cycles;
diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c
index 807f7e2ce014..12a6d3555cb8 100644
--- a/arch/sparc/kernel/time_64.c
+++ b/arch/sparc/kernel/time_64.c
@@ -770,7 +770,7 @@ void udelay(unsigned long usecs)
 }
 EXPORT_SYMBOL(udelay);
 
-static cycle_t clocksource_tick_read(struct clocksource *cs)
+static u64 clocksource_tick_read(struct clocksource *cs)
 {
 	return tick_ops->get_tick();
 }
diff --git a/arch/um/kernel/time.c b/arch/um/kernel/time.c
index 25c23666d592..ba87a27d6715 100644
--- a/arch/um/kernel/time.c
+++ b/arch/um/kernel/time.c
@@ -83,7 +83,7 @@ static irqreturn_t um_timer(int irq, void *dev)
 	return IRQ_HANDLED;
 }
 
-static cycle_t timer_read(struct clocksource *cs)
+static u64 timer_read(struct clocksource *cs)
 {
 	return os_nsecs() / TIMER_MULTIPLIER;
 }
diff --git a/arch/unicore32/kernel/time.c b/arch/unicore32/kernel/time.c
index ac4c5449bb88..fceaa673f861 100644
--- a/arch/unicore32/kernel/time.c
+++ b/arch/unicore32/kernel/time.c
@@ -62,7 +62,7 @@ static struct clock_event_device ckevt_puv3_osmr0 = {
 	.set_state_oneshot = puv3_osmr0_shutdown,
 };
 
-static cycle_t puv3_read_oscr(struct clocksource *cs)
+static u64 puv3_read_oscr(struct clocksource *cs)
 {
 	return readl(OST_OSCR);
 }
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 02223cb4bcfd..9d4d6e138311 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -92,10 +92,10 @@ static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
 	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
 }
 
-static notrace cycle_t vread_pvclock(int *mode)
+static notrace u64 vread_pvclock(int *mode)
 {
 	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
-	cycle_t ret;
+	u64 ret;
 	u64 last;
 	u32 version;
@@ -142,9 +142,9 @@ static notrace cycle_t vread_pvclock(int *mode)
 }
 #endif
 
-notrace static cycle_t vread_tsc(void)
+notrace static u64 vread_tsc(void)
 {
-	cycle_t ret = (cycle_t)rdtsc_ordered();
+	u64 ret = (u64)rdtsc_ordered();
 	u64 last = gtod->cycle_last;
 
 	if (likely(ret >= last))
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 2e25038dbd93..a7066dc1a7e9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -768,7 +768,7 @@ struct kvm_arch {
 	spinlock_t pvclock_gtod_sync_lock;
 	bool use_master_clock;
 	u64 master_kernel_ns;
-	cycle_t master_cycle_now;
+	u64 master_cycle_now;
 	struct delayed_work kvmclock_update_work;
 	struct delayed_work kvmclock_sync_work;
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 3ad741b84072..448cfe1b48cf 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -14,7 +14,7 @@ static inline struct pvclock_vsyscall_time_info *pvclock_pvti_cpu0_va(void)
 #endif
 
 /* some helper functions for xen and kvm pv clock sources */
-cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src);
 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src);
 void pvclock_set_flags(u8 flags);
 unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src);
@@ -87,11 +87,10 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
 }
 
 static __always_inline
-cycle_t __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
-			      u64 tsc)
+u64 __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src, u64 tsc)
 {
 	u64 delta = tsc - src->tsc_timestamp;
-	cycle_t offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
+	u64 offset = pvclock_scale_delta(delta, src->tsc_to_system_mul,
 					     src->tsc_shift);
 	return src->system_time + offset;
 }
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index abb1fdcc545a..f5e6f1c417df 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -29,7 +29,7 @@ static inline cycles_t get_cycles(void)
 	return rdtsc();
 }
 
-extern struct system_counterval_t convert_art_to_tsc(cycle_t art);
+extern struct system_counterval_t convert_art_to_tsc(u64 art);
 
 extern void tsc_init(void);
 extern void mark_tsc_unstable(char *reason);
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 3a01996db58f..022e59714562 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -17,8 +17,8 @@ struct vsyscall_gtod_data {
 	unsigned seq;
 
 	int vclock_mode;
-	cycle_t	cycle_last;
-	cycle_t	mask;
+	u64	cycle_last;
+	u64	mask;
 	u32	mult;
 	u32	shift;
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 202a7817beaf..65721dc73bd8 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -247,7 +247,7 @@ void apbt_setup_secondary_clock(void) {}
 static int apbt_clocksource_register(void)
 {
 	u64 start, now;
-	cycle_t t1;
+	u64 t1;
 
 	/* Start the counter, use timer 2 as source, timer 0/1 for event */
 	dw_apb_clocksource_start(clocksource_apbt);
@@ -355,7 +355,7 @@ unsigned long apbt_quick_calibrate(void)
 {
 	int i, scale;
 	u64 old, new;
-	cycle_t t1, t2;
+	u64 t1, t2;
 	unsigned long khz = 0;
 	u32 loop, shift;
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index f37e02e41a77..65e20c97e04b 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -133,9 +133,9 @@ static uint32_t __init ms_hyperv_platform(void)
 	return 0;
 }
 
-static cycle_t read_hv_clock(struct clocksource *arg)
+static u64 read_hv_clock(struct clocksource *arg)
 {
-	cycle_t current_tick;
+	u64 current_tick;
 	/*
 	 * Read the partition counter to get the current tick count. This count
 	 * is set to 0 when the partition is created and is incremented in
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 38c8fd684d38..85e87b46c318 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -791,7 +791,7 @@ static union hpet_lock hpet __cacheline_aligned = {
 	{ .lock = __ARCH_SPIN_LOCK_UNLOCKED, },
 };
 
-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
 	unsigned long flags;
 	union hpet_lock old, new;
@@ -802,7 +802,7 @@ static cycle_t read_hpet(struct clocksource *cs)
 	 * Read HPET directly if in NMI.
 	 */
 	if (in_nmi())
-		return (cycle_t)hpet_readl(HPET_COUNTER);
+		return (u64)hpet_readl(HPET_COUNTER);
 
 	/*
 	 * Read the current state of the lock and HPET value atomically.
@@ -821,7 +821,7 @@ static cycle_t read_hpet(struct clocksource *cs)
 		WRITE_ONCE(hpet.value, new.value);
 		arch_spin_unlock(&hpet.lock);
 		local_irq_restore(flags);
-		return (cycle_t)new.value;
+		return (u64)new.value;
 	}
 	local_irq_restore(flags);
 
@@ -843,15 +843,15 @@ contended:
 		new.lockval = READ_ONCE(hpet.lockval);
 	} while ((new.value == old.value) && arch_spin_is_locked(&new.lock));
 
-	return (cycle_t)new.value;
+	return (u64)new.value;
 }
 #else
 /*
  * For UP or 32-bit.
  */
-static cycle_t read_hpet(struct clocksource *cs)
+static u64 read_hpet(struct clocksource *cs)
 {
-	return (cycle_t)hpet_readl(HPET_COUNTER);
+	return (u64)hpet_readl(HPET_COUNTER);
 }
 #endif
 
@@ -867,7 +867,7 @@ static struct clocksource clocksource_hpet = {
 static int hpet_clocksource_register(void)
 {
 	u64 start, now;
-	cycle_t t1;
+	u64 t1;
 
 	/* Start the counter */
 	hpet_restart_counter();
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 60b9949f1e65..2a5cafdf8808 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -32,7 +32,7 @@ static int kvmclock __ro_after_init = 1;
 static int msr_kvm_system_time = MSR_KVM_SYSTEM_TIME;
 static int msr_kvm_wall_clock = MSR_KVM_WALL_CLOCK;
 
-static cycle_t kvm_sched_clock_offset;
+static u64 kvm_sched_clock_offset;
 
 static int parse_no_kvmclock(char *arg)
 {
@@ -79,10 +79,10 @@ static int kvm_set_wallclock(const struct timespec *now)
 	return -1;
 }
 
-static cycle_t kvm_clock_read(void)
+static u64 kvm_clock_read(void)
 {
 	struct pvclock_vcpu_time_info *src;
-	cycle_t ret;
+	u64 ret;
 	int cpu;
 
 	preempt_disable_notrace();
@@ -93,12 +93,12 @@ static cycle_t kvm_clock_read(void)
 	return ret;
 }
 
-static cycle_t kvm_clock_get_cycles(struct clocksource *cs)
+static u64 kvm_clock_get_cycles(struct clocksource *cs)
 {
 	return kvm_clock_read();
 }
 
-static cycle_t kvm_sched_clock_read(void)
+static u64 kvm_sched_clock_read(void)
 {
 	return kvm_clock_read() - kvm_sched_clock_offset;
 }
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index 5b2cc889ce34..9e93fe5803b4 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -71,10 +71,10 @@ u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
 	return flags & valid_flags;
 }
 
-cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
+u64 pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
 {
 	unsigned version;
-	cycle_t ret;
+	u64 ret;
 	u64 last;
 	u8 flags;
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 0aed75a1e31b..be3a49ee0356 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1101,9 +1101,9 @@ static void tsc_resume(struct clocksource *cs)
 * checking the result of read_tsc() - cycle_last for being negative.
 * That works because CLOCKSOURCE_MASK(64) does not mask out any bit.
 */
-static cycle_t read_tsc(struct clocksource *cs)
+static u64 read_tsc(struct clocksource *cs)
 {
-	return (cycle_t)rdtsc_ordered();
+	return (u64)rdtsc_ordered();
 }
 
 /*
@@ -1192,7 +1192,7 @@ int unsynchronized_tsc(void)
 /*
 * Convert ART to TSC given numerator/denominator found in detect_art()
 */
-struct system_counterval_t convert_art_to_tsc(cycle_t art)
+struct system_counterval_t convert_art_to_tsc(u64 art)
 {
 	u64 tmp, res, rem;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 34a66b2d47e6..5fe290c1b7d8 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1106,7 +1106,7 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
 	now = ktime_get();
 	remaining = ktime_sub(apic->lapic_timer.target_expiration, now);
 	if (ktime_to_ns(remaining) < 0)
-		remaining = ktime_set(0, 0);
+		remaining = 0;
 
 	ns = mod_64(ktime_to_ns(remaining), apic->lapic_timer.period);
 	tmcct = div64_u64(ns,
@@ -2057,7 +2057,7 @@ void kvm_inject_apic_timer_irqs(struct kvm_vcpu *vcpu)
 	apic->lapic_timer.tscdeadline = 0;
 	if (apic_lvtt_oneshot(apic)) {
 		apic->lapic_timer.tscdeadline = 0;
-		apic->lapic_timer.target_expiration = ktime_set(0, 0);
+		apic->lapic_timer.target_expiration = 0;
 	}
 	atomic_set(&apic->lapic_timer.pending, 0);
 }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6414fa6cb9fd..51ccfe08e32f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1131,8 +1131,8 @@ struct pvclock_gtod_data {
 	struct { /* extract of a clocksource struct */
 		int vclock_mode;
-		cycle_t	cycle_last;
-		cycle_t	mask;
+		u64	cycle_last;
+		u64	mask;
 		u32	mult;
 		u32	shift;
 	} clock;
@@ -1572,9 +1572,9 @@ static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
 
 #ifdef CONFIG_X86_64
 
-static cycle_t read_tsc(void)
+static u64 read_tsc(void)
 {
-	cycle_t ret = (cycle_t)rdtsc_ordered();
+	u64 ret = (u64)rdtsc_ordered();
 	u64 last = pvclock_gtod_data.clock.cycle_last;
 
 	if (likely(ret >= last))
@@ -1592,7 +1592,7 @@ static cycle_t read_tsc(void)
 	return last;
 }
 
-static inline u64 vgettsc(cycle_t *cycle_now)
+static inline u64 vgettsc(u64 *cycle_now)
 {
 	long v;
 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
@@ -1603,7 +1603,7 @@ static inline u64 vgettsc(cycle_t *cycle_now)
 	return v * gtod->clock.mult;
 }
 
-static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
+static int do_monotonic_boot(s64 *t, u64 *cycle_now)
 {
 	struct pvclock_gtod_data *gtod = &pvclock_gtod_data;
 	unsigned long seq;
@@ -1624,7 +1624,7 @@ static int do_monotonic_boot(s64 *t, cycle_t *cycle_now)
 }
 
 /* returns true if host is using tsc clocksource */
-static bool kvm_get_time_and_clockread(s64 *kernel_ns, cycle_t *cycle_now)
+static bool kvm_get_time_and_clockread(s64 *kernel_ns, u64 *cycle_now)
 {
 	/* checked again under seqlock below */
 	if (pvclock_gtod_data.clock.vclock_mode != VCLOCK_TSC)
diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
index 4ca0d78adcf0..d3289d7e78fa 100644
--- a/arch/x86/lguest/boot.c
+++ b/arch/x86/lguest/boot.c
@@ -916,7 +916,7 @@ static unsigned long lguest_tsc_khz(void)
 * If we can't use the TSC, the kernel falls back to our lower-priority
 * "lguest_clock", where we read the time value given to us by the Host.
 */
-static cycle_t lguest_clock_read(struct clocksource *cs)
+static u64 lguest_clock_read(struct clocksource *cs)
 {
 	unsigned long sec, nsec;
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
index b333fc45f9ec..2ee7632d4916 100644
--- a/arch/x86/platform/uv/uv_time.c
+++ b/arch/x86/platform/uv/uv_time.c
@@ -30,7 +30,7 @@
 
 #define RTC_NAME		"sgi_rtc"
 
-static cycle_t uv_read_rtc(struct clocksource *cs);
+static u64 uv_read_rtc(struct clocksource *cs);
 static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
 static int uv_rtc_shutdown(struct clock_event_device *evt);
 
@@ -38,7 +38,7 @@ static struct clocksource clocksource_uv = {
 	.name		= RTC_NAME,
 	.rating		= 299,
 	.read		= uv_read_rtc,
-	.mask		= (cycle_t)UVH_RTC_REAL_TIME_CLOCK_MASK,
+	.mask		= (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
 	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
 };
 
@@ -296,7 +296,7 @@ static int uv_rtc_unset_timer(int cpu, int force)
 * cachelines of it's own page. This allows faster simultaneous reads
 * from a given socket.
 */
-static cycle_t uv_read_rtc(struct clocksource *cs)
+static u64 uv_read_rtc(struct clocksource *cs)
 {
 	unsigned long offset;
 
@@ -305,7 +305,7 @@ static cycle_t uv_read_rtc(struct clocksource *cs)
 	else
 		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
 
-	return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+	return (u64)uv_read_local_mmr(UVH_RTC | offset);
 }
 
 /*
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index 33d8f6a7829d..1e69956d7852 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -39,10 +39,10 @@ static unsigned long xen_tsc_khz(void)
 	return pvclock_tsc_khz(info);
 }
 
-cycle_t xen_clocksource_read(void)
+u64 xen_clocksource_read(void)
 {
 	struct pvclock_vcpu_time_info *src;
-	cycle_t ret;
+	u64 ret;
 
 	preempt_disable_notrace();
 	src = &__this_cpu_read(xen_vcpu)->time;
@@ -51,7 +51,7 @@ cycle_t xen_clocksource_read(void)
 	return ret;
 }
 
-static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
+static u64 xen_clocksource_get_cycles(struct clocksource *cs)
 {
 	return xen_clocksource_read();
 }
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 3cbce3b085e7..ac0a2b0f9e62 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -67,7 +67,7 @@ void xen_init_irq_ops(void);
 void xen_setup_timer(int cpu);
 void xen_setup_runstate_info(int cpu);
 void xen_teardown_timer(int cpu);
-cycle_t xen_clocksource_read(void);
+u64 xen_clocksource_read(void);
 void xen_setup_cpu_clockevents(void);
 void __init xen_init_time_ops(void);
 void __init xen_hvm_init_time_ops(void);
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index be81e69b25bc..668c1056f9e4 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -34,9 +34,9 @@
 unsigned long ccount_freq;		/* ccount Hz */
 EXPORT_SYMBOL(ccount_freq);
 
-static cycle_t ccount_read(struct clocksource *cs)
+static u64 ccount_read(struct clocksource *cs)
 {
-	return (cycle_t)get_ccount();
+	return (u64)get_ccount();
 }
 
 static u64 notrace ccount_sched_clock_read(void)