Diffstat (limited to 'drivers/clocksource')
39 files changed, 678 insertions, 114 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index e2c6e43cf8ca..4866f7aa32e6 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -282,6 +282,26 @@ config CLKSRC_MPS2 select CLKSRC_MMIO select CLKSRC_OF +config ARC_TIMERS + bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + select CLKSRC_OF + help + These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores + (ARC700 as well as ARC HS38). + TIMER0 serves as clockevent while TIMER1 provides clocksource + +config ARC_TIMERS_64BIT + bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST + depends on GENERIC_CLOCKEVENTS + depends on ARC_TIMERS + select CLKSRC_OF + help + This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP) + RTC is implemented inside the core, while GFRC sits outside the core in + ARConnect IP block. Driver automatically picks one of them for clocksource + as appropriate. + config ARM_ARCH_TIMER bool select CLKSRC_OF if OF diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile index cf87f407f1ad..a14111e1f087 100644 --- a/drivers/clocksource/Makefile +++ b/drivers/clocksource/Makefile @@ -51,6 +51,7 @@ obj-$(CONFIG_CLKSRC_TI_32K) += timer-ti-32k.o obj-$(CONFIG_CLKSRC_NPS) += timer-nps.o obj-$(CONFIG_OXNAS_RPS_TIMER) += timer-oxnas-rps.o +obj-$(CONFIG_ARC_TIMERS) += arc_timer.o obj-$(CONFIG_ARM_ARCH_TIMER) += arm_arch_timer.o obj-$(CONFIG_ARM_GLOBAL_TIMER) += arm_global_timer.o obj-$(CONFIG_ARMV7M_SYSTICK) += armv7m_systick.o diff --git a/drivers/clocksource/acpi_pm.c b/drivers/clocksource/acpi_pm.c index 28037d0b8dcd..1961e3539b57 100644 --- a/drivers/clocksource/acpi_pm.c +++ b/drivers/clocksource/acpi_pm.c @@ -58,16 +58,16 @@ u32 acpi_pm_read_verified(void) return v2; } -static cycle_t acpi_pm_read(struct clocksource *cs) +static u64 acpi_pm_read(struct clocksource *cs) { - return (cycle_t)read_pmtmr(); + return (u64)read_pmtmr(); } static struct clocksource clocksource_acpi_pm = { .name = "acpi_pm", .rating = 200, .read = acpi_pm_read, - .mask = (cycle_t)ACPI_PM_MASK, + .mask = (u64)ACPI_PM_MASK, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; @@ -81,9 +81,9 @@ static int __init acpi_pm_good_setup(char *__str) } __setup("acpi_pm_good", acpi_pm_good_setup); -static cycle_t acpi_pm_read_slow(struct clocksource *cs) +static u64 acpi_pm_read_slow(struct clocksource *cs) { - return (cycle_t)acpi_pm_read_verified(); + return (u64)acpi_pm_read_verified(); } static inline void acpi_pm_need_workaround(void) @@ -145,7 +145,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, PCI_DEVICE_ID_SERVERWORKS_LE, */ static int verify_pmtmr_rate(void) { - cycle_t value1, value2; + u64 value1, value2; unsigned long count, delta; mach_prepare_counter(); @@ -175,7 +175,7 @@ static int verify_pmtmr_rate(void) static int __init init_acpi_pm_clocksource(void) { - cycle_t value1, value2; + u64 value1, value2; unsigned int i, j = 0; if (!pmtmr_ioport) diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c new file mode 100644 index 000000000000..7517f959cba7 --- /dev/null +++ b/drivers/clocksource/arc_timer.c @@ -0,0 +1,336 @@ +/* + * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com) + * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1, Each can be + * programmed to go from @count to @limit and optionally interrupt. + * We've designated TIMER0 for clockevents and TIMER1 for clocksource + * + * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP) + * which are suitable for UP and SMP based clocksources respectively + */ + +#include <linux/interrupt.h> +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/cpu.h> +#include <linux/of.h> +#include <linux/of_irq.h> + +#include <soc/arc/timers.h> +#include <soc/arc/mcip.h> + + +static unsigned long arc_timer_freq; + +static int noinline arc_get_timer_clk(struct device_node *node) +{ + struct clk *clk; + int ret; + + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) { + pr_err("timer missing clk"); + return PTR_ERR(clk); + } + + ret = clk_prepare_enable(clk); + if (ret) { + pr_err("Couldn't enable parent clk\n"); + return ret; + } + + arc_timer_freq = clk_get_rate(clk); + + return 0; +} + +/********** Clock Source Device *********/ + +#ifdef CONFIG_ARC_TIMERS_64BIT + +static u64 arc_read_gfrc(struct clocksource *cs) +{ + unsigned long flags; + u32 l, h; + + local_irq_save(flags); + + __mcip_cmd(CMD_GFRC_READ_LO, 0); + l = read_aux_reg(ARC_REG_MCIP_READBACK); + + __mcip_cmd(CMD_GFRC_READ_HI, 0); + h = read_aux_reg(ARC_REG_MCIP_READBACK); + + local_irq_restore(flags); + + return (((u64)h) << 32) | l; +} + +static struct clocksource arc_counter_gfrc = { + .name = "ARConnect GFRC", + .rating = 400, + .read = arc_read_gfrc, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int __init arc_cs_setup_gfrc(struct device_node *node) +{ + struct mcip_bcr mp; + int ret; + + READ_BCR(ARC_REG_MCIP_BCR, mp); + if (!mp.gfrc) { + pr_warn("Global-64-bit-Ctr clocksource not detected"); + return -ENXIO; + } + + ret = arc_get_timer_clk(node); + if (ret) + return ret; + + return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq); +} +CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc); + +#define AUX_RTC_CTRL 0x103 +#define AUX_RTC_LOW 0x104 +#define AUX_RTC_HIGH 0x105 + +static u64 arc_read_rtc(struct clocksource *cs) +{ + unsigned long status; + u32 l, h; + + /* + * hardware has an internal state machine which tracks readout of + * low/high and updates the CTRL.status if + * - interrupt/exception taken between the two reads + * - high increments after low has been read + */ + do { + l = read_aux_reg(AUX_RTC_LOW); + h = read_aux_reg(AUX_RTC_HIGH); + status = read_aux_reg(AUX_RTC_CTRL); + } while (!(status & _BITUL(31))); + + return (((u64)h) << 32) | l; +} + +static struct clocksource arc_counter_rtc = { + .name = "ARCv2 RTC", + .rating = 350, + .read = arc_read_rtc, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int __init arc_cs_setup_rtc(struct device_node *node) +{ + struct bcr_timer timer; + int ret; + + READ_BCR(ARC_REG_TIMERS_BCR, timer); + if (!timer.rtc) { + pr_warn("Local-64-bit-Ctr clocksource not detected"); + return -ENXIO; + } + + /* Local to CPU hence not usable in SMP */ + if (IS_ENABLED(CONFIG_SMP)) { + pr_warn("Local-64-bit-Ctr not usable in SMP"); + return -EINVAL; + } + + ret = arc_get_timer_clk(node); + if (ret) + return ret; + + write_aux_reg(AUX_RTC_CTRL, 1); + + return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq); +} +CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", 
arc_cs_setup_rtc); + +#endif + +/* + * 32bit TIMER1 to keep counting monotonically and wraparound + */ + +static u64 arc_read_timer1(struct clocksource *cs) +{ + return (u64) read_aux_reg(ARC_REG_TIMER1_CNT); +} + +static struct clocksource arc_counter_timer1 = { + .name = "ARC Timer1", + .rating = 300, + .read = arc_read_timer1, + .mask = CLOCKSOURCE_MASK(32), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static int __init arc_cs_setup_timer1(struct device_node *node) +{ + int ret; + + /* Local to CPU hence not usable in SMP */ + if (IS_ENABLED(CONFIG_SMP)) + return -EINVAL; + + ret = arc_get_timer_clk(node); + if (ret) + return ret; + + write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX); + write_aux_reg(ARC_REG_TIMER1_CNT, 0); + write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH); + + return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq); +} + +/********** Clock Event Device *********/ + +static int arc_timer_irq; + +/* + * Arm the timer to interrupt after @cycles + * The distinction for oneshot/periodic is done in arc_event_timer_ack() below + */ +static void arc_timer_event_setup(unsigned int cycles) +{ + write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles); + write_aux_reg(ARC_REG_TIMER0_CNT, 0); /* start from 0 */ + + write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH); +} + + +static int arc_clkevent_set_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + arc_timer_event_setup(delta); + return 0; +} + +static int arc_clkevent_set_periodic(struct clock_event_device *dev) +{ + /* + * At X Hz, 1 sec = 1000ms -> X cycles; + * 10ms -> X / 100 cycles + */ + arc_timer_event_setup(arc_timer_freq / HZ); + return 0; +} + +static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { + .name = "ARC Timer0", + .features = CLOCK_EVT_FEAT_ONESHOT | + CLOCK_EVT_FEAT_PERIODIC, + .rating = 300, + .set_next_event = arc_clkevent_set_next_event, + .set_state_periodic = arc_clkevent_set_periodic, +}; + +static irqreturn_t timer_irq_handler(int irq, void *dev_id) +{ + /* + * Note that generic IRQ core could have passed @evt for @dev_id if + * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() + */ + struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); + int irq_reenable = clockevent_state_periodic(evt); + + /* + * Any write to CTRL reg ACks the interrupt, we rewrite the + * Count when [N]ot [H]alted bit. 
+ * And re-arm it if perioid by [I]nterrupt [E]nable bit + */ + write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH); + + evt->event_handler(evt); + + return IRQ_HANDLED; +} + + +static int arc_timer_starting_cpu(unsigned int cpu) +{ + struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); + + evt->cpumask = cpumask_of(smp_processor_id()); + + clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX); + enable_percpu_irq(arc_timer_irq, 0); + return 0; +} + +static int arc_timer_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(arc_timer_irq); + return 0; +} + +/* + * clockevent setup for boot CPU + */ +static int __init arc_clockevent_setup(struct device_node *node) +{ + struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); + int ret; + + arc_timer_irq = irq_of_parse_and_map(node, 0); + if (arc_timer_irq <= 0) { + pr_err("clockevent: missing irq"); + return -EINVAL; + } + + ret = arc_get_timer_clk(node); + if (ret) { + pr_err("clockevent: missing clk"); + return ret; + } + + /* Needs apriori irq_set_percpu_devid() done in intc map function */ + ret = request_percpu_irq(arc_timer_irq, timer_irq_handler, + "Timer0 (per-cpu-tick)", evt); + if (ret) { + pr_err("clockevent: unable to request irq\n"); + return ret; + } + + ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING, + "clockevents/arc/timer:starting", + arc_timer_starting_cpu, + arc_timer_dying_cpu); + if (ret) { + pr_err("Failed to setup hotplug state"); + return ret; + } + return 0; +} + +static int __init arc_of_timer_init(struct device_node *np) +{ + static int init_count = 0; + int ret; + + if (!init_count) { + init_count = 1; + ret = arc_clockevent_setup(np); + } else { + ret = arc_cs_setup_timer1(np); + } + + return ret; +} +CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init); diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c index 73c487da6d2a..4c8c3fb2e8b2 100644 --- a/drivers/clocksource/arm_arch_timer.c +++ b/drivers/clocksource/arm_arch_timer.c @@ -81,6 +81,7 @@ static struct clock_event_device __percpu *arch_timer_evt; static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI; static bool arch_timer_c3stop; static bool arch_timer_mem_use_virtual; +static bool arch_counter_suspend_stop; static bool evtstrm_enable = IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM); @@ -561,12 +562,12 @@ static u64 arch_counter_get_cntvct_mem(void) */ u64 (*arch_timer_read_counter)(void) = arch_counter_get_cntvct; -static cycle_t arch_counter_read(struct clocksource *cs) +static u64 arch_counter_read(struct clocksource *cs) { return arch_timer_read_counter(); } -static cycle_t arch_counter_read_cc(const struct cyclecounter *cc) +static u64 arch_counter_read_cc(const struct cyclecounter *cc) { return arch_timer_read_counter(); } @@ -576,7 +577,7 @@ static struct clocksource clocksource_counter = { .rating = 400, .read = arch_counter_read, .mask = CLOCKSOURCE_MASK(56), - .flags = CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_SUSPEND_NONSTOP, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static struct cyclecounter cyclecounter = { @@ -616,6 +617,8 @@ static void __init arch_counter_register(unsigned type) arch_timer_read_counter = arch_counter_get_cntvct_mem; } + if (!arch_counter_suspend_stop) + clocksource_counter.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP; start_count = arch_timer_read_counter(); clocksource_register_hz(&clocksource_counter, arch_timer_rate); cyclecounter.mult = clocksource_counter.mult; @@ -735,7 +738,7 @@ static int __init 
arch_timer_register(void) /* Register and immediately configure the timer on the boot CPU */ err = cpuhp_setup_state(CPUHP_AP_ARM_ARCH_TIMER_STARTING, - "AP_ARM_ARCH_TIMER_STARTING", + "clockevents/arm/arch_timer:starting", arch_timer_starting_cpu, arch_timer_dying_cpu); if (err) goto out_unreg_cpupm; @@ -907,6 +910,10 @@ static int __init arch_timer_of_init(struct device_node *np) of_property_read_bool(np, "arm,cpu-registers-not-fw-configured")) arch_timer_uses_ppi = PHYS_SECURE_PPI; + /* On some systems, the counter stops ticking when in suspend. */ + arch_counter_suspend_stop = of_property_read_bool(np, + "arm,no-tick-in-suspend"); + return arch_timer_init(); } CLOCKSOURCE_OF_DECLARE(armv7_arch_timer, "arm,armv7-timer", arch_timer_of_init); @@ -964,8 +971,9 @@ static int __init arch_timer_mem_init(struct device_node *np) } ret= -ENXIO; - base = arch_counter_base = of_iomap(best_frame, 0); - if (!base) { + base = arch_counter_base = of_io_request_and_map(best_frame, 0, + "arch_mem_timer"); + if (IS_ERR(base)) { pr_err("arch_timer: Can't map frame's registers\n"); goto out; } diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c index 8da03298f844..123ed20ac2ff 100644 --- a/drivers/clocksource/arm_global_timer.c +++ b/drivers/clocksource/arm_global_timer.c @@ -195,7 +195,7 @@ static int gt_dying_cpu(unsigned int cpu) return 0; } -static cycle_t gt_clocksource_read(struct clocksource *cs) +static u64 gt_clocksource_read(struct clocksource *cs) { return gt_counter_read(); } @@ -316,7 +316,7 @@ static int __init global_timer_of_register(struct device_node *np) goto out_irq; err = cpuhp_setup_state(CPUHP_AP_ARM_GLOBAL_TIMER_STARTING, - "AP_ARM_GLOBAL_TIMER_STARTING", + "clockevents/arm/global_timer:starting", gt_starting_cpu, gt_dying_cpu); if (err) goto out_irq; diff --git a/drivers/clocksource/bcm2835_timer.c b/drivers/clocksource/bcm2835_timer.c index e71acf231c89..f2f29d2be1cf 100644 --- a/drivers/clocksource/bcm2835_timer.c +++ b/drivers/clocksource/bcm2835_timer.c @@ -96,7 +96,7 @@ static int __init bcm2835_timer_init(struct device_node *node) ret = of_property_read_u32(node, "clock-frequency", &freq); if (ret) { pr_err("Can't read clock-frequency"); - return ret; + goto err_iounmap; } system_clock = base + REG_COUNTER_LO; @@ -108,13 +108,15 @@ static int __init bcm2835_timer_init(struct device_node *node) irq = irq_of_parse_and_map(node, DEFAULT_TIMER); if (irq <= 0) { pr_err("Can't parse IRQ"); - return -EINVAL; + ret = -EINVAL; + goto err_iounmap; } timer = kzalloc(sizeof(*timer), GFP_KERNEL); if (!timer) { pr_err("Can't allocate timer struct\n"); - return -ENOMEM; + ret = -ENOMEM; + goto err_iounmap; } timer->control = base + REG_CONTROL; @@ -133,7 +135,7 @@ static int __init bcm2835_timer_init(struct device_node *node) ret = setup_irq(irq, &timer->act); if (ret) { pr_err("Can't set up timer IRQ\n"); - return ret; + goto err_iounmap; } clockevents_config_and_register(&timer->evt, freq, 0xf, 0xffffffff); @@ -141,6 +143,10 @@ static int __init bcm2835_timer_init(struct device_node *node) pr_info("bcm2835: system timer (irq = %d)\n", irq); return 0; + +err_iounmap: + iounmap(base); + return ret; } CLOCKSOURCE_OF_DECLARE(bcm2835, "brcm,bcm2835-system-timer", bcm2835_timer_init); diff --git a/drivers/clocksource/cadence_ttc_timer.c b/drivers/clocksource/cadence_ttc_timer.c index fbfbdec13b08..44e5e951583b 100644 --- a/drivers/clocksource/cadence_ttc_timer.c +++ b/drivers/clocksource/cadence_ttc_timer.c @@ -158,11 +158,11 @@ static irqreturn_t 
ttc_clock_event_interrupt(int irq, void *dev_id) * * returns: Current timer counter register value **/ -static cycle_t __ttc_clocksource_read(struct clocksource *cs) +static u64 __ttc_clocksource_read(struct clocksource *cs) { struct ttc_timer *timer = &to_ttc_timer_clksrc(cs)->ttc; - return (cycle_t)readl_relaxed(timer->base_addr + + return (u64)readl_relaxed(timer->base_addr + TTC_COUNT_VAL_OFFSET); } diff --git a/drivers/clocksource/clksrc-dbx500-prcmu.c b/drivers/clocksource/clksrc-dbx500-prcmu.c index 77a365f573d7..c69e2772658d 100644 --- a/drivers/clocksource/clksrc-dbx500-prcmu.c +++ b/drivers/clocksource/clksrc-dbx500-prcmu.c @@ -30,7 +30,7 @@ static void __iomem *clksrc_dbx500_timer_base; -static cycle_t notrace clksrc_dbx500_prcmu_read(struct clocksource *cs) +static u64 notrace clksrc_dbx500_prcmu_read(struct clocksource *cs) { void __iomem *base = clksrc_dbx500_timer_base; u32 count, count2; diff --git a/drivers/clocksource/dummy_timer.c b/drivers/clocksource/dummy_timer.c index 89f1c2edbe02..01f3f5a59bc6 100644 --- a/drivers/clocksource/dummy_timer.c +++ b/drivers/clocksource/dummy_timer.c @@ -34,7 +34,7 @@ static int dummy_timer_starting_cpu(unsigned int cpu) static int __init dummy_timer_register(void) { return cpuhp_setup_state(CPUHP_AP_DUMMY_TIMER_STARTING, - "AP_DUMMY_TIMER_STARTING", + "clockevents/dummy_timer:starting", dummy_timer_starting_cpu, NULL); } early_initcall(dummy_timer_register); diff --git a/drivers/clocksource/dw_apb_timer.c b/drivers/clocksource/dw_apb_timer.c index 797505aa2ba4..63e4f5519577 100644 --- a/drivers/clocksource/dw_apb_timer.c +++ b/drivers/clocksource/dw_apb_timer.c @@ -348,7 +348,7 @@ void dw_apb_clocksource_start(struct dw_apb_clocksource *dw_cs) dw_apb_clocksource_read(dw_cs); } -static cycle_t __apbt_read_clocksource(struct clocksource *cs) +static u64 __apbt_read_clocksource(struct clocksource *cs) { u32 current_count; struct dw_apb_clocksource *dw_cs = @@ -357,7 +357,7 @@ static cycle_t __apbt_read_clocksource(struct clocksource *cs) current_count = apbt_readl_relaxed(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); - return (cycle_t)~current_count; + return (u64)~current_count; } static void apbt_restart_clocksource(struct clocksource *cs) @@ -416,7 +416,7 @@ void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs) * * @dw_cs: The clocksource to read. */ -cycle_t dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) +u64 dw_apb_clocksource_read(struct dw_apb_clocksource *dw_cs) { - return (cycle_t)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); + return (u64)~apbt_readl(&dw_cs->timer, APBTMR_N_CURRENT_VALUE); } diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c index 19bb1792d647..aff87df07449 100644 --- a/drivers/clocksource/em_sti.c +++ b/drivers/clocksource/em_sti.c @@ -110,9 +110,9 @@ static void em_sti_disable(struct em_sti_priv *p) clk_disable_unprepare(p->clk); } -static cycle_t em_sti_count(struct em_sti_priv *p) +static u64 em_sti_count(struct em_sti_priv *p) { - cycle_t ticks; + u64 ticks; unsigned long flags; /* the STI hardware buffers the 48-bit count, but to @@ -121,14 +121,14 @@ static cycle_t em_sti_count(struct em_sti_priv *p) * Always read STI_COUNT_H before STI_COUNT_L. 
*/ raw_spin_lock_irqsave(&p->lock, flags); - ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; + ticks = (u64)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32; ticks |= em_sti_read(p, STI_COUNT_L); raw_spin_unlock_irqrestore(&p->lock, flags); return ticks; } -static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next) +static u64 em_sti_set_next(struct em_sti_priv *p, u64 next) { unsigned long flags; @@ -198,7 +198,7 @@ static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs) return container_of(cs, struct em_sti_priv, cs); } -static cycle_t em_sti_clocksource_read(struct clocksource *cs) +static u64 em_sti_clocksource_read(struct clocksource *cs) { return em_sti_count(cs_to_em_sti(cs)); } @@ -271,7 +271,7 @@ static int em_sti_clock_event_next(unsigned long delta, struct clock_event_device *ced) { struct em_sti_priv *p = ced_to_em_sti(ced); - cycle_t next; + u64 next; int safe; next = em_sti_set_next(p, em_sti_count(p) + delta); diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 8f3488b80896..4da1dc2278bd 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -183,7 +183,7 @@ static u64 exynos4_read_count_64(void) hi2 = readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_U); } while (hi != hi2); - return ((cycle_t)hi << 32) | lo; + return ((u64)hi << 32) | lo; } /** @@ -199,7 +199,7 @@ static u32 notrace exynos4_read_count_32(void) return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L); } -static cycle_t exynos4_frc_read(struct clocksource *cs) +static u64 exynos4_frc_read(struct clocksource *cs) { return exynos4_read_count_32(); } @@ -266,7 +266,7 @@ static void exynos4_mct_comp0_stop(void) static void exynos4_mct_comp0_start(bool periodic, unsigned long cycles) { unsigned int tcon; - cycle_t comp_cycle; + u64 comp_cycle; tcon = readl_relaxed(reg_base + EXYNOS4_MCT_G_TCON); @@ -552,7 +552,7 @@ static int __init exynos4_timer_resources(struct device_node *np, void __iomem * /* Install hotplug callbacks which configure the timer on this CPU */ err = cpuhp_setup_state(CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, - "AP_EXYNOS4_MCT_TIMER_STARTING", + "clockevents/exynos4/mct_timer:starting", exynos4_mct_starting_cpu, exynos4_mct_dying_cpu); if (err) diff --git a/drivers/clocksource/h8300_timer16.c b/drivers/clocksource/h8300_timer16.c index 07d9d5be9054..5b27fb9997c2 100644 --- a/drivers/clocksource/h8300_timer16.c +++ b/drivers/clocksource/h8300_timer16.c @@ -72,7 +72,7 @@ static inline struct timer16_priv *cs_to_priv(struct clocksource *cs) return container_of(cs, struct timer16_priv, cs); } -static cycle_t timer16_clocksource_read(struct clocksource *cs) +static u64 timer16_clocksource_read(struct clocksource *cs) { struct timer16_priv *p = cs_to_priv(cs); unsigned long raw, value; diff --git a/drivers/clocksource/h8300_tpu.c b/drivers/clocksource/h8300_tpu.c index 7bdf1991c847..72e1cf2b3096 100644 --- a/drivers/clocksource/h8300_tpu.c +++ b/drivers/clocksource/h8300_tpu.c @@ -64,7 +64,7 @@ static inline struct tpu_priv *cs_to_priv(struct clocksource *cs) return container_of(cs, struct tpu_priv, cs); } -static cycle_t tpu_clocksource_read(struct clocksource *cs) +static u64 tpu_clocksource_read(struct clocksource *cs) { struct tpu_priv *p = cs_to_priv(cs); unsigned long flags; diff --git a/drivers/clocksource/i8253.c b/drivers/clocksource/i8253.c index 0efd36e483ab..64f6490740d7 100644 --- a/drivers/clocksource/i8253.c +++ b/drivers/clocksource/i8253.c @@ -25,7 +25,7 @@ EXPORT_SYMBOL(i8253_lock); * to just read by 
itself. So use jiffies to emulate a free * running counter: */ -static cycle_t i8253_read(struct clocksource *cs) +static u64 i8253_read(struct clocksource *cs) { static int old_count; static u32 old_jifs; @@ -83,7 +83,7 @@ static cycle_t i8253_read(struct clocksource *cs) count = (PIT_LATCH - 1) - count; - return (cycle_t)(jifs * PIT_LATCH) + count; + return (u64)(jifs * PIT_LATCH) + count; } static struct clocksource i8253_cs = { diff --git a/drivers/clocksource/jcore-pit.c b/drivers/clocksource/jcore-pit.c index 54e1665aa03c..7c61226f4359 100644 --- a/drivers/clocksource/jcore-pit.c +++ b/drivers/clocksource/jcore-pit.c @@ -57,7 +57,7 @@ static notrace u64 jcore_sched_clock_read(void) return seclo * NSEC_PER_SEC + nsec; } -static cycle_t jcore_clocksource_read(struct clocksource *cs) +static u64 jcore_clocksource_read(struct clocksource *cs) { return jcore_sched_clock_read(); } @@ -240,7 +240,7 @@ static int __init jcore_pit_init(struct device_node *node) } cpuhp_setup_state(CPUHP_AP_JCORE_TIMER_STARTING, - "AP_JCORE_TIMER_STARTING", + "clockevents/jcore:starting", jcore_pit_local_init, NULL); return 0; diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c index a80ab3e446b7..6fcf96540631 100644 --- a/drivers/clocksource/metag_generic.c +++ b/drivers/clocksource/metag_generic.c @@ -56,7 +56,7 @@ static int metag_timer_set_next_event(unsigned long delta, return 0; } -static cycle_t metag_clocksource_read(struct clocksource *cs) +static u64 metag_clocksource_read(struct clocksource *cs) { return __core_reg_get(TXTIMER); } @@ -154,6 +154,6 @@ int __init metag_generic_timer_init(void) /* Hook cpu boot to configure the CPU's timers */ return cpuhp_setup_state(CPUHP_AP_METAG_TIMER_STARTING, - "AP_METAG_TIMER_STARTING", + "clockevents/metag:starting", arch_timer_starting_cpu, NULL); } diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c index 7a960cd01104..d9ef7a61e093 100644 --- a/drivers/clocksource/mips-gic-timer.c +++ b/drivers/clocksource/mips-gic-timer.c @@ -120,12 +120,12 @@ static int gic_clockevent_init(void) } cpuhp_setup_state(CPUHP_AP_MIPS_GIC_TIMER_STARTING, - "AP_MIPS_GIC_TIMER_STARTING", gic_starting_cpu, - gic_dying_cpu); + "clockevents/mips/gic/timer:starting", + gic_starting_cpu, gic_dying_cpu); return 0; } -static cycle_t gic_hpt_read(struct clocksource *cs) +static u64 gic_hpt_read(struct clocksource *cs) { return gic_read_count(); } diff --git a/drivers/clocksource/mmio.c b/drivers/clocksource/mmio.c index c4f7d7a9b689..4c4df981d8cc 100644 --- a/drivers/clocksource/mmio.c +++ b/drivers/clocksource/mmio.c @@ -20,24 +20,24 @@ static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c) return container_of(c, struct clocksource_mmio, clksrc); } -cycle_t clocksource_mmio_readl_up(struct clocksource *c) +u64 clocksource_mmio_readl_up(struct clocksource *c) { - return (cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg); + return (u64)readl_relaxed(to_mmio_clksrc(c)->reg); } -cycle_t clocksource_mmio_readl_down(struct clocksource *c) +u64 clocksource_mmio_readl_down(struct clocksource *c) { - return ~(cycle_t)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; + return ~(u64)readl_relaxed(to_mmio_clksrc(c)->reg) & c->mask; } -cycle_t clocksource_mmio_readw_up(struct clocksource *c) +u64 clocksource_mmio_readw_up(struct clocksource *c) { - return (cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg); + return (u64)readw_relaxed(to_mmio_clksrc(c)->reg); } -cycle_t clocksource_mmio_readw_down(struct clocksource 
*c) +u64 clocksource_mmio_readw_down(struct clocksource *c) { - return ~(cycle_t)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask; + return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask; } /** @@ -51,7 +51,7 @@ cycle_t clocksource_mmio_readw_down(struct clocksource *c) */ int __init clocksource_mmio_init(void __iomem *base, const char *name, unsigned long hz, int rating, unsigned bits, - cycle_t (*read)(struct clocksource *)) + u64 (*read)(struct clocksource *)) { struct clocksource_mmio *cs; diff --git a/drivers/clocksource/moxart_timer.c b/drivers/clocksource/moxart_timer.c index 2a8f4705c734..7f3430654fbd 100644 --- a/drivers/clocksource/moxart_timer.c +++ b/drivers/clocksource/moxart_timer.c @@ -161,19 +161,22 @@ static int __init moxart_timer_init(struct device_node *node) timer->base = of_iomap(node, 0); if (!timer->base) { pr_err("%s: of_iomap failed\n", node->full_name); - return -ENXIO; + ret = -ENXIO; + goto out_free; } irq = irq_of_parse_and_map(node, 0); if (irq <= 0) { pr_err("%s: irq_of_parse_and_map failed\n", node->full_name); - return -EINVAL; + ret = -EINVAL; + goto out_unmap; } clk = of_clk_get(node, 0); if (IS_ERR(clk)) { pr_err("%s: of_clk_get failed\n", node->full_name); - return PTR_ERR(clk); + ret = PTR_ERR(clk); + goto out_unmap; } pclk = clk_get_rate(clk); @@ -186,7 +189,8 @@ static int __init moxart_timer_init(struct device_node *node) timer->t1_disable_val = ASPEED_TIMER1_DISABLE; } else { pr_err("%s: unknown platform\n", node->full_name); - return -EINVAL; + ret = -EINVAL; + goto out_unmap; } timer->count_per_tick = DIV_ROUND_CLOSEST(pclk, HZ); @@ -208,14 +212,14 @@ static int __init moxart_timer_init(struct device_node *node) clocksource_mmio_readl_down); if (ret) { pr_err("%s: clocksource_mmio_init failed\n", node->full_name); - return ret; + goto out_unmap; } ret = request_irq(irq, moxart_timer_interrupt, IRQF_TIMER, node->name, &timer->clkevt); if (ret) { pr_err("%s: setup_irq failed\n", node->full_name); - return ret; + goto out_unmap; } /* Clear match registers */ @@ -241,6 +245,12 @@ static int __init moxart_timer_init(struct device_node *node) clockevents_config_and_register(&timer->clkevt, pclk, 0x4, 0xfffffffe); return 0; + +out_unmap: + iounmap(timer->base); +out_free: + kfree(timer); + return ret; } CLOCKSOURCE_OF_DECLARE(moxart, "moxa,moxart-timer", moxart_timer_init); CLOCKSOURCE_OF_DECLARE(aspeed, "aspeed,ast2400-timer", moxart_timer_init); diff --git a/drivers/clocksource/mxs_timer.c b/drivers/clocksource/mxs_timer.c index 0ba0a913b41d..99b77aff0839 100644 --- a/drivers/clocksource/mxs_timer.c +++ b/drivers/clocksource/mxs_timer.c @@ -97,7 +97,7 @@ static void timrot_irq_acknowledge(void) HW_TIMROT_TIMCTRLn(0) + STMP_OFFSET_REG_CLR); } -static cycle_t timrotv1_get_cycles(struct clocksource *cs) +static u64 timrotv1_get_cycles(struct clocksource *cs) { return ~((__raw_readl(mxs_timrot_base + HW_TIMROT_TIMCOUNTn(1)) & 0xffff0000) >> 16); diff --git a/drivers/clocksource/pxa_timer.c b/drivers/clocksource/pxa_timer.c index 3e1cb512f3ce..9cae38eebec2 100644 --- a/drivers/clocksource/pxa_timer.c +++ b/drivers/clocksource/pxa_timer.c @@ -220,17 +220,16 @@ CLOCKSOURCE_OF_DECLARE(pxa_timer, "marvell,pxa-timer", pxa_timer_dt_init); /* * Legacy timer init for non device-tree boards. 
*/ -void __init pxa_timer_nodt_init(int irq, void __iomem *base, - unsigned long clock_tick_rate) +void __init pxa_timer_nodt_init(int irq, void __iomem *base) { struct clk *clk; timer_base = base; clk = clk_get(NULL, "OSTIMER0"); - if (clk && !IS_ERR(clk)) + if (clk && !IS_ERR(clk)) { clk_prepare_enable(clk); - else + pxa_timer_common_init(irq, clk_get_rate(clk)); + } else { pr_crit("%s: unable to get clk\n", __func__); - - pxa_timer_common_init(irq, clock_tick_rate); + } } diff --git a/drivers/clocksource/qcom-timer.c b/drivers/clocksource/qcom-timer.c index 3283cfa2aa52..ee358cdf4a07 100644 --- a/drivers/clocksource/qcom-timer.c +++ b/drivers/clocksource/qcom-timer.c @@ -89,7 +89,7 @@ static struct clock_event_device __percpu *msm_evt; static void __iomem *source_base; -static notrace cycle_t msm_read_timer_count(struct clocksource *cs) +static notrace u64 msm_read_timer_count(struct clocksource *cs) { return readl_relaxed(source_base + TIMER_COUNT_VAL); } @@ -182,7 +182,7 @@ static int __init msm_timer_init(u32 dgt_hz, int sched_bits, int irq, } else { /* Install and invoke hotplug callbacks */ res = cpuhp_setup_state(CPUHP_AP_QCOM_TIMER_STARTING, - "AP_QCOM_TIMER_STARTING", + "clockevents/qcom/timer:starting", msm_local_timer_starting_cpu, msm_local_timer_dying_cpu); if (res) { diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c index 54565bd0093b..0093ece661fe 100644 --- a/drivers/clocksource/samsung_pwm_timer.c +++ b/drivers/clocksource/samsung_pwm_timer.c @@ -307,7 +307,7 @@ static void samsung_clocksource_resume(struct clocksource *cs) samsung_time_start(pwm.source_id, true); } -static cycle_t notrace samsung_clocksource_read(struct clocksource *c) +static u64 notrace samsung_clocksource_read(struct clocksource *c) { return ~readl_relaxed(pwm.source_reg); } diff --git a/drivers/clocksource/scx200_hrt.c b/drivers/clocksource/scx200_hrt.c index 64f9e8294434..a46660bf6588 100644 --- a/drivers/clocksource/scx200_hrt.c +++ b/drivers/clocksource/scx200_hrt.c @@ -43,10 +43,10 @@ MODULE_PARM_DESC(ppm, "+-adjust to actual XO freq (ppm)"); /* The base timer frequency, * 27 if selected */ #define HRT_FREQ 1000000 -static cycle_t read_hrt(struct clocksource *cs) +static u64 read_hrt(struct clocksource *cs) { /* Read the timer value */ - return (cycle_t) inl(scx200_cb_base + SCx200_TIMER_OFFSET); + return (u64) inl(scx200_cb_base + SCx200_TIMER_OFFSET); } static struct clocksource cs_hrt = { diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c index 103c49362c68..28757edf6aca 100644 --- a/drivers/clocksource/sh_cmt.c +++ b/drivers/clocksource/sh_cmt.c @@ -612,7 +612,7 @@ static struct sh_cmt_channel *cs_to_sh_cmt(struct clocksource *cs) return container_of(cs, struct sh_cmt_channel, cs); } -static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) +static u64 sh_cmt_clocksource_read(struct clocksource *cs) { struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); unsigned long flags, raw; diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c index 469e776ec17a..1fbf2aadcfd4 100644 --- a/drivers/clocksource/sh_tmu.c +++ b/drivers/clocksource/sh_tmu.c @@ -255,7 +255,7 @@ static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs) return container_of(cs, struct sh_tmu_channel, cs); } -static cycle_t sh_tmu_clocksource_read(struct clocksource *cs) +static u64 sh_tmu_clocksource_read(struct clocksource *cs) { struct sh_tmu_channel *ch = cs_to_sh_tmu(cs); diff --git a/drivers/clocksource/tcb_clksrc.c 
b/drivers/clocksource/tcb_clksrc.c index 4da2af9694a2..d4ca9962a759 100644 --- a/drivers/clocksource/tcb_clksrc.c +++ b/drivers/clocksource/tcb_clksrc.c @@ -41,7 +41,7 @@ static void __iomem *tcaddr; -static cycle_t tc_get_cycles(struct clocksource *cs) +static u64 tc_get_cycles(struct clocksource *cs) { unsigned long flags; u32 lower, upper; @@ -56,7 +56,7 @@ static cycle_t tc_get_cycles(struct clocksource *cs) return (upper << 16) | lower; } -static cycle_t tc_get_cycles32(struct clocksource *cs) +static u64 tc_get_cycles32(struct clocksource *cs) { return __raw_readl(tcaddr + ATMEL_TC_REG(0, CV)); } diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c index 3c39e6f45971..4440aefc59cd 100644 --- a/drivers/clocksource/time-armada-370-xp.c +++ b/drivers/clocksource/time-armada-370-xp.c @@ -320,7 +320,7 @@ static int __init armada_370_xp_timer_common_init(struct device_node *np) } res = cpuhp_setup_state(CPUHP_AP_ARMADA_TIMER_STARTING, - "AP_ARMADA_TIMER_STARTING", + "clockevents/armada:starting", armada_370_xp_timer_starting_cpu, armada_370_xp_timer_dying_cpu); if (res) { diff --git a/drivers/clocksource/time-pistachio.c b/drivers/clocksource/time-pistachio.c index a8e6c7df853d..3710e4d9dcba 100644 --- a/drivers/clocksource/time-pistachio.c +++ b/drivers/clocksource/time-pistachio.c @@ -67,7 +67,7 @@ static inline void gpt_writel(void __iomem *base, u32 value, u32 offset, writel(value, base + 0x20 * gpt_id + offset); } -static cycle_t notrace +static u64 notrace pistachio_clocksource_read_cycles(struct clocksource *cs) { struct pistachio_clocksource *pcs = to_pistachio_clocksource(cs); @@ -84,7 +84,7 @@ pistachio_clocksource_read_cycles(struct clocksource *cs) counter = gpt_readl(pcs->base, TIMER_CURRENT_VALUE, 0); raw_spin_unlock_irqrestore(&pcs->lock, flags); - return (cycle_t)~counter; + return (u64)~counter; } static u64 notrace pistachio_read_sched_clock(void) diff --git a/drivers/clocksource/timer-atlas7.c b/drivers/clocksource/timer-atlas7.c index 4334e0330ada..3d8a181f0252 100644 --- a/drivers/clocksource/timer-atlas7.c +++ b/drivers/clocksource/timer-atlas7.c @@ -85,7 +85,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id) } /* read 64-bit timer counter */ -static cycle_t sirfsoc_timer_read(struct clocksource *cs) +static u64 sirfsoc_timer_read(struct clocksource *cs) { u64 cycles; @@ -221,7 +221,7 @@ static int __init sirfsoc_clockevent_init(void) /* Install and invoke hotplug callbacks */ return cpuhp_setup_state(CPUHP_AP_MARCO_TIMER_STARTING, - "AP_MARCO_TIMER_STARTING", + "clockevents/marco:starting", sirfsoc_local_timer_starting_cpu, sirfsoc_local_timer_dying_cpu); } diff --git a/drivers/clocksource/timer-atmel-pit.c b/drivers/clocksource/timer-atmel-pit.c index 6555821bbdae..c0b5df3167a0 100644 --- a/drivers/clocksource/timer-atmel-pit.c +++ b/drivers/clocksource/timer-atmel-pit.c @@ -73,7 +73,7 @@ static inline void pit_write(void __iomem *base, unsigned int reg_offset, unsign * Clocksource: just a monotonic counter of MCK/16 cycles. * We don't care whether or not PIT irqs are enabled. 
*/ -static cycle_t read_pit_clk(struct clocksource *cs) +static u64 read_pit_clk(struct clocksource *cs) { struct pit_data *data = clksrc_to_pit_data(cs); unsigned long flags; diff --git a/drivers/clocksource/timer-atmel-st.c b/drivers/clocksource/timer-atmel-st.c index e90ab5b63a90..be4ac7604136 100644 --- a/drivers/clocksource/timer-atmel-st.c +++ b/drivers/clocksource/timer-atmel-st.c @@ -92,7 +92,7 @@ static irqreturn_t at91rm9200_timer_interrupt(int irq, void *dev_id) return IRQ_NONE; } -static cycle_t read_clk32k(struct clocksource *cs) +static u64 read_clk32k(struct clocksource *cs) { return read_CRTR(); } diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c index 70c149af8ee0..da1f7986e477 100644 --- a/drivers/clocksource/timer-nps.c +++ b/drivers/clocksource/timer-nps.c @@ -46,35 +46,62 @@ /* This array is per cluster of CPUs (Each NPS400 cluster got 256 CPUs) */ static void *nps_msu_reg_low_addr[NPS_CLUSTER_NUM] __read_mostly; -static unsigned long nps_timer_rate; +static int __init nps_get_timer_clk(struct device_node *node, + unsigned long *timer_freq, + struct clk **clk) +{ + int ret; + + *clk = of_clk_get(node, 0); + ret = PTR_ERR_OR_ZERO(*clk); + if (ret) { + pr_err("timer missing clk"); + return ret; + } + + ret = clk_prepare_enable(*clk); + if (ret) { + pr_err("Couldn't enable parent clk\n"); + clk_put(*clk); + return ret; + } + + *timer_freq = clk_get_rate(*clk); + if (!(*timer_freq)) { + pr_err("Couldn't get clk rate\n"); + clk_disable_unprepare(*clk); + clk_put(*clk); + return -EINVAL; + } + + return 0; +} -static cycle_t nps_clksrc_read(struct clocksource *clksrc) +static u64 nps_clksrc_read(struct clocksource *clksrc) { int cluster = raw_smp_processor_id() >> NPS_CLUSTER_OFFSET; - return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]); + return (u64)ioread32be(nps_msu_reg_low_addr[cluster]); } -static int __init nps_setup_clocksource(struct device_node *node, - struct clk *clk) +static int __init nps_setup_clocksource(struct device_node *node) { int ret, cluster; + struct clk *clk; + unsigned long nps_timer1_freq; + for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++) nps_msu_reg_low_addr[cluster] = nps_host_reg((cluster << NPS_CLUSTER_OFFSET), - NPS_MSU_BLKID, NPS_MSU_TICK_LOW); + NPS_MSU_BLKID, NPS_MSU_TICK_LOW); - ret = clk_prepare_enable(clk); - if (ret) { - pr_err("Couldn't enable parent clock\n"); + ret = nps_get_timer_clk(node, &nps_timer1_freq, &clk); + if (ret) return ret; - } - - nps_timer_rate = clk_get_rate(clk); - ret = clocksource_mmio_init(nps_msu_reg_low_addr, "EZnps-tick", - nps_timer_rate, 301, 32, nps_clksrc_read); + ret = clocksource_mmio_init(nps_msu_reg_low_addr, "nps-tick", + nps_timer1_freq, 300, 32, nps_clksrc_read); if (ret) { pr_err("Couldn't register clock source.\n"); clk_disable_unprepare(clk); @@ -83,18 +110,175 @@ static int __init nps_setup_clocksource(struct device_node *node, return ret; } -static int __init nps_timer_init(struct device_node *node) +CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer", + nps_setup_clocksource); +CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_src, "ezchip,nps400-timer1", + nps_setup_clocksource); + +#ifdef CONFIG_EZNPS_MTM_EXT +#include <soc/nps/mtm.h> + +/* Timer related Aux registers */ +#define NPS_REG_TIMER0_TSI 0xFFFFF850 +#define NPS_REG_TIMER0_LIMIT 0x23 +#define NPS_REG_TIMER0_CTRL 0x22 +#define NPS_REG_TIMER0_CNT 0x21 + +/* + * Interrupt Enabled (IE) - re-arm the timer + * Not Halted (NH) - is cleared when working with JTAG (for debug) + */ +#define 
TIMER0_CTRL_IE BIT(0) +#define TIMER0_CTRL_NH BIT(1) + +static unsigned long nps_timer0_freq; +static unsigned long nps_timer0_irq; + +static void nps_clkevent_rm_thread(void) +{ + int thread; + unsigned int cflags, enabled_threads; + + hw_schd_save(&cflags); + + enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI); + + /* remove thread from TSI1 */ + thread = read_aux_reg(CTOP_AUX_THREAD_ID); + enabled_threads &= ~(1 << thread); + write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads); + + /* Acknowledge and if needed re-arm the timer */ + if (!enabled_threads) + write_aux_reg(NPS_REG_TIMER0_CTRL, TIMER0_CTRL_NH); + else + write_aux_reg(NPS_REG_TIMER0_CTRL, + TIMER0_CTRL_IE | TIMER0_CTRL_NH); + + hw_schd_restore(cflags); +} + +static void nps_clkevent_add_thread(unsigned long delta) +{ + int thread; + unsigned int cflags, enabled_threads; + + hw_schd_save(&cflags); + + /* add thread to TSI1 */ + thread = read_aux_reg(CTOP_AUX_THREAD_ID); + enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI); + enabled_threads |= (1 << thread); + write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads); + + /* set next timer event */ + write_aux_reg(NPS_REG_TIMER0_LIMIT, delta); + write_aux_reg(NPS_REG_TIMER0_CNT, 0); + write_aux_reg(NPS_REG_TIMER0_CTRL, + TIMER0_CTRL_IE | TIMER0_CTRL_NH); + + hw_schd_restore(cflags); +} + +/* + * Whenever anyone tries to change modes, we just mask interrupts + * and wait for the next event to get set. + */ +static int nps_clkevent_set_state(struct clock_event_device *dev) +{ + nps_clkevent_rm_thread(); + disable_percpu_irq(nps_timer0_irq); + + return 0; +} + +static int nps_clkevent_set_next_event(unsigned long delta, + struct clock_event_device *dev) +{ + nps_clkevent_add_thread(delta); + enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE); + + return 0; +} + +static DEFINE_PER_CPU(struct clock_event_device, nps_clockevent_device) = { + .name = "NPS Timer0", + .features = CLOCK_EVT_FEAT_ONESHOT, + .rating = 300, + .set_next_event = nps_clkevent_set_next_event, + .set_state_oneshot = nps_clkevent_set_state, + .set_state_oneshot_stopped = nps_clkevent_set_state, + .set_state_shutdown = nps_clkevent_set_state, + .tick_resume = nps_clkevent_set_state, +}; + +static irqreturn_t timer_irq_handler(int irq, void *dev_id) +{ + struct clock_event_device *evt = dev_id; + + nps_clkevent_rm_thread(); + evt->event_handler(evt); + + return IRQ_HANDLED; +} + +static int nps_timer_starting_cpu(unsigned int cpu) +{ + struct clock_event_device *evt = this_cpu_ptr(&nps_clockevent_device); + + evt->cpumask = cpumask_of(smp_processor_id()); + + clockevents_config_and_register(evt, nps_timer0_freq, 0, ULONG_MAX); + enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE); + + return 0; +} + +static int nps_timer_dying_cpu(unsigned int cpu) +{ + disable_percpu_irq(nps_timer0_irq); + return 0; +} + +static int __init nps_setup_clockevent(struct device_node *node) { struct clk *clk; + int ret; - clk = of_clk_get(node, 0); - if (IS_ERR(clk)) { - pr_err("Can't get timer clock.\n"); - return PTR_ERR(clk); + nps_timer0_irq = irq_of_parse_and_map(node, 0); + if (nps_timer0_irq <= 0) { + pr_err("clockevent: missing irq"); + return -EINVAL; } - return nps_setup_clocksource(node, clk); + ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk); + if (ret) + return ret; + + /* Needs apriori irq_set_percpu_devid() done in intc map function */ + ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler, + "Timer0 (per-cpu-tick)", + &nps_clockevent_device); + if (ret) { + pr_err("Couldn't request irq\n"); + 
clk_disable_unprepare(clk); + return ret; + } + + ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING, + "clockevents/nps:starting", + nps_timer_starting_cpu, + nps_timer_dying_cpu); + if (ret) { + pr_err("Failed to setup hotplug state"); + clk_disable_unprepare(clk); + free_percpu_irq(nps_timer0_irq, &nps_clockevent_device); + return ret; + } + + return 0; } -CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer", - nps_timer_init); +CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_evt, "ezchip,nps400-timer0", + nps_setup_clockevent); +#endif /* CONFIG_EZNPS_MTM_EXT */ diff --git a/drivers/clocksource/timer-prima2.c b/drivers/clocksource/timer-prima2.c index c32148ec7a38..bfa981ac1eaf 100644 --- a/drivers/clocksource/timer-prima2.c +++ b/drivers/clocksource/timer-prima2.c @@ -72,7 +72,7 @@ static irqreturn_t sirfsoc_timer_interrupt(int irq, void *dev_id) } /* read 64-bit timer counter */ -static cycle_t notrace sirfsoc_timer_read(struct clocksource *cs) +static u64 notrace sirfsoc_timer_read(struct clocksource *cs) { u64 cycles; diff --git a/drivers/clocksource/timer-sun5i.c b/drivers/clocksource/timer-sun5i.c index 4f87f3e76d83..a3e662b15964 100644 --- a/drivers/clocksource/timer-sun5i.c +++ b/drivers/clocksource/timer-sun5i.c @@ -152,7 +152,7 @@ static irqreturn_t sun5i_timer_interrupt(int irq, void *dev_id) return IRQ_HANDLED; } -static cycle_t sun5i_clksrc_read(struct clocksource *clksrc) +static u64 sun5i_clksrc_read(struct clocksource *clksrc) { struct sun5i_timer_clksrc *cs = to_sun5i_timer_clksrc(clksrc); diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c index cf5b14e442e4..624067712ef0 100644 --- a/drivers/clocksource/timer-ti-32k.c +++ b/drivers/clocksource/timer-ti-32k.c @@ -65,11 +65,11 @@ static inline struct ti_32k *to_ti_32k(struct clocksource *cs) return container_of(cs, struct ti_32k, cs); } -static cycle_t notrace ti_32k_read_cycles(struct clocksource *cs) +static u64 notrace ti_32k_read_cycles(struct clocksource *cs) { struct ti_32k *ti = to_ti_32k(cs); - return (cycle_t)readl_relaxed(ti->counter); + return (u64)readl_relaxed(ti->counter); } static struct ti_32k ti_32k_timer = { diff --git a/drivers/clocksource/vt8500_timer.c b/drivers/clocksource/vt8500_timer.c index b15069483fbd..d02b51075ad1 100644 --- a/drivers/clocksource/vt8500_timer.c +++ b/drivers/clocksource/vt8500_timer.c @@ -53,7 +53,7 @@ static void __iomem *regbase; -static cycle_t vt8500_timer_read(struct clocksource *cs) +static u64 vt8500_timer_read(struct clocksource *cs) { int loops = msecs_to_loops(10); writel(3, regbase + TIMER_CTRL_VAL); @@ -75,7 +75,7 @@ static int vt8500_timer_set_next_event(unsigned long cycles, struct clock_event_device *evt) { int loops = msecs_to_loops(10); - cycle_t alarm = clocksource.read(&clocksource) + cycles; + u64 alarm = clocksource.read(&clocksource) + cycles; while ((readl(regbase + TIMER_AS_VAL) & TIMER_MATCH_W_ACTIVE) && --loops) cpu_relax(); |
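A note on the 64-bit counter reads above: several of the clocksources in this series (the ARCv2 RTC and ARConnect GFRC added in arc_timer.c, and the existing exynos4_read_count_64()) must assemble a 64-bit value from two 32-bit registers that cannot be read atomically. The exynos code re-reads the high word and retries if it changed; the ARC RTC instead checks a hardware status bit in AUX_RTC_CTRL, and the GFRC read is serialised by disabling interrupts around the two MCIP commands. A minimal sketch of the generic re-read-and-retry form follows; the CNT_HI/CNT_LO offsets and the iomem base are hypothetical, for illustration only, and are not taken from any driver in this diff.

#include <linux/io.h>
#include <linux/types.h>

#define CNT_LO	0x00	/* hypothetical register offsets, illustration only */
#define CNT_HI	0x04

/*
 * Compose a 64-bit counter from two 32-bit MMIO halves. If the high
 * word rolls over between the two reads, the low word could belong to
 * either half-period, so re-read the high word and retry on mismatch.
 */
static u64 read_counter64(void __iomem *base)
{
	u32 hi, hi2, lo;

	do {
		hi  = readl_relaxed(base + CNT_HI);
		lo  = readl_relaxed(base + CNT_LO);
		hi2 = readl_relaxed(base + CNT_HI);
	} while (hi != hi2);

	return ((u64)hi << 32) | lo;
}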