author    | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-16 01:15:17 +0300
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2016-12-16 01:15:17 +0300
commit    | d25b6af91ec600faaff3a7e863f19d3e16593e52
tree      | 30aaf6a1fa9b767779afd478189d5be58a0a2f4c /drivers/clocksource
parent    | 179a7ba6806805bd4cd7a5e4574b83353c5615ad
parent    | 7badf6fefca8278e749e82411fdb98b123cca50e
download  | linux-d25b6af91ec600faaff3a7e863f19d3e16593e52.tar.xz
Merge tag 'arc-4.10-rc1-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc
Pull ARC updates from Vineet Gupta:
"These are mostly timer/clocksource driver updates which were
Reviewed/Acked by Daniel but had to be merged via ARC tree due to
dependencies.
I will follow up with another pull request with actual ARC changes
early next week !
Summary:
- Moving ARC timer driver into drivers/clocksource
- EZChip timer driver updates [Noam]
- ARC AXS103 and HAPS platform updates [Alexey]"
* tag 'arc-4.10-rc1-part1' of git://git.kernel.org/pub/scm/linux/kernel/git/vgupta/arc:
ARC: axs10x: really enable ARC PGU
ARC: rename Zebu platform support to HAPS
clocksource: nps: avoid maybe-uninitialized warning
clocksource: Add clockevent support to NPS400 driver
clocksource: update "fn" at CLOCKSOURCE_OF_DECLARE() of nps400 timer
soc: Support for NPS HW scheduling
clocksource: import ARC timer driver
ARC: breakout timer include code into separate header ...
ARC: move mcip.h into include/soc and adjust the includes
ARC: breakout aux handling into a separate header
ARC: time: move time_init() out of the driver
ARC: timer: gfrc, rtc: build under same option (64-bit timers)
ARC: timer: gfrc, rtc: Read BCR to detect whether hardware exists ...
ARC: timer: gfrc, rtc: deuglify big endian code
Diffstat (limited to 'drivers/clocksource')
-rw-r--r-- | drivers/clocksource/Kconfig     |  20
-rw-r--r-- | drivers/clocksource/Makefile    |   1
-rw-r--r-- | drivers/clocksource/arc_timer.c | 336
-rw-r--r-- | drivers/clocksource/timer-nps.c | 224
4 files changed, 561 insertions, 20 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index e2c6e43cf8ca..4866f7aa32e6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -282,6 +282,26 @@ config CLKSRC_MPS2
         select CLKSRC_MMIO
         select CLKSRC_OF
 
+config ARC_TIMERS
+        bool "Support for 32-bit TIMERn counters in ARC Cores" if COMPILE_TEST
+        depends on GENERIC_CLOCKEVENTS
+        select CLKSRC_OF
+        help
+          These are legacy 32-bit TIMER0 and TIMER1 counters found on all ARC cores
+          (ARC700 as well as ARC HS38).
+          TIMER0 serves as clockevent while TIMER1 provides clocksource
+
+config ARC_TIMERS_64BIT
+        bool "Support for 64-bit counters in ARC HS38 cores" if COMPILE_TEST
+        depends on GENERIC_CLOCKEVENTS
+        depends on ARC_TIMERS
+        select CLKSRC_OF
+        help
+          This enables 2 different 64-bit timers: RTC (for UP) and GFRC (for SMP)
+          RTC is implemented inside the core, while GFRC sits outside the core in
+          ARConnect IP block. Driver automatically picks one of them for clocksource
+          as appropriate.
+
 config ARM_ARCH_TIMER
         bool
         select CLKSRC_OF if OF
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index cf87f407f1ad..a14111e1f087 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -51,6 +51,7 @@ obj-$(CONFIG_CLKSRC_TI_32K)     += timer-ti-32k.o
 obj-$(CONFIG_CLKSRC_NPS)        += timer-nps.o
 obj-$(CONFIG_OXNAS_RPS_TIMER)   += timer-oxnas-rps.o
 
+obj-$(CONFIG_ARC_TIMERS)        += arc_timer.o
 obj-$(CONFIG_ARM_ARCH_TIMER)    += arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)  += arm_global_timer.o
 obj-$(CONFIG_ARMV7M_SYSTICK)    += armv7m_systick.o
diff --git a/drivers/clocksource/arc_timer.c b/drivers/clocksource/arc_timer.c
new file mode 100644
index 000000000000..a49748d826c0
--- /dev/null
+++ b/drivers/clocksource/arc_timer.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (C) 2016-17 Synopsys, Inc. (www.synopsys.com)
+ * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/* ARC700 has two 32bit independent prog Timers: TIMER0 and TIMER1, Each can be
+ * programmed to go from @count to @limit and optionally interrupt.
+ * We've designated TIMER0 for clockevents and TIMER1 for clocksource
+ *
+ * ARCv2 based HS38 cores have RTC (in-core) and GFRC (inside ARConnect/MCIP)
+ * which are suitable for UP and SMP based clocksources respectively
+ */
+
+#include <linux/interrupt.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <soc/arc/timers.h>
+#include <soc/arc/mcip.h>
+
+
+static unsigned long arc_timer_freq;
+
+static int noinline arc_get_timer_clk(struct device_node *node)
+{
+        struct clk *clk;
+        int ret;
+
+        clk = of_clk_get(node, 0);
+        if (IS_ERR(clk)) {
+                pr_err("timer missing clk");
+                return PTR_ERR(clk);
+        }
+
+        ret = clk_prepare_enable(clk);
+        if (ret) {
+                pr_err("Couldn't enable parent clk\n");
+                return ret;
+        }
+
+        arc_timer_freq = clk_get_rate(clk);
+
+        return 0;
+}
+
+/********** Clock Source Device *********/
+
+#ifdef CONFIG_ARC_TIMERS_64BIT
+
+static cycle_t arc_read_gfrc(struct clocksource *cs)
+{
+        unsigned long flags;
+        u32 l, h;
+
+        local_irq_save(flags);
+
+        __mcip_cmd(CMD_GFRC_READ_LO, 0);
+        l = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+        __mcip_cmd(CMD_GFRC_READ_HI, 0);
+        h = read_aux_reg(ARC_REG_MCIP_READBACK);
+
+        local_irq_restore(flags);
+
+        return (((cycle_t)h) << 32) | l;
+}
+
+static struct clocksource arc_counter_gfrc = {
+        .name   = "ARConnect GFRC",
+        .rating = 400,
+        .read   = arc_read_gfrc,
+        .mask   = CLOCKSOURCE_MASK(64),
+        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_gfrc(struct device_node *node)
+{
+        struct mcip_bcr mp;
+        int ret;
+
+        READ_BCR(ARC_REG_MCIP_BCR, mp);
+        if (!mp.gfrc) {
+                pr_warn("Global-64-bit-Ctr clocksource not detected");
+                return -ENXIO;
+        }
+
+        ret = arc_get_timer_clk(node);
+        if (ret)
+                return ret;
+
+        return clocksource_register_hz(&arc_counter_gfrc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_gfrc, "snps,archs-timer-gfrc", arc_cs_setup_gfrc);
+
+#define AUX_RTC_CTRL    0x103
+#define AUX_RTC_LOW     0x104
+#define AUX_RTC_HIGH    0x105
+
+static cycle_t arc_read_rtc(struct clocksource *cs)
+{
+        unsigned long status;
+        u32 l, h;
+
+        /*
+         * hardware has an internal state machine which tracks readout of
+         * low/high and updates the CTRL.status if
+         *  - interrupt/exception taken between the two reads
+         *  - high increments after low has been read
+         */
+        do {
+                l = read_aux_reg(AUX_RTC_LOW);
+                h = read_aux_reg(AUX_RTC_HIGH);
+                status = read_aux_reg(AUX_RTC_CTRL);
+        } while (!(status & _BITUL(31)));
+
+        return (((cycle_t)h) << 32) | l;
+}
+
+static struct clocksource arc_counter_rtc = {
+        .name   = "ARCv2 RTC",
+        .rating = 350,
+        .read   = arc_read_rtc,
+        .mask   = CLOCKSOURCE_MASK(64),
+        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_rtc(struct device_node *node)
+{
+        struct bcr_timer timer;
+        int ret;
+
+        READ_BCR(ARC_REG_TIMERS_BCR, timer);
+        if (!timer.rtc) {
+                pr_warn("Local-64-bit-Ctr clocksource not detected");
+                return -ENXIO;
+        }
+
+        /* Local to CPU hence not usable in SMP */
+        if (IS_ENABLED(CONFIG_SMP)) {
+                pr_warn("Local-64-bit-Ctr not usable in SMP");
+                return -EINVAL;
+        }
+
+        ret = arc_get_timer_clk(node);
+        if (ret)
+                return ret;
+
+        write_aux_reg(AUX_RTC_CTRL, 1);
+
+        return clocksource_register_hz(&arc_counter_rtc, arc_timer_freq);
+}
+CLOCKSOURCE_OF_DECLARE(arc_rtc, "snps,archs-timer-rtc", arc_cs_setup_rtc);
+
+#endif
+
+/*
+ * 32bit TIMER1 to keep counting monotonically and wraparound
+ */
+
+static cycle_t arc_read_timer1(struct clocksource *cs)
+{
+        return (cycle_t) read_aux_reg(ARC_REG_TIMER1_CNT);
+}
+
+static struct clocksource arc_counter_timer1 = {
+        .name   = "ARC Timer1",
+        .rating = 300,
+        .read   = arc_read_timer1,
+        .mask   = CLOCKSOURCE_MASK(32),
+        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init arc_cs_setup_timer1(struct device_node *node)
+{
+        int ret;
+
+        /* Local to CPU hence not usable in SMP */
+        if (IS_ENABLED(CONFIG_SMP))
+                return -EINVAL;
+
+        ret = arc_get_timer_clk(node);
+        if (ret)
+                return ret;
+
+        write_aux_reg(ARC_REG_TIMER1_LIMIT, ARC_TIMERN_MAX);
+        write_aux_reg(ARC_REG_TIMER1_CNT, 0);
+        write_aux_reg(ARC_REG_TIMER1_CTRL, TIMER_CTRL_NH);
+
+        return clocksource_register_hz(&arc_counter_timer1, arc_timer_freq);
+}
+
+/********** Clock Event Device *********/
+
+static int arc_timer_irq;
+
+/*
+ * Arm the timer to interrupt after @cycles
+ * The distinction for oneshot/periodic is done in arc_event_timer_ack() below
+ */
+static void arc_timer_event_setup(unsigned int cycles)
+{
+        write_aux_reg(ARC_REG_TIMER0_LIMIT, cycles);
+        write_aux_reg(ARC_REG_TIMER0_CNT, 0);   /* start from 0 */
+
+        write_aux_reg(ARC_REG_TIMER0_CTRL, TIMER_CTRL_IE | TIMER_CTRL_NH);
+}
+
+
+static int arc_clkevent_set_next_event(unsigned long delta,
+                                       struct clock_event_device *dev)
+{
+        arc_timer_event_setup(delta);
+        return 0;
+}
+
+static int arc_clkevent_set_periodic(struct clock_event_device *dev)
+{
+        /*
+         * At X Hz, 1 sec = 1000ms -> X cycles;
+         * 10ms -> X / 100 cycles
+         */
+        arc_timer_event_setup(arc_timer_freq / HZ);
+        return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
+        .name                   = "ARC Timer0",
+        .features               = CLOCK_EVT_FEAT_ONESHOT |
+                                  CLOCK_EVT_FEAT_PERIODIC,
+        .rating                 = 300,
+        .set_next_event         = arc_clkevent_set_next_event,
+        .set_state_periodic     = arc_clkevent_set_periodic,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+        /*
+         * Note that generic IRQ core could have passed @evt for @dev_id if
+         * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
+         */
+        struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+        int irq_reenable = clockevent_state_periodic(evt);
+
+        /*
+         * Any write to CTRL reg ACks the interrupt, we rewrite the
+         * Count when [N]ot [H]alted bit.
+         * And re-arm it if perioid by [I]nterrupt [E]nable bit
+         */
+        write_aux_reg(ARC_REG_TIMER0_CTRL, irq_reenable | TIMER_CTRL_NH);
+
+        evt->event_handler(evt);
+
+        return IRQ_HANDLED;
+}
+
+
+static int arc_timer_starting_cpu(unsigned int cpu)
+{
+        struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+
+        evt->cpumask = cpumask_of(smp_processor_id());
+
+        clockevents_config_and_register(evt, arc_timer_freq, 0, ARC_TIMERN_MAX);
+        enable_percpu_irq(arc_timer_irq, 0);
+        return 0;
+}
+
+static int arc_timer_dying_cpu(unsigned int cpu)
+{
+        disable_percpu_irq(arc_timer_irq);
+        return 0;
+}
+
+/*
+ * clockevent setup for boot CPU
+ */
+static int __init arc_clockevent_setup(struct device_node *node)
+{
+        struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
+        int ret;
+
+        arc_timer_irq = irq_of_parse_and_map(node, 0);
+        if (arc_timer_irq <= 0) {
+                pr_err("clockevent: missing irq");
+                return -EINVAL;
+        }
+
+        ret = arc_get_timer_clk(node);
+        if (ret) {
+                pr_err("clockevent: missing clk");
+                return ret;
+        }
+
+        /* Needs apriori irq_set_percpu_devid() done in intc map function */
+        ret = request_percpu_irq(arc_timer_irq, timer_irq_handler,
+                                 "Timer0 (per-cpu-tick)", evt);
+        if (ret) {
+                pr_err("clockevent: unable to request irq\n");
+                return ret;
+        }
+
+        ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+                                "AP_ARC_TIMER_STARTING",
+                                arc_timer_starting_cpu,
+                                arc_timer_dying_cpu);
+        if (ret) {
+                pr_err("Failed to setup hotplug state");
+                return ret;
+        }
+        return 0;
+}
+
+static int __init arc_of_timer_init(struct device_node *np)
+{
+        static int init_count = 0;
+        int ret;
+
+        if (!init_count) {
+                init_count = 1;
+                ret = arc_clockevent_setup(np);
+        } else {
+                ret = arc_cs_setup_timer1(np);
+        }
+
+        return ret;
+}
+CLOCKSOURCE_OF_DECLARE(arc_clkevt, "snps,arc-timer", arc_of_timer_init);
diff --git a/drivers/clocksource/timer-nps.c b/drivers/clocksource/timer-nps.c
index 70c149af8ee0..8da5e93b6810 100644
--- a/drivers/clocksource/timer-nps.c
+++ b/drivers/clocksource/timer-nps.c
@@ -46,7 +46,36 @@
 /* This array is per cluster of CPUs (Each NPS400 cluster got 256 CPUs) */
 static void *nps_msu_reg_low_addr[NPS_CLUSTER_NUM] __read_mostly;
 
-static unsigned long nps_timer_rate;
+static int __init nps_get_timer_clk(struct device_node *node,
+                                    unsigned long *timer_freq,
+                                    struct clk **clk)
+{
+        int ret;
+
+        *clk = of_clk_get(node, 0);
+        ret = PTR_ERR_OR_ZERO(*clk);
+        if (ret) {
+                pr_err("timer missing clk");
+                return ret;
+        }
+
+        ret = clk_prepare_enable(*clk);
+        if (ret) {
+                pr_err("Couldn't enable parent clk\n");
+                clk_put(*clk);
+                return ret;
+        }
+
+        *timer_freq = clk_get_rate(*clk);
+        if (!(*timer_freq)) {
+                pr_err("Couldn't get clk rate\n");
+                clk_disable_unprepare(*clk);
+                clk_put(*clk);
+                return -EINVAL;
+        }
+
+        return 0;
+}
 
 static cycle_t nps_clksrc_read(struct clocksource *clksrc)
 {
@@ -55,26 +84,24 @@ static cycle_t nps_clksrc_read(struct clocksource *clksrc)
         return (cycle_t)ioread32be(nps_msu_reg_low_addr[cluster]);
 }
 
-static int __init nps_setup_clocksource(struct device_node *node,
-                                        struct clk *clk)
+static int __init nps_setup_clocksource(struct device_node *node)
 {
         int ret, cluster;
+        struct clk *clk;
+        unsigned long nps_timer1_freq;
+
         for (cluster = 0; cluster < NPS_CLUSTER_NUM; cluster++)
                 nps_msu_reg_low_addr[cluster] =
                         nps_host_reg((cluster << NPS_CLUSTER_OFFSET),
-                                        NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
+                                     NPS_MSU_BLKID, NPS_MSU_TICK_LOW);
 
-        ret = clk_prepare_enable(clk);
-        if (ret) {
-                pr_err("Couldn't enable parent clock\n");
+        ret = nps_get_timer_clk(node, &nps_timer1_freq, &clk);
+        if (ret)
                 return ret;
-        }
-
-        nps_timer_rate = clk_get_rate(clk);
 
-        ret = clocksource_mmio_init(nps_msu_reg_low_addr, "EZnps-tick",
-                                    nps_timer_rate, 301, 32, nps_clksrc_read);
+        ret = clocksource_mmio_init(nps_msu_reg_low_addr, "nps-tick",
+                                    nps_timer1_freq, 300, 32, nps_clksrc_read);
         if (ret) {
                 pr_err("Couldn't register clock source.\n");
                 clk_disable_unprepare(clk);
@@ -83,18 +110,175 @@ static int __init nps_setup_clocksource(struct device_node *node,
 
         return ret;
 }
-static int __init nps_timer_init(struct device_node *node)
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
+                       nps_setup_clocksource);
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_src, "ezchip,nps400-timer1",
+                       nps_setup_clocksource);
+
+#ifdef CONFIG_EZNPS_MTM_EXT
+#include <soc/nps/mtm.h>
+
+/* Timer related Aux registers */
+#define NPS_REG_TIMER0_TSI      0xFFFFF850
+#define NPS_REG_TIMER0_LIMIT    0x23
+#define NPS_REG_TIMER0_CTRL     0x22
+#define NPS_REG_TIMER0_CNT      0x21
+
+/*
+ * Interrupt Enabled (IE) - re-arm the timer
+ * Not Halted (NH) - is cleared when working with JTAG (for debug)
+ */
+#define TIMER0_CTRL_IE          BIT(0)
+#define TIMER0_CTRL_NH          BIT(1)
+
+static unsigned long nps_timer0_freq;
+static unsigned long nps_timer0_irq;
+
+static void nps_clkevent_rm_thread(void)
+{
+        int thread;
+        unsigned int cflags, enabled_threads;
+
+        hw_schd_save(&cflags);
+
+        enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
+
+        /* remove thread from TSI1 */
+        thread = read_aux_reg(CTOP_AUX_THREAD_ID);
+        enabled_threads &= ~(1 << thread);
+        write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
+
+        /* Acknowledge and if needed re-arm the timer */
+        if (!enabled_threads)
+                write_aux_reg(NPS_REG_TIMER0_CTRL, TIMER0_CTRL_NH);
+        else
+                write_aux_reg(NPS_REG_TIMER0_CTRL,
+                              TIMER0_CTRL_IE | TIMER0_CTRL_NH);
+
+        hw_schd_restore(cflags);
+}
+
+static void nps_clkevent_add_thread(unsigned long delta)
+{
+        int thread;
+        unsigned int cflags, enabled_threads;
+
+        hw_schd_save(&cflags);
+
+        /* add thread to TSI1 */
+        thread = read_aux_reg(CTOP_AUX_THREAD_ID);
+        enabled_threads = read_aux_reg(NPS_REG_TIMER0_TSI);
+        enabled_threads |= (1 << thread);
+        write_aux_reg(NPS_REG_TIMER0_TSI, enabled_threads);
+
+        /* set next timer event */
+        write_aux_reg(NPS_REG_TIMER0_LIMIT, delta);
+        write_aux_reg(NPS_REG_TIMER0_CNT, 0);
+        write_aux_reg(NPS_REG_TIMER0_CTRL,
+                      TIMER0_CTRL_IE | TIMER0_CTRL_NH);
+
+        hw_schd_restore(cflags);
+}
+
+/*
+ * Whenever anyone tries to change modes, we just mask interrupts
+ * and wait for the next event to get set.
+ */
+static int nps_clkevent_set_state(struct clock_event_device *dev)
+{
+        nps_clkevent_rm_thread();
+        disable_percpu_irq(nps_timer0_irq);
+
+        return 0;
+}
+
+static int nps_clkevent_set_next_event(unsigned long delta,
+                                       struct clock_event_device *dev)
+{
+        nps_clkevent_add_thread(delta);
+        enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
+
+        return 0;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, nps_clockevent_device) = {
+        .name                           = "NPS Timer0",
+        .features                       = CLOCK_EVT_FEAT_ONESHOT,
+        .rating                         = 300,
+        .set_next_event                 = nps_clkevent_set_next_event,
+        .set_state_oneshot              = nps_clkevent_set_state,
+        .set_state_oneshot_stopped      = nps_clkevent_set_state,
+        .set_state_shutdown             = nps_clkevent_set_state,
+        .tick_resume                    = nps_clkevent_set_state,
+};
+
+static irqreturn_t timer_irq_handler(int irq, void *dev_id)
+{
+        struct clock_event_device *evt = dev_id;
+
+        nps_clkevent_rm_thread();
+        evt->event_handler(evt);
+
+        return IRQ_HANDLED;
+}
+
+static int nps_timer_starting_cpu(unsigned int cpu)
+{
+        struct clock_event_device *evt = this_cpu_ptr(&nps_clockevent_device);
+
+        evt->cpumask = cpumask_of(smp_processor_id());
+
+        clockevents_config_and_register(evt, nps_timer0_freq, 0, ULONG_MAX);
+        enable_percpu_irq(nps_timer0_irq, IRQ_TYPE_NONE);
+
+        return 0;
+}
+
+static int nps_timer_dying_cpu(unsigned int cpu)
+{
+        disable_percpu_irq(nps_timer0_irq);
+        return 0;
+}
+
+static int __init nps_setup_clockevent(struct device_node *node)
 {
         struct clk *clk;
+        int ret;
 
-        clk = of_clk_get(node, 0);
-        if (IS_ERR(clk)) {
-                pr_err("Can't get timer clock.\n");
-                return PTR_ERR(clk);
+        nps_timer0_irq = irq_of_parse_and_map(node, 0);
+        if (nps_timer0_irq <= 0) {
+                pr_err("clockevent: missing irq");
+                return -EINVAL;
         }
 
-        return nps_setup_clocksource(node, clk);
+        ret = nps_get_timer_clk(node, &nps_timer0_freq, &clk);
+        if (ret)
+                return ret;
+
+        /* Needs apriori irq_set_percpu_devid() done in intc map function */
+        ret = request_percpu_irq(nps_timer0_irq, timer_irq_handler,
+                                 "Timer0 (per-cpu-tick)",
+                                 &nps_clockevent_device);
+        if (ret) {
+                pr_err("Couldn't request irq\n");
+                clk_disable_unprepare(clk);
+                return ret;
+        }
+
+        ret = cpuhp_setup_state(CPUHP_AP_ARC_TIMER_STARTING,
+                                "clockevents/nps:starting",
+                                nps_timer_starting_cpu,
+                                nps_timer_dying_cpu);
+        if (ret) {
+                pr_err("Failed to setup hotplug state");
+                clk_disable_unprepare(clk);
+                free_percpu_irq(nps_timer0_irq, &nps_clockevent_device);
+                return ret;
+        }
+
+        return 0;
 }
-CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clksrc, "ezchip,nps400-timer",
-                       nps_timer_init);
+CLOCKSOURCE_OF_DECLARE(ezchip_nps400_clk_evt, "ezchip,nps400-timer0",
+                       nps_setup_clockevent);
+#endif /* CONFIG_EZNPS_MTM_EXT */
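
For context on how the new CLOCKSOURCE_OF_DECLARE() entries above get instantiated, a device-tree fragment of the kind the ARC driver binds against might look like the sketch below. This is illustrative only and not part of the merge: the node names, interrupt numbers and the core_clk phandle are assumptions. What the code actually requires is that clock 0 (and, for the clockevent node, interrupt 0) of each matched node be valid, and that the first "snps,arc-timer" node probed becomes the TIMER0 clockevent while the second becomes the TIMER1 clocksource.

/* Hypothetical DT sketch; IRQ numbers and the core_clk phandle are
 * placeholders for whatever the platform actually provides.
 */
timer0 {
        compatible = "snps,arc-timer";  /* first match: TIMER0 clockevent */
        interrupts = <16>;              /* per-cpu tick interrupt */
        clocks = <&core_clk>;
};

timer1 {
        compatible = "snps,arc-timer";  /* second match: TIMER1 clocksource (UP only) */
        clocks = <&core_clk>;
};

gfrc {
        compatible = "snps,archs-timer-gfrc";   /* 64-bit SMP clocksource; "snps,archs-timer-rtc" for UP */
        clocks = <&core_clk>;
};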