author      Tkhai Kirill <tkhai@yandex.ru>          2012-04-04 23:49:26 +0400
committer   David S. Miller <davem@davemloft.net>   2012-04-15 21:28:50 +0400
commit      62f082830d63cf753ed0dab16f8d3b2d0ffc7f43 (patch)
tree        39770d13d3dbff835eb3500c6a913da5c784fec3 /arch/sparc/kernel/time_32.c
parent      472bc4f2ad164a5aac2e85d891c4faecfc5d62c4 (diff)
download    linux-62f082830d63cf753ed0dab16f8d3b2d0ffc7f43.tar.xz
sparc32: generic clockevent support
The kernel uses the l14 timers as clockevents. The l10 timer is used
as the clocksource when the platform's master_l10_counter is not
constantly zero. The clocksource is continuous, so high-resolution
timers can be used. The l10 timer is also used as the clockevent
on UP configurations.
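Roughly, the new clocksource keeps a software counter of elapsed timer
ticks, bumped under a seqlock in the timer interrupt, and adds the
current hardware counter offset on every read. Below is a minimal
userspace sketch of that accounting; the tick period and offset values
are made up, and the seqlock (which in the kernel guarantees a
consistent counter/offset pair) is omitted:

#include <stdint.h>
#include <stdio.h>

#define CS_PERIOD 10000U            /* cycles per timer tick (assumed value) */

static uint64_t ticks;              /* analogue of timer_cs_internal_counter */

/* Stand-in for sparc_config.get_cycles_offset() */
static unsigned int get_cycles_offset(void)
{
	return 1234;                /* cycles elapsed inside the current tick */
}

/* Same arithmetic as timer_cs_read(): whole ticks * period + partial tick */
static uint64_t timer_cs_read_sketch(void)
{
	uint64_t cycles = ticks;

	cycles *= CS_PERIOD;
	cycles += get_cycles_offset();
	return cycles;
}

int main(void)
{
	ticks = 42;                 /* pretend 42 timer interrupts have fired */
	printf("cycles = %llu\n", (unsigned long long)timer_cs_read_sketch());
	return 0;
}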
This implementation covers the sun4m, sun4d, sun4c, microsparc-IIep
and LEON platforms. The corresponding LEON changes were made by
Konrad Eisele.
In sun4m's oneshot mode, the profile irq is zeroed in
smp4m_percpu_timer_interrupt(). This may be unnecessary, since a
double or triple overflow has no effect.
sun4d could support oneshot mode too, but I have no way to test it,
so its percpu timer handler is kept as close to the existing code
as possible.
I have tested the patch on a sun4m box in SMP mode, and Konrad has
tested it on LEON in UP mode (LEON SMP is currently broken for
unrelated reasons).
Signed-off-by: Tkhai Kirill <tkhai@yandex.ru>
Tested-by: Konrad Eisele <konrad@gaisler.com> [leon up]
[sam: revised patch to provide generic support for leon]
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/time_32.c')
-rw-r--r--   arch/sparc/kernel/time_32.c   215
1 file changed, 176 insertions, 39 deletions
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 68e0284bf3f3..89e890bc0941 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -26,6 +26,8 @@
 #include <linux/rtc.h>
 #include <linux/rtc/m48t59.h>
 #include <linux/timex.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/ioport.h>
@@ -44,9 +46,21 @@
 #include <asm/page.h>
 #include <asm/pcic.h>
 #include <asm/irq_regs.h>
+#include <asm/setup.h>
 
 #include "irq.h"
 
+static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock);
+static __volatile__ u64 timer_cs_internal_counter = 0;
+static char timer_cs_enabled = 0;
+
+static struct clock_event_device timer_ce;
+static char timer_ce_enabled = 0;
+
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(struct clock_event_device, sparc32_clockevent);
+#endif
+
 DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
@@ -75,36 +89,167 @@ EXPORT_SYMBOL(profile_pc);
 
 __volatile__ unsigned int *master_l10_counter;
 
-u32 (*do_arch_gettimeoffset)(void);
-
 int update_persistent_clock(struct timespec now)
 {
 	return set_rtc_mmss(now.tv_sec);
 }
 
-/*
- * timer_interrupt() needs to keep up the real-time clock,
- * as well as call the "xtime_update()" routine every clocktick
- */
+irqreturn_t notrace timer_interrupt(int dummy, void *dev_id)
+{
+	if (timer_cs_enabled) {
+		write_seqlock(&timer_cs_lock);
+		timer_cs_internal_counter++;
+		clear_clock_irq();
+		write_sequnlock(&timer_cs_lock);
+	} else {
+		clear_clock_irq();
+	}
 
-#define TICK_SIZE (tick_nsec / 1000)
+	if (timer_ce_enabled)
+		timer_ce.event_handler(&timer_ce);
 
-static irqreturn_t timer_interrupt(int dummy, void *dev_id)
+	return IRQ_HANDLED;
+}
+
+static void timer_ce_set_mode(enum clock_event_mode mode,
+			      struct clock_event_device *evt)
 {
-#ifndef CONFIG_SMP
-	profile_tick(CPU_PROFILING);
-#endif
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+	case CLOCK_EVT_MODE_RESUME:
+		timer_ce_enabled = 1;
+		break;
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		timer_ce_enabled = 0;
+		break;
+	default:
+		break;
+	}
+	smp_mb();
+}
+
+static __init void setup_timer_ce(void)
+{
+	struct clock_event_device *ce = &timer_ce;
+
+	BUG_ON(smp_processor_id() != boot_cpu_id);
+
+	ce->name = "timer_ce";
+	ce->rating = 100;
+	ce->features = CLOCK_EVT_FEAT_PERIODIC;
+	ce->set_mode = timer_ce_set_mode;
+	ce->cpumask = cpu_possible_mask;
+	ce->shift = 32;
+	ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
+			  ce->shift);
+	clockevents_register_device(ce);
+}
 
-	clear_clock_irq();
+static unsigned int sbus_cycles_offset(void)
+{
+	unsigned int val, offset;
 
-	xtime_update(1);
+	val = *master_l10_counter;
+	offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
 
-#ifndef CONFIG_SMP
-	update_process_times(user_mode(get_irq_regs()));
-#endif
-	return IRQ_HANDLED;
+	/* Limit hit? */
+	if (val & TIMER_LIMIT_BIT)
+		offset += sparc_config.cs_period;
+
+	return offset;
 }
 
+static cycle_t timer_cs_read(struct clocksource *cs)
+{
+	unsigned int seq, offset;
+	u64 cycles;
+
+	do {
+		seq = read_seqbegin(&timer_cs_lock);
+
+		cycles = timer_cs_internal_counter;
+		offset = sparc_config.get_cycles_offset();
+	} while (read_seqretry(&timer_cs_lock, seq));
+
+	/* Count absolute cycles */
+	cycles *= sparc_config.cs_period;
+	cycles += offset;
+
+	return cycles;
+}
+
+static struct clocksource timer_cs = {
+	.name = "timer_cs",
+	.rating = 100,
+	.read = timer_cs_read,
+	.mask = CLOCKSOURCE_MASK(64),
+	.shift = 2,
+	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static __init int setup_timer_cs(void)
+{
+	timer_cs_enabled = 1;
+	timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
+					    timer_cs.shift);
+
+	return clocksource_register(&timer_cs);
+}
+
+#ifdef CONFIG_SMP
+static void percpu_ce_setup(enum clock_event_mode mode,
+			    struct clock_event_device *evt)
+{
+	int cpu = __first_cpu(evt->cpumask);
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		load_profile_irq(cpu, SBUS_CLOCK_RATE / HZ);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_UNUSED:
+		load_profile_irq(cpu, 0);
+		break;
+	default:
+		break;
+	}
+}
+
+static int percpu_ce_set_next_event(unsigned long delta,
+				    struct clock_event_device *evt)
+{
+	int cpu = __first_cpu(evt->cpumask);
+	unsigned int next = (unsigned int)delta;
+
+	load_profile_irq(cpu, next);
+	return 0;
+}
+
+void register_percpu_ce(int cpu)
+{
+	struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu);
+	unsigned int features = CLOCK_EVT_FEAT_PERIODIC;
+
+	if (sparc_config.features & FEAT_L14_ONESHOT)
+		features |= CLOCK_EVT_FEAT_ONESHOT;
+
+	ce->name = "percpu_ce";
+	ce->rating = 200;
+	ce->features = features;
+	ce->set_mode = percpu_ce_setup;
+	ce->set_next_event = percpu_ce_set_next_event;
+	ce->cpumask = cpumask_of(cpu);
+	ce->shift = 32;
+	ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC,
+			  ce->shift);
+	ce->max_delta_ns = clockevent_delta2ns(sparc_config.clock_rate, ce);
+	ce->min_delta_ns = clockevent_delta2ns(100, ce);
+
+	clockevents_register_device(ce);
+}
+#endif
+
 static unsigned char mostek_read_byte(struct device *dev, u32 ofs)
 {
 	struct platform_device *pdev = to_platform_device(dev);
@@ -195,38 +340,30 @@ static int __init clock_init(void)
  */
 fs_initcall(clock_init);
 
-
-u32 sbus_do_gettimeoffset(void)
+static void __init sparc32_late_time_init(void)
 {
-	unsigned long val = *master_l10_counter;
-	unsigned long usec = (val >> 10) & 0x1fffff;
-
-	/* Limit hit? */
-	if (val & 0x80000000)
-		usec += 1000000 / HZ;
-
-	return usec * 1000;
+	if (sparc_config.features & FEAT_L10_CLOCKEVENT)
+		setup_timer_ce();
+	if (sparc_config.features & FEAT_L10_CLOCKSOURCE)
+		setup_timer_cs();
+#ifdef CONFIG_SMP
+	register_percpu_ce(smp_processor_id());
+#endif
 }
 
-
-u32 arch_gettimeoffset(void)
+static void __init sbus_time_init(void)
 {
-	if (unlikely(!do_arch_gettimeoffset))
-		return 0;
-	return do_arch_gettimeoffset();
+	sparc_config.get_cycles_offset = sbus_cycles_offset;
+	sparc_config.init_timers();
 }
 
-static void __init sbus_time_init(void)
+void __init time_init(void)
 {
-	do_arch_gettimeoffset = sbus_do_gettimeoffset;
-	btfixup();
-	sparc_config.init_timers(timer_interrupt);
-}
+	sparc_config.features = 0;
+	late_time_init = sparc32_late_time_init;
 
-void __init time_init(void)
-{
 	if (pcic_present())
 		pci_time_init();
 	else
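For reference, the clockevent devices added above register shift = 32
and mult = div_sc(clock_rate, NSEC_PER_SEC, shift), so the clockevent
core turns a requested nanosecond delta into timer counts roughly as
(delta_ns * mult) >> shift before calling set_next_event(). A small
userspace sketch of that fixed-point conversion, with an assumed 4 MHz
timer clock (the kernel helpers additionally round and clamp):

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define CLOCK_RATE   4000000ULL     /* assumed timer frequency in Hz */
#define SHIFT        32

/* Roughly what div_sc(CLOCK_RATE, NSEC_PER_SEC, SHIFT) computes. */
static uint64_t calc_mult(void)
{
	return (CLOCK_RATE << SHIFT) / NSEC_PER_SEC;
}

/* ns -> timer counts, as done by the clockevent core for set_next_event() */
static uint64_t ns_to_counts(uint64_t delta_ns)
{
	return (delta_ns * calc_mult()) >> SHIFT;
}

int main(void)
{
	/* An event 10 ms out is about 40000 counts at 4 MHz. */
	printf("10 ms -> %llu counts\n",
	       (unsigned long long)ns_to_counts(10000000ULL));
	return 0;
}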