Diffstat (limited to 'arch/arm/plat-iop')
-rw-r--r--   arch/arm/plat-iop/time.c | 176
1 file changed, 141 insertions(+), 35 deletions(-)
diff --git a/arch/arm/plat-iop/time.c b/arch/arm/plat-iop/time.c
index 8da95d57c21f..6c8a02ad98e3 100644
--- a/arch/arm/plat-iop/time.c
+++ b/arch/arm/plat-iop/time.c
@@ -19,6 +19,8 @@
 #include <linux/init.h>
 #include <linux/timex.h>
 #include <linux/io.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
 #include <mach/hardware.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
@@ -26,45 +28,136 @@
 #include <asm/mach/time.h>
 #include <mach/time.h>
 
+/*
+ * IOP clocksource (free-running timer 1).
+ */
+static cycle_t iop_clocksource_read(struct clocksource *unused)
+{
+	return 0xffffffffu - read_tcr1();
+}
+
+static struct clocksource iop_clocksource = {
+	.name		= "iop_timer1",
+	.rating		= 300,
+	.read		= iop_clocksource_read,
+	.mask		= CLOCKSOURCE_MASK(32),
+	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void __init iop_clocksource_set_hz(struct clocksource *cs, unsigned int hz)
+{
+	u64 temp;
+	u32 shift;
+
+	/* Find shift and mult values for hz. */
+	shift = 32;
+	do {
+		temp = (u64) NSEC_PER_SEC << shift;
+		do_div(temp, hz);
+		if ((temp >> 32) == 0)
+			break;
+	} while (--shift != 0);
+
+	cs->shift = shift;
+	cs->mult = (u32) temp;
+
+	printk(KERN_INFO "clocksource: %s uses shift %u mult %#x\n",
+	       cs->name, cs->shift, cs->mult);
+}
+
+/*
+ * IOP sched_clock() implementation via its clocksource.
+ */
+unsigned long long sched_clock(void)
+{
+	cycle_t cyc = iop_clocksource_read(NULL);
+	struct clocksource *cs = &iop_clocksource;
+
+	return clocksource_cyc2ns(cyc, cs->mult, cs->shift);
+}
+
+/*
+ * IOP clockevents (interrupting timer 0).
+ */
+static int iop_set_next_event(unsigned long delta,
+			      struct clock_event_device *unused)
+{
+	u32 tmr = IOP_TMR_PRIVILEGED | IOP_TMR_RATIO_1_1;
+
+	BUG_ON(delta == 0);
+	write_tmr0(tmr & ~(IOP_TMR_EN | IOP_TMR_RELOAD));
+	write_tcr0(delta);
+	write_tmr0((tmr & ~IOP_TMR_RELOAD) | IOP_TMR_EN);
+
+	return 0;
+}
+
 static unsigned long ticks_per_jiffy;
-static unsigned long ticks_per_usec;
-static unsigned long next_jiffy_time;
 
-unsigned long iop_gettimeoffset(void)
+static void iop_set_mode(enum clock_event_mode mode,
+			 struct clock_event_device *unused)
 {
-	unsigned long offset, temp;
+	u32 tmr = read_tmr0();
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		write_tmr0(tmr & ~IOP_TMR_EN);
+		write_tcr0(ticks_per_jiffy - 1);
+		tmr |= (IOP_TMR_RELOAD | IOP_TMR_EN);
+		break;
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* ->set_next_event sets period and enables timer */
+		tmr &= ~(IOP_TMR_RELOAD | IOP_TMR_EN);
+		break;
+	case CLOCK_EVT_MODE_RESUME:
+		tmr |= IOP_TMR_EN;
+		break;
+	case CLOCK_EVT_MODE_SHUTDOWN:
+	case CLOCK_EVT_MODE_UNUSED:
+	default:
+		tmr &= ~IOP_TMR_EN;
+		break;
+	}
 
-	/* enable cp6, if necessary, to avoid taking the overhead of an
-	 * undefined instruction trap
-	 */
-	asm volatile (
-	"mrc p15, 0, %0, c15, c1, 0\n\t"
-	"tst %0, #(1 << 6)\n\t"
-	"orreq %0, %0, #(1 << 6)\n\t"
-	"mcreq p15, 0, %0, c15, c1, 0\n\t"
-#ifdef CONFIG_CPU_XSCALE
-	"mrceq p15, 0, %0, c15, c1, 0\n\t"
-	"moveq %0, %0\n\t"
-	"subeq pc, pc, #4\n\t"
-#endif
-	: "=r"(temp) : : "cc");
-
-	offset = next_jiffy_time - read_tcr1();
-
-	return offset / ticks_per_usec;
+	write_tmr0(tmr);
+}
+
+static struct clock_event_device iop_clockevent = {
+	.name		= "iop_timer0",
+	.features	= CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+	.rating		= 300,
+	.set_next_event	= iop_set_next_event,
+	.set_mode	= iop_set_mode,
+};
+
+static void __init iop_clockevent_set_hz(struct clock_event_device *ce, unsigned int hz)
+{
+	u64 temp;
+	u32 shift;
+
+	/* Find shift and mult values for hz. */
+	shift = 32;
+	do {
+		temp = (u64) hz << shift;
+		do_div(temp, NSEC_PER_SEC);
+		if ((temp >> 32) == 0)
+			break;
+	} while (--shift != 0);
+
+	ce->shift = shift;
+	ce->mult = (u32) temp;
+
+	printk(KERN_INFO "clockevent: %s uses shift %u mult %#lx\n",
+	       ce->name, ce->shift, ce->mult);
 }
 
 static irqreturn_t
 iop_timer_interrupt(int irq, void *dev_id)
 {
-	write_tisr(1);
-
-	while ((signed long)(next_jiffy_time - read_tcr1())
-						>= ticks_per_jiffy) {
-		timer_tick();
-		next_jiffy_time -= ticks_per_jiffy;
-	}
+	struct clock_event_device *evt = dev_id;
 
+	write_tisr(1);
+	evt->event_handler(evt);
 	return IRQ_HANDLED;
 }
 
@@ -72,6 +165,7 @@ static struct irqaction iop_timer_irq = {
 	.name		= "IOP Timer Tick",
 	.handler	= iop_timer_interrupt,
 	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
+	.dev_id		= &iop_clockevent,
 };
 
 static unsigned long iop_tick_rate;
@@ -86,21 +180,33 @@ void __init iop_init_time(unsigned long tick_rate)
 	u32 timer_ctl;
 
 	ticks_per_jiffy = DIV_ROUND_CLOSEST(tick_rate, HZ);
-	ticks_per_usec = tick_rate / 1000000;
-	next_jiffy_time = 0xffffffff;
 	iop_tick_rate = tick_rate;
 
 	timer_ctl = IOP_TMR_EN | IOP_TMR_PRIVILEGED |
 			IOP_TMR_RELOAD | IOP_TMR_RATIO_1_1;
 
 	/*
-	 * We use timer 0 for our timer interrupt, and timer 1 as
-	 * monotonic counter for tracking missed jiffies.
+	 * Set up interrupting clockevent timer 0.
 	 */
+	write_tmr0(timer_ctl & ~IOP_TMR_EN);
+	setup_irq(IRQ_IOP_TIMER0, &iop_timer_irq);
+	iop_clockevent_set_hz(&iop_clockevent, tick_rate);
+	iop_clockevent.max_delta_ns =
+		clockevent_delta2ns(0xfffffffe, &iop_clockevent);
+	iop_clockevent.min_delta_ns =
+		clockevent_delta2ns(0xf, &iop_clockevent);
+	iop_clockevent.cpumask = cpumask_of(0);
+	clockevents_register_device(&iop_clockevent);
 	write_trr0(ticks_per_jiffy - 1);
+	write_tcr0(ticks_per_jiffy - 1);
 	write_tmr0(timer_ctl);
+
+	/*
+	 * Set up free-running clocksource timer 1.
+	 */
 	write_trr1(0xffffffff);
+	write_tcr1(0xffffffff);
 	write_tmr1(timer_ctl);
-
-	setup_irq(IRQ_IOP_TIMER0, &iop_timer_irq);
+	iop_clocksource_set_hz(&iop_clocksource, tick_rate);
+	clocksource_register(&iop_clocksource);
 }
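
The shift/mult search in iop_clocksource_set_hz() and iop_clockevent_set_hz() above is plain fixed-point scaling: pick the largest shift for which the scaled ratio still fits in 32 bits, then convert cycles with (cyc * mult) >> shift, which is what clocksource_cyc2ns() does for sched_clock(). The standalone sketch below is not part of the patch; the 33 MHz tick rate and the userspace printf harness are illustrative assumptions (real boards pass their actual rate to iop_init_time()).

/* Standalone sketch of the clocksource mult/shift search above.
 * Assumption: a 33 MHz example tick rate.  Build with: gcc -o cyc2ns cyc2ns.c
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define NSEC_PER_SEC 1000000000u

int main(void)
{
	unsigned int hz = 33000000;	/* example tick rate (assumed) */
	uint64_t temp;
	uint32_t shift, mult, cyc;

	/* Largest shift such that (NSEC_PER_SEC << shift) / hz still fits
	 * in 32 bits, mirroring the do_div() loop in iop_clocksource_set_hz(). */
	shift = 32;
	do {
		temp = (uint64_t)NSEC_PER_SEC << shift;
		temp /= hz;
		if ((temp >> 32) == 0)
			break;
	} while (--shift != 0);
	mult = (uint32_t)temp;

	printf("hz=%u -> shift=%" PRIu32 " mult=%#" PRIx32 "\n", hz, shift, mult);

	/* clocksource_cyc2ns() reduces to (cyc * mult) >> shift. */
	cyc = 33000;	/* one millisecond worth of cycles at 33 MHz */
	printf("%" PRIu32 " cycles -> %" PRIu64 " ns\n",
	       cyc, ((uint64_t)cyc * mult) >> shift);
	return 0;
}

With these example numbers the search settles on a shift of 27, and converting 33,000 cycles comes back as just under one millisecond; the small shortfall is the usual truncation of the fixed-point mult.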