Diffstat (limited to 'drivers/clocksource')
-rw-r--r--  drivers/clocksource/Kconfig              |  14
-rw-r--r--  drivers/clocksource/arm_arch_timer.c     |   3
-rw-r--r--  drivers/clocksource/arm_global_timer.c   | 122
-rw-r--r--  drivers/clocksource/ingenic-sysost.c     |  10
-rw-r--r--  drivers/clocksource/samsung_pwm_timer.c  |  41
-rw-r--r--  drivers/clocksource/timer-mediatek.c     |  24
-rw-r--r--  drivers/clocksource/timer-ti-dm.c        |   9
7 files changed, 193 insertions, 30 deletions
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 39aa21d01e05..9fa28237715a 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -358,6 +358,20 @@ config ARM_GLOBAL_TIMER
help
This option enables support for the ARM global timer unit.
+config ARM_GT_INITIAL_PRESCALER_VAL
+ int "ARM global timer initial prescaler value"
+ default 2 if ARCH_ZYNQ
+ default 1
+ depends on ARM_GLOBAL_TIMER
+ help
+ When the ARM global timer initializes, its current rate is declared
+ to the kernel and maintained forever. Should its parent clock
+ change, the driver tries to fix the timer's internal prescaler.
+ On some machines (e.g. Zynq) the initial prescaler value thus
+ bounds how much the parent clock is allowed to decrease or
+ increase with respect to the initial clock value.
+ This affects CPU_FREQ max delta from the initial frequency.
+
config ARM_TIMER_SP804
bool "Support for Dual Timer SP804 module" if COMPILE_TEST
depends on GENERIC_SCHED_CLOCK && CLKDEV_LOOKUP
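[Editor's note: a rough worked example of the bound described in the new help text. This is a standalone sketch only, assuming the ARCH_ZYNQ default of CONFIG_ARM_GT_INITIAL_PRESCALER_VAL=2 and the divider range of 1..16 that the arm_global_timer.c change below allows (GT_CONTROL_PRESCALER_MAX = 0xF); the boot rate and variable names are illustrative, not part of the patch.]

```c
#include <stdio.h>

#define GT_INITIAL_PRESCALER	2	/* ARCH_ZYNQ default */
#define GT_DIVIDER_MAX		16	/* GT_CONTROL_PRESCALER_MAX + 1 */

int main(void)
{
	unsigned long boot_rate = 333333333;	/* assumed Zynq peripheral clock */
	unsigned long timer_rate = boot_rate / GT_INITIAL_PRESCALER;

	/* this rate is declared to the kernel once at boot and kept forever */
	printf("timer rate: %lu Hz\n", timer_rate);

	/* dividers 1..16 bound how far the parent clock may later move */
	printf("parent may range from %lu to %lu Hz\n",
	       timer_rate, timer_rate * GT_DIVIDER_MAX);
	return 0;
}
```

In addition to staying inside that window, a new parent rate must sit within MAX_F_ERR (50 Hz) of an integer multiple of the boot-time timer rate; see the clock notifier added to arm_global_timer.c below.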
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index fe1a82627d57..be6d741d404c 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -64,7 +64,6 @@ struct arch_timer {
#define to_arch_timer(e) container_of(e, struct arch_timer, evt)
static u32 arch_timer_rate __ro_after_init;
-u32 arch_timer_rate1 __ro_after_init;
static int arch_timer_ppi[ARCH_TIMER_MAX_TIMER_PPI] __ro_after_init;
static const char *arch_timer_ppi_names[ARCH_TIMER_MAX_TIMER_PPI] = {
@@ -365,7 +364,7 @@ static u64 notrace arm64_858921_read_cntvct_el0(void)
do { \
_val = read_sysreg(reg); \
_retries--; \
- } while (((_val + 1) & GENMASK(9, 0)) <= 1 && _retries); \
+ } while (((_val + 1) & GENMASK(8, 0)) <= 1 && _retries); \
\
WARN_ON_ONCE(!_retries); \
_val; \
diff --git a/drivers/clocksource/arm_global_timer.c b/drivers/clocksource/arm_global_timer.c
index 88b2d38a7a61..44a61dc6f932 100644
--- a/drivers/clocksource/arm_global_timer.c
+++ b/drivers/clocksource/arm_global_timer.c
@@ -31,6 +31,10 @@
#define GT_CONTROL_COMP_ENABLE BIT(1) /* banked */
#define GT_CONTROL_IRQ_ENABLE BIT(2) /* banked */
#define GT_CONTROL_AUTO_INC BIT(3) /* banked */
+#define GT_CONTROL_PRESCALER_SHIFT 8
+#define GT_CONTROL_PRESCALER_MAX 0xF
+#define GT_CONTROL_PRESCALER_MASK (GT_CONTROL_PRESCALER_MAX << \
+ GT_CONTROL_PRESCALER_SHIFT)
#define GT_INT_STATUS 0x0c
#define GT_INT_STATUS_EVENT_FLAG BIT(0)
@@ -39,6 +43,7 @@
#define GT_COMP1 0x14
#define GT_AUTO_INC 0x18
+#define MAX_F_ERR 50
/*
* We are expecting to be clocked by the ARM peripheral clock.
*
@@ -46,7 +51,8 @@
* the units for all operations.
*/
static void __iomem *gt_base;
-static unsigned long gt_clk_rate;
+static struct notifier_block gt_clk_rate_change_nb;
+static u32 gt_psv_new, gt_psv_bck, gt_target_rate;
static int gt_ppi;
static struct clock_event_device __percpu *gt_evt;
@@ -96,7 +102,10 @@ static void gt_compare_set(unsigned long delta, int periodic)
unsigned long ctrl;
counter += delta;
- ctrl = GT_CONTROL_TIMER_ENABLE;
+ ctrl = readl(gt_base + GT_CONTROL);
+ ctrl &= ~(GT_CONTROL_COMP_ENABLE | GT_CONTROL_IRQ_ENABLE |
+ GT_CONTROL_AUTO_INC);
+ ctrl |= GT_CONTROL_TIMER_ENABLE;
writel_relaxed(ctrl, gt_base + GT_CONTROL);
writel_relaxed(lower_32_bits(counter), gt_base + GT_COMP0);
writel_relaxed(upper_32_bits(counter), gt_base + GT_COMP1);
@@ -123,7 +132,7 @@ static int gt_clockevent_shutdown(struct clock_event_device *evt)
static int gt_clockevent_set_periodic(struct clock_event_device *evt)
{
- gt_compare_set(DIV_ROUND_CLOSEST(gt_clk_rate, HZ), 1);
+ gt_compare_set(DIV_ROUND_CLOSEST(gt_target_rate, HZ), 1);
return 0;
}
@@ -177,7 +186,7 @@ static int gt_starting_cpu(unsigned int cpu)
clk->cpumask = cpumask_of(cpu);
clk->rating = 300;
clk->irq = gt_ppi;
- clockevents_config_and_register(clk, gt_clk_rate,
+ clockevents_config_and_register(clk, gt_target_rate,
1, 0xffffffff);
enable_percpu_irq(clk->irq, IRQ_TYPE_NONE);
return 0;
@@ -232,9 +241,28 @@ static struct delay_timer gt_delay_timer = {
.read_current_timer = gt_read_long,
};
+static void gt_write_presc(u32 psv)
+{
+ u32 reg;
+
+ reg = readl(gt_base + GT_CONTROL);
+ reg &= ~GT_CONTROL_PRESCALER_MASK;
+ reg |= psv << GT_CONTROL_PRESCALER_SHIFT;
+ writel(reg, gt_base + GT_CONTROL);
+}
+
+static u32 gt_read_presc(void)
+{
+ u32 reg;
+
+ reg = readl(gt_base + GT_CONTROL);
+ reg &= GT_CONTROL_PRESCALER_MASK;
+ return reg >> GT_CONTROL_PRESCALER_SHIFT;
+}
+
static void __init gt_delay_timer_init(void)
{
- gt_delay_timer.freq = gt_clk_rate;
+ gt_delay_timer.freq = gt_target_rate;
register_current_timer_delay(&gt_delay_timer);
}
@@ -243,18 +271,81 @@ static int __init gt_clocksource_init(void)
writel(0, gt_base + GT_CONTROL);
writel(0, gt_base + GT_COUNTER0);
writel(0, gt_base + GT_COUNTER1);
- /* enables timer on all the cores */
- writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
+ /* set prescaler and enable timer on all the cores */
+ writel(((CONFIG_ARM_GT_INITIAL_PRESCALER_VAL - 1) <<
+ GT_CONTROL_PRESCALER_SHIFT)
+ | GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
#ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
- sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
+ sched_clock_register(gt_sched_clock_read, 64, gt_target_rate);
#endif
- return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
+ return clocksource_register_hz(&gt_clocksource, gt_target_rate);
+}
+
+static int gt_clk_rate_change_cb(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct clk_notifier_data *ndata = data;
+
+ switch (event) {
+ case PRE_RATE_CHANGE:
+ {
+ int psv;
+
+ psv = DIV_ROUND_CLOSEST(ndata->new_rate,
+ gt_target_rate);
+
+ if (abs(gt_target_rate - (ndata->new_rate / psv)) > MAX_F_ERR)
+ return NOTIFY_BAD;
+
+ psv--;
+
+ /* prescaler within legal range? */
+ if (psv < 0 || psv > GT_CONTROL_PRESCALER_MAX)
+ return NOTIFY_BAD;
+
+ /*
+ * store timer clock ctrl register so we can restore it in case
+ * of an abort.
+ */
+ gt_psv_bck = gt_read_presc();
+ gt_psv_new = psv;
+ /* scale down: adjust divider in post-change notification */
+ if (ndata->new_rate < ndata->old_rate)
+ return NOTIFY_DONE;
+
+ /* scale up: adjust divider now - before frequency change */
+ gt_write_presc(psv);
+ break;
+ }
+ case POST_RATE_CHANGE:
+ /* scale up: pre-change notification did the adjustment */
+ if (ndata->new_rate > ndata->old_rate)
+ return NOTIFY_OK;
+
+ /* scale down: adjust divider now - after frequency change */
+ gt_write_presc(gt_psv_new);
+ break;
+
+ case ABORT_RATE_CHANGE:
+ /* we have to undo the adjustment in case we scale up */
+ if (ndata->new_rate < ndata->old_rate)
+ return NOTIFY_OK;
+
+ /* restore original register value */
+ gt_write_presc(gt_psv_bck);
+ break;
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_DONE;
}
static int __init global_timer_of_register(struct device_node *np)
{
struct clk *gt_clk;
+ static unsigned long gt_clk_rate;
int err = 0;
/*
@@ -292,11 +383,20 @@ static int __init global_timer_of_register(struct device_node *np)
}
gt_clk_rate = clk_get_rate(gt_clk);
+ gt_target_rate = gt_clk_rate / CONFIG_ARM_GT_INITIAL_PRESCALER_VAL;
+ gt_clk_rate_change_nb.notifier_call =
+ gt_clk_rate_change_cb;
+ err = clk_notifier_register(gt_clk, &gt_clk_rate_change_nb);
+ if (err) {
+ pr_warn("Unable to register clock notifier\n");
+ goto out_clk;
+ }
+
gt_evt = alloc_percpu(struct clock_event_device);
if (!gt_evt) {
pr_warn("global-timer: can't allocate memory\n");
err = -ENOMEM;
- goto out_clk;
+ goto out_clk_nb;
}
err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
@@ -326,6 +426,8 @@ out_irq:
free_percpu_irq(gt_ppi, gt_evt);
out_free:
free_percpu(gt_evt);
+out_clk_nb:
+ clk_notifier_unregister(gt_clk, &gt_clk_rate_change_nb);
out_clk:
clk_disable_unprepare(gt_clk);
out_unmap:
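[Editor's note: to make the PRE_RATE_CHANGE arithmetic in gt_clk_rate_change_cb() above concrete, here is a small userspace sketch. The helper name pick_prescaler(), the reordered range check, and the sample rates are illustrative assumptions, not part of the patch.]

```c
#include <stdio.h>
#include <stdlib.h>

/* Simplified for positive values; the kernel macro also handles negatives. */
#define DIV_ROUND_CLOSEST(x, d)		(((x) + (d) / 2) / (d))
#define GT_CONTROL_PRESCALER_MAX	0xF
#define MAX_F_ERR			50	/* Hz, as in the patch */

/* Returns the value for the GT_CONTROL prescaler field, or -1 for NOTIFY_BAD. */
static int pick_prescaler(long target_rate, long new_rate)
{
	long psv = DIV_ROUND_CLOSEST(new_rate, target_rate);

	if (psv <= 0 || psv > GT_CONTROL_PRESCALER_MAX + 1)
		return -1;	/* divider outside 1..16 */
	if (labs(target_rate - new_rate / psv) > MAX_F_ERR)
		return -1;	/* new rate is not a clean multiple of the target */
	return (int)(psv - 1);	/* field value: divider is psv */
}

int main(void)
{
	long target = 333333333 / 2;	/* e.g. Zynq: 333.33 MHz parent, prescaler 2 */

	/* 333.33 MHz parent -> field value 1 (divide by 2): accepted */
	printf("333MHz -> %d\n", pick_prescaler(target, 333333333));
	/* 250 MHz parent -> ~41.7 MHz residual error: rejected */
	printf("250MHz -> %d\n", pick_prescaler(target, 250000000));
	return 0;
}
```

Only parent rates that land within 50 Hz of an integer multiple of the boot-time timer rate are accepted, which is why the Kconfig help text above speaks of bounds on how far the parent clock (and hence cpufreq) may move from its initial value.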
diff --git a/drivers/clocksource/ingenic-sysost.c b/drivers/clocksource/ingenic-sysost.c
index e77d58449005..a129840f14f9 100644
--- a/drivers/clocksource/ingenic-sysost.c
+++ b/drivers/clocksource/ingenic-sysost.c
@@ -186,7 +186,7 @@ static const struct clk_ops ingenic_ost_global_timer_ops = {
static const char * const ingenic_ost_clk_parents[] = { "ext" };
-static const struct ingenic_ost_clk_info ingenic_ost_clk_info[] = {
+static const struct ingenic_ost_clk_info x1000_ost_clk_info[] = {
[OST_CLK_PERCPU_TIMER] = {
.init_data = {
.name = "percpu timer",
@@ -414,14 +414,14 @@ static const struct ingenic_soc_info x1000_soc_info = {
.num_channels = 2,
};
-static const struct of_device_id __maybe_unused ingenic_ost_of_match[] __initconst = {
- { .compatible = "ingenic,x1000-ost", .data = &x1000_soc_info, },
+static const struct of_device_id __maybe_unused ingenic_ost_of_matches[] __initconst = {
+ { .compatible = "ingenic,x1000-ost", .data = &x1000_soc_info },
{ /* sentinel */ }
};
static int __init ingenic_ost_probe(struct device_node *np)
{
- const struct of_device_id *id = of_match_node(ingenic_ost_of_match, np);
+ const struct of_device_id *id = of_match_node(ingenic_ost_of_matches, np);
struct ingenic_ost *ost;
unsigned int i;
int ret;
@@ -462,7 +462,7 @@ static int __init ingenic_ost_probe(struct device_node *np)
ost->clocks->num = ost->soc_info->num_channels;
for (i = 0; i < ost->clocks->num; i++) {
- ret = ingenic_ost_register_clock(ost, i, &ingenic_ost_clk_info[i], ost->clocks);
+ ret = ingenic_ost_register_clock(ost, i, &x1000_ost_clk_info[i], ost->clocks);
if (ret) {
pr_crit("%s: Cannot register clock %d\n", __func__, i);
goto err_unregister_ost_clocks;
diff --git a/drivers/clocksource/samsung_pwm_timer.c b/drivers/clocksource/samsung_pwm_timer.c
index f760229d0c7f..6e46781bc9ac 100644
--- a/drivers/clocksource/samsung_pwm_timer.c
+++ b/drivers/clocksource/samsung_pwm_timer.c
@@ -4,7 +4,7 @@
* http://www.samsung.com/
*
* samsung - Common hr-timer support (s3c and s5p)
-*/
+ */
#include <linux/interrupt.h>
#include <linux/irq.h>
@@ -22,7 +22,6 @@
#include <clocksource/samsung_pwm.h>
-
/*
* Clocksource driver
*/
@@ -38,8 +37,8 @@
#define TCFG0_PRESCALER_MASK 0xff
#define TCFG0_PRESCALER1_SHIFT 8
-#define TCFG1_SHIFT(x) ((x) * 4)
-#define TCFG1_MUX_MASK 0xf
+#define TCFG1_SHIFT(x) ((x) * 4)
+#define TCFG1_MUX_MASK 0xf
/*
* Each channel occupies 4 bits in TCON register, but there is a gap of 4
@@ -62,7 +61,7 @@ EXPORT_SYMBOL(samsung_pwm_lock);
struct samsung_pwm_clocksource {
void __iomem *base;
- void __iomem *source_reg;
+ const void __iomem *source_reg;
unsigned int irq[SAMSUNG_PWM_NUM];
struct samsung_pwm_variant variant;
@@ -183,7 +182,7 @@ static void samsung_time_start(unsigned int channel, bool periodic)
}
static int samsung_set_next_event(unsigned long cycles,
- struct clock_event_device *evt)
+ struct clock_event_device *evt)
{
/*
* This check is needed to account for internal rounding
@@ -225,6 +224,7 @@ static void samsung_clockevent_resume(struct clock_event_device *cev)
if (pwm.variant.has_tint_cstat) {
u32 mask = (1 << pwm.event_id);
+
writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
}
}
@@ -248,6 +248,7 @@ static irqreturn_t samsung_clock_event_isr(int irq, void *dev_id)
if (pwm.variant.has_tint_cstat) {
u32 mask = (1 << pwm.event_id);
+
writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
}
@@ -272,7 +273,7 @@ static void __init samsung_clockevent_init(void)
time_event_device.cpumask = cpumask_of(0);
clockevents_config_and_register(&time_event_device,
- clock_rate, 1, pwm.tcnt_max);
+ clock_rate, 1, pwm.tcnt_max);
irq_number = pwm.irq[pwm.event_id];
if (request_irq(irq_number, samsung_clock_event_isr,
@@ -282,6 +283,7 @@ static void __init samsung_clockevent_init(void)
if (pwm.variant.has_tint_cstat) {
u32 mask = (1 << pwm.event_id);
+
writel(mask | (mask << 5), pwm.base + REG_TINT_CSTAT);
}
}
@@ -347,7 +349,7 @@ static int __init samsung_clocksource_init(void)
pwm.source_reg = pwm.base + pwm.source_id * 0x0c + 0x14;
sched_clock_register(samsung_read_sched_clock,
- pwm.variant.bits, clock_rate);
+ pwm.variant.bits, clock_rate);
samsung_clocksource.mask = CLOCKSOURCE_MASK(pwm.variant.bits);
return clocksource_register_hz(&samsung_clocksource, clock_rate);
@@ -398,7 +400,8 @@ static int __init _samsung_pwm_clocksource_init(void)
}
void __init samsung_pwm_clocksource_init(void __iomem *base,
- unsigned int *irqs, struct samsung_pwm_variant *variant)
+ unsigned int *irqs,
+ const struct samsung_pwm_variant *variant)
{
pwm.base = base;
memcpy(&pwm.variant, variant, sizeof(pwm.variant));
@@ -418,7 +421,7 @@ static int __init samsung_pwm_alloc(struct device_node *np,
struct property *prop;
const __be32 *cur;
u32 val;
- int i;
+ int i, ret;
memcpy(&pwm.variant, variant, sizeof(pwm.variant));
for (i = 0; i < SAMSUNG_PWM_NUM; ++i)
@@ -441,10 +444,24 @@ static int __init samsung_pwm_alloc(struct device_node *np,
pwm.timerclk = of_clk_get_by_name(np, "timers");
if (IS_ERR(pwm.timerclk)) {
pr_crit("failed to get timers clock for timer\n");
- return PTR_ERR(pwm.timerclk);
+ ret = PTR_ERR(pwm.timerclk);
+ goto err_clk;
}
- return _samsung_pwm_clocksource_init();
+ ret = _samsung_pwm_clocksource_init();
+ if (ret)
+ goto err_clocksource;
+
+ return 0;
+
+err_clocksource:
+ clk_put(pwm.timerclk);
+ pwm.timerclk = NULL;
+err_clk:
+ iounmap(pwm.base);
+ pwm.base = NULL;
+
+ return ret;
}
static const struct samsung_pwm_variant s3c24xx_variant = {
diff --git a/drivers/clocksource/timer-mediatek.c b/drivers/clocksource/timer-mediatek.c
index 9318edcd8963..ab63b95e414f 100644
--- a/drivers/clocksource/timer-mediatek.c
+++ b/drivers/clocksource/timer-mediatek.c
@@ -241,6 +241,28 @@ static void mtk_gpt_enable_irq(struct timer_of *to, u8 timer)
timer_of_base(to) + GPT_IRQ_EN_REG);
}
+static void mtk_gpt_resume(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ mtk_gpt_enable_irq(to, TIMER_CLK_EVT);
+}
+
+static void mtk_gpt_suspend(struct clock_event_device *clk)
+{
+ struct timer_of *to = to_timer_of(clk);
+
+ /* Disable all interrupts */
+ writel(0x0, timer_of_base(to) + GPT_IRQ_EN_REG);
+
+ /*
+ * This is called with interrupts disabled,
+ * so we need to ack any interrupt that is pending
+ * or for example ATF will prevent a suspend from completing.
+ */
+ writel(0x3f, timer_of_base(to) + GPT_IRQ_ACK_REG);
+}
+
static struct timer_of to = {
.flags = TIMER_OF_IRQ | TIMER_OF_BASE | TIMER_OF_CLOCK,
@@ -286,6 +308,8 @@ static int __init mtk_gpt_init(struct device_node *node)
to.clkevt.set_state_oneshot = mtk_gpt_clkevt_shutdown;
to.clkevt.tick_resume = mtk_gpt_clkevt_shutdown;
to.clkevt.set_next_event = mtk_gpt_clkevt_next_event;
+ to.clkevt.suspend = mtk_gpt_suspend;
+ to.clkevt.resume = mtk_gpt_resume;
to.of_irq.handler = mtk_gpt_interrupt;
ret = timer_of_init(node, &to);
diff --git a/drivers/clocksource/timer-ti-dm.c b/drivers/clocksource/timer-ti-dm.c
index 33eeabf9c3d1..3e52c5226c4d 100644
--- a/drivers/clocksource/timer-ti-dm.c
+++ b/drivers/clocksource/timer-ti-dm.c
@@ -78,6 +78,9 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
static void omap_timer_restore_context(struct omap_dm_timer *timer)
{
+ __omap_dm_timer_write(timer, OMAP_TIMER_OCP_CFG_OFFSET,
+ timer->context.ocp_cfg, 0);
+
omap_dm_timer_write_reg(timer, OMAP_TIMER_WAKEUP_EN_REG,
timer->context.twer);
omap_dm_timer_write_reg(timer, OMAP_TIMER_COUNTER_REG,
@@ -95,6 +98,9 @@ static void omap_timer_restore_context(struct omap_dm_timer *timer)
static void omap_timer_save_context(struct omap_dm_timer *timer)
{
+ timer->context.ocp_cfg =
+ __omap_dm_timer_read(timer, OMAP_TIMER_OCP_CFG_OFFSET, 0);
+
timer->context.tclr =
omap_dm_timer_read_reg(timer, OMAP_TIMER_CTRL_REG);
timer->context.twer =
@@ -122,7 +128,8 @@ static int omap_timer_context_notifier(struct notifier_block *nb,
break;
omap_timer_save_context(timer);
break;
- case CPU_CLUSTER_PM_ENTER_FAILED:
+ case CPU_CLUSTER_PM_ENTER_FAILED: /* No need to restore context */
+ break;
case CPU_CLUSTER_PM_EXIT:
if ((timer->capability & OMAP_TIMER_ALWON) ||
!atomic_read(&timer->enabled))