author    | Paul Walmsley <paul@pwsan.com> | 2013-01-26 11:58:13 +0400
committer | Paul Walmsley <paul@pwsan.com> | 2013-01-30 01:59:56 +0400
commit    | 1cd96478cf1505a32861ffed8d8f1189d1b1b8ab (patch)
tree      | d65ebdd4a5be3d688147b6acf3a2777fc31c9f13 /arch/arm/mach-omap2/cpuidle34xx.c
parent    | fd6b42a5614077b04ce8f34fbbcf16864723f5df (diff)
download  | linux-1cd96478cf1505a32861ffed8d8f1189d1b1b8ab.tar.xz
ARM: OMAP3xxx: CPUIdle: optimize __omap3_enter_idle()
Avoid programming the MPU and CORE powerdomain next-power-state
registers if those powerdomains will never enter low-power states
(e.g., in the idle state commonly referred to as "C1").
To avoid making assumptions about CPUIdle states based on their order
in the list, use a flag to mark CPUIdle states that don't enter
powerdomain low-power states.
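In condensed form (excerpted from the diff below and trimmed for
illustration, not a complete function), the state data gains a flags
field and the idle-entry path branches on the flag instead of on the
state's index:

    /* Condensed excerpt of the change; see the full hunks below. */
    #define OMAP_CPUIDLE_CX_NO_CLKDM_IDLE	BIT(0)

    struct omap3_idle_statedata {
    	u8 mpu_state;
    	u8 core_state;
    	u8 per_min_state;
    	u8 flags;		/* e.g. OMAP_CPUIDLE_CX_NO_CLKDM_IDLE */
    };

    /* In __omap3_enter_idle(): */
    if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) {
    	/* This state never leaves ON: keep the MPU clockdomain active
    	 * and skip the next-power-state register writes entirely. */
    	clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]);
    } else {
    	pwrdm_set_next_pwrst(mpu_pd, cx->mpu_state);
    	pwrdm_set_next_pwrst(core_pd, cx->core_state);
    }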
Avoid a previous-power-state register read on the MPU powerdomain
unless we know that the MPU was supposed to go OFF during the last
state transition. Previous-power-state register reads can be very
expensive, so it's worth avoiding these when possible.
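On the wakeup path this becomes a compound test (condensed from the
diff below): the slow previous-power-state read only happens when this
C-state actually programmed the MPU to go OFF.

    /* Exit path, condensed: gate the expensive register read on the
     * power state this C-state asked for. */
    if (cx->mpu_state == PWRDM_POWER_OFF &&
        pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
    	cpu_pm_exit();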
Since the CORE_L3 clockdomain can't go inactive unless the MPU
clockdomain is also inactive, there's little point in blocking autoidle
on the CORE_L3 clockdomain in the "C1" state, since we've programmed the
MPU clockdomain to stay active.
Remove the unnecessary code.
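For reference, the resulting "C1" re-enable path (condensed from the
diff below) touches only the MPU clockdomain; the CORE_L3 deny/allow
pair is gone:

    /* Re-allow idle for C1: only the MPU clockdomain was denied idle.
     * CORE_L3 was never blocked, since it cannot idle while the MPU
     * clockdomain is kept active. */
    if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
    	clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);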
Signed-off-by: Paul Walmsley <paul@pwsan.com>
Cc: Kevin Hilman <khilman@deeprootsystems.com>
Diffstat (limited to 'arch/arm/mach-omap2/cpuidle34xx.c')
-rw-r--r-- | arch/arm/mach-omap2/cpuidle34xx.c | 35
1 file changed, 22 insertions, 13 deletions
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c
index cba69455647a..80392fca86c6 100644
--- a/arch/arm/mach-omap2/cpuidle34xx.c
+++ b/arch/arm/mach-omap2/cpuidle34xx.c
@@ -39,11 +39,23 @@ struct omap3_idle_statedata {
 	u8 mpu_state;
 	u8 core_state;
 	u8 per_min_state;
+	u8 flags;
 };
 
 static struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd;
 
 /*
+ * Possible flag bits for struct omap3_idle_statedata.flags:
+ *
+ * OMAP_CPUIDLE_CX_NO_CLKDM_IDLE: don't allow the MPU clockdomain to go
+ *	inactive. This in turn prevents the MPU DPLL from entering autoidle
+ *	mode, so wakeup latency is greatly reduced, at the cost of additional
+ *	energy consumption. This also prevents the CORE clockdomain from
+ *	entering idle.
+ */
+#define OMAP_CPUIDLE_CX_NO_CLKDM_IDLE		BIT(0)
+
+/*
  * Prevent PER OFF if CORE is not in RETention or OFF as this would
  * disable PER wakeups completely.
  */
@@ -53,6 +65,7 @@ static struct omap3_idle_statedata omap3_idle_data[] = {
 		.core_state = PWRDM_POWER_ON,
 		/* In C1 do not allow PER state lower than CORE state */
 		.per_min_state = PWRDM_POWER_ON,
+		.flags = OMAP_CPUIDLE_CX_NO_CLKDM_IDLE,
 	},
 	{
 		.mpu_state = PWRDM_POWER_ON,
@@ -93,27 +106,25 @@ static int __omap3_enter_idle(struct cpuidle_device *dev,
 			      int index)
 {
 	struct omap3_idle_statedata *cx = &omap3_idle_data[index];
-	u32 mpu_state = cx->mpu_state, core_state = cx->core_state;
 
 	local_fiq_disable();
 
-	pwrdm_set_next_pwrst(mpu_pd, mpu_state);
-	pwrdm_set_next_pwrst(core_pd, core_state);
-
 	if (omap_irq_pending() || need_resched())
 		goto return_sleep_time;
 
 	/* Deny idle for C1 */
-	if (index == 0) {
+	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE) {
 		clkdm_deny_idle(mpu_pd->pwrdm_clkdms[0]);
-		clkdm_deny_idle(core_pd->pwrdm_clkdms[0]);
+	} else {
+		pwrdm_set_next_pwrst(mpu_pd, cx->mpu_state);
+		pwrdm_set_next_pwrst(core_pd, cx->core_state);
 	}
 
 	/*
 	 * Call idle CPU PM enter notifier chain so that
 	 * VFP context is saved.
 	 */
-	if (mpu_state == PWRDM_POWER_OFF)
+	if (cx->mpu_state == PWRDM_POWER_OFF)
 		cpu_pm_enter();
 
 	/* Execute ARM wfi */
@@ -123,17 +134,15 @@ static int __omap3_enter_idle(struct cpuidle_device *dev,
 	 * Call idle CPU PM enter notifier chain to restore
 	 * VFP context.
 	 */
-	if (pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
+	if (cx->mpu_state == PWRDM_POWER_OFF &&
+	    pwrdm_read_prev_pwrst(mpu_pd) == PWRDM_POWER_OFF)
 		cpu_pm_exit();
 
 	/* Re-allow idle for C1 */
-	if (index == 0) {
+	if (cx->flags & OMAP_CPUIDLE_CX_NO_CLKDM_IDLE)
 		clkdm_allow_idle(mpu_pd->pwrdm_clkdms[0]);
-		clkdm_allow_idle(core_pd->pwrdm_clkdms[0]);
-	}
 
 return_sleep_time:
-
 	local_fiq_enable();
 
 	return index;
@@ -198,7 +207,7 @@ static int next_valid_state(struct cpuidle_device *dev,
 	 * Start search from the next (lower) state.
 	 */
 	for (idx = index - 1; idx >= 0; idx--) {
-		cx = &omap3_idle_data[idx];
+		cx = &omap3_idle_data[idx];
 		if ((cx->mpu_state >= mpu_deepest_state) &&
 		    (cx->core_state >= core_deepest_state)) {
 			next_index = idx;