author		Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-05-10 02:18:46 +0300
committer	Rafael J. Wysocki <rafael.j.wysocki@intel.com>	2015-05-14 22:37:47 +0300
commit		827a5aefc542b8fb17c00de06118e5cd0e3800f2 (patch)
tree		5728ec6fb8a0b67057e35d131ed2a2d7d67b2a69
parent		faad38492814112e3e7ce94d90123bbe301fff33 (diff)
download	linux-827a5aefc542b8fb17c00de06118e5cd0e3800f2.tar.xz
sched / idle: Call default_idle_call() from cpuidle_enter_state()
The check of the cpuidle_enter() return value against -EBUSY made in
call_cpuidle() will not be necessary any more if cpuidle_enter_state()
calls default_idle_call() directly when it is about to return -EBUSY,
so make that happen and eliminate the check.

Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Tested-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Tested-by: Sudeep Holla <sudeep.holla@arm.com>
Acked-by: Kevin Hilman <khilman@linaro.org>
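As a quick orientation before the diff, here is a minimal, self-contained C sketch of the control flow this change produces. Every identifier with a sketch_ prefix (and the SKETCH_EBUSY constant) is a stand-in invented purely for illustration; only the names cpuidle_enter_state(), call_cpuidle() and default_idle_call() correspond to real kernel functions, and the bodies below are heavily condensed rather than copied from the kernel sources.

#include <stdio.h>

#define SKETCH_EBUSY 16	/* stand-in for the kernel's -EBUSY */

static void sketch_default_idle_call(void)
{
	/* In the kernel this ends up in the default arch idle routine. */
	printf("default idle routine used\n");
}

static int sketch_tick_broadcast_enter(void)
{
	/* Pretend the broadcast timer cannot be claimed. */
	return 1;
}

static int sketch_cpuidle_enter_state(int broadcast)
{
	/* After the patch: fall back to the default idle routine here ... */
	if (broadcast && sketch_tick_broadcast_enter()) {
		sketch_default_idle_call();
		return -SKETCH_EBUSY;
	}
	/* ... otherwise enter the selected idle state (omitted). */
	return 0;
}

static int sketch_call_cpuidle(int next_state, int broadcast)
{
	if (next_state < 0) {
		sketch_default_idle_call();
		return next_state;
	}
	/* ... so the caller no longer needs its own -EBUSY check. */
	return sketch_cpuidle_enter_state(broadcast);
}

int main(void)
{
	printf("returned %d\n", sketch_call_cpuidle(1, 1));
	return 0;
}

Either way the CPU still gets idled when the broadcast timer is unavailable; the patch only moves the fallback decision into cpuidle_enter_state(), so callers can treat -EBUSY as purely informational.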
-rw-r--r--	drivers/cpuidle/cpuidle.c	4
-rw-r--r--	include/linux/cpuidle.h		1
-rw-r--r--	kernel/sched/idle.c		20
3 files changed, 11 insertions(+), 14 deletions(-)
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 9306dd5f460e..a7b9e679a2ef 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -167,8 +167,10 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
 	 * local timer will be shut down. If a local timer is used from another
 	 * CPU as a broadcast timer, this call may fail if it is not available.
 	 */
-	if (broadcast && tick_broadcast_enter())
+	if (broadcast && tick_broadcast_enter()) {
+		default_idle_call();
 		return -EBUSY;
+	}
 
 	/* Take note of the planned idle state. */
 	sched_idle_set_state(target_state);
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 301eaaab40e3..c7a63643658e 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -202,6 +202,7 @@ static inline struct cpuidle_driver *cpuidle_get_cpu_driver(
 
 /* kernel/sched/idle.c */
 extern void sched_idle_set_state(struct cpuidle_state *idle_state);
+extern void default_idle_call(void);
 
 #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED
 void cpuidle_coupled_parallel_barrier(struct cpuidle_device *dev, atomic_t *a);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 5d9f549fffa8..594275ed2620 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -76,12 +76,13 @@ void __weak arch_cpu_idle(void)
 	local_irq_enable();
 }
 
-static void default_idle_call(void)
+/**
+ * default_idle_call - Default CPU idle routine.
+ *
+ * To use when the cpuidle framework cannot be used.
+ */
+void default_idle_call(void)
 {
-	/*
-	 * We can't use the cpuidle framework, let's use the default idle
-	 * routine.
-	 */
 	if (current_clr_polling_and_test())
 		local_irq_enable();
 	else
@@ -91,8 +92,6 @@ static void default_idle_call(void)
 static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 			int next_state)
 {
-	int entered_state;
-
 	/* Fall back to the default arch idle method on errors. */
 	if (next_state < 0) {
 		default_idle_call();
@@ -114,12 +113,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
 	 * This function will block until an interrupt occurs and will take
 	 * care of re-enabling the local interrupts
 	 */
-	entered_state = cpuidle_enter(drv, dev, next_state);
-
-	if (entered_state == -EBUSY)
-		default_idle_call();
-
-	return entered_state;
+	return cpuidle_enter(drv, dev, next_state);
 }
 
 /**