Diffstat (limited to 'drivers/base/power')
-rw-r--r--  drivers/base/power/Makefile                                              2
-rw-r--r--  drivers/base/power/clock_ops.c                                           6
-rw-r--r--  drivers/base/power/domain.c                                            368
-rw-r--r--  drivers/base/power/domain_governor.c                                     6
-rw-r--r--  drivers/base/power/generic_ops.c                                        23
-rw-r--r--  drivers/base/power/opp/Makefile                                          2
-rw-r--r--  drivers/base/power/opp/core.c (renamed from drivers/base/power/opp.c)  363
-rw-r--r--  drivers/base/power/opp/cpu.c                                           267
-rw-r--r--  drivers/base/power/opp/opp.h                                           143
-rw-r--r--  drivers/base/power/wakeup.c                                             16
10 files changed, 555 insertions(+), 641 deletions(-)
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index f94a6ccfe787..5998c53280f5 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,7 +1,7 @@
obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o wakeirq.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
-obj-$(CONFIG_PM_OPP) += opp.o
+obj-$(CONFIG_PM_OPP) += opp/
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
obj-$(CONFIG_HAVE_CLK) += clock_ops.o
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index 652b5a367c1f..fd0973b922a7 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -17,7 +17,7 @@
#include <linux/err.h>
#include <linux/pm_runtime.h>
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_CLK
enum pce_status {
PCE_STATUS_NONE = 0,
@@ -404,7 +404,7 @@ int pm_clk_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
-#else /* !CONFIG_PM */
+#else /* !CONFIG_PM_CLK */
/**
* enable_clock - Enable a device clock.
@@ -484,7 +484,7 @@ static int pm_clk_notify(struct notifier_block *nb,
return 0;
}
-#endif /* !CONFIG_PM */
+#endif /* !CONFIG_PM_CLK */
/**
* pm_clk_add_notifier - Add bus type notifier for power management clocks.
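These hunks only retag the PM clock helpers from CONFIG_PM to CONFIG_PM_CLK; the API itself is unchanged. A minimal usage sketch, assuming a hypothetical platform driver "foo" whose clocks are managed through the pm_clk helpers:

#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_runtime.h>

/* Hypothetical probe: create the device's PM clock list so that the
 * pm_clk_runtime_suspend()/pm_clk_runtime_resume() helpers above can
 * gate the clocks across runtime PM transitions. */
static int foo_probe(struct platform_device *pdev)
{
	int ret;

	ret = pm_clk_create(&pdev->dev);
	if (ret)
		return ret;

	ret = pm_clk_add(&pdev->dev, NULL);	/* first (unnamed) clock */
	if (ret) {
		pm_clk_destroy(&pdev->dev);
		return ret;
	}

	pm_runtime_enable(&pdev->dev);
	return 0;
}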
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 16550c63d611..a7dfdf9f15ba 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -34,43 +34,9 @@
__ret; \
})
-#define GENPD_DEV_TIMED_CALLBACK(genpd, type, callback, dev, field, name) \
-({ \
- ktime_t __start = ktime_get(); \
- type __retval = GENPD_DEV_CALLBACK(genpd, type, callback, dev); \
- s64 __elapsed = ktime_to_ns(ktime_sub(ktime_get(), __start)); \
- struct gpd_timing_data *__td = &dev_gpd_data(dev)->td; \
- if (!__retval && __elapsed > __td->field) { \
- __td->field = __elapsed; \
- dev_dbg(dev, name " latency exceeded, new value %lld ns\n", \
- __elapsed); \
- genpd->max_off_time_changed = true; \
- __td->constraint_changed = true; \
- } \
- __retval; \
-})
-
static LIST_HEAD(gpd_list);
static DEFINE_MUTEX(gpd_list_lock);
-static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
-{
- struct generic_pm_domain *genpd = NULL, *gpd;
-
- if (IS_ERR_OR_NULL(domain_name))
- return NULL;
-
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (!strcmp(gpd->name, domain_name)) {
- genpd = gpd;
- break;
- }
- }
- mutex_unlock(&gpd_list_lock);
- return genpd;
-}
-
/*
* Get the generic PM domain for a particular struct device.
* This validates the struct device pointer, the PM domain pointer,
@@ -110,18 +76,12 @@ static struct generic_pm_domain *dev_to_genpd(struct device *dev)
static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev)
{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, stop, dev,
- stop_latency_ns, "stop");
+ return GENPD_DEV_CALLBACK(genpd, int, stop, dev);
}
-static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev,
- bool timed)
+static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev)
{
- if (!timed)
- return GENPD_DEV_CALLBACK(genpd, int, start, dev);
-
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev,
- start_latency_ns, "start");
+ return GENPD_DEV_CALLBACK(genpd, int, start, dev);
}
static bool genpd_sd_counter_dec(struct generic_pm_domain *genpd)
@@ -140,19 +100,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd)
smp_mb__after_atomic();
}
-static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
-{
- s64 usecs64;
-
- if (!genpd->cpuidle_data)
- return;
-
- usecs64 = genpd->power_on_latency_ns;
- do_div(usecs64, NSEC_PER_USEC);
- usecs64 += genpd->cpuidle_data->saved_exit_latency;
- genpd->cpuidle_data->idle_state->exit_latency = usecs64;
-}
-
static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
{
ktime_t time_start;
@@ -176,7 +123,6 @@ static int genpd_power_on(struct generic_pm_domain *genpd, bool timed)
genpd->power_on_latency_ns = elapsed_ns;
genpd->max_off_time_changed = true;
- genpd_recalc_cpu_exit_latency(genpd);
pr_debug("%s: Power-%s latency exceeded, new value %lld ns\n",
genpd->name, "on", elapsed_ns);
@@ -213,10 +159,10 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed)
}
/**
- * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff().
+ * genpd_queue_power_off_work - Queue up the execution of genpd_poweroff().
 * @genpd: PM domain to power off.
*
- * Queue up the execution of pm_genpd_poweroff() unless it's already been done
+ * Queue up the execution of genpd_poweroff() unless it's already been done
* before.
*/
static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
@@ -224,14 +170,16 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
queue_work(pm_wq, &genpd->power_off_work);
}
+static int genpd_poweron(struct generic_pm_domain *genpd);
+
/**
- * __pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * __genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
*
* Restore power to @genpd and all of its masters so that it is possible to
* resume a device belonging to it.
*/
-static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
+static int __genpd_poweron(struct generic_pm_domain *genpd)
{
struct gpd_link *link;
int ret = 0;
@@ -240,13 +188,6 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
|| (genpd->prepared_count > 0 && genpd->suspend_power_off))
return 0;
- if (genpd->cpuidle_data) {
- cpuidle_pause_and_lock();
- genpd->cpuidle_data->idle_state->disabled = true;
- cpuidle_resume_and_unlock();
- goto out;
- }
-
/*
* The list is guaranteed not to change while the loop below is being
* executed, unless one of the masters' .power_on() callbacks fiddles
@@ -255,7 +196,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
list_for_each_entry(link, &genpd->slave_links, slave_node) {
genpd_sd_counter_inc(link->master);
- ret = pm_genpd_poweron(link->master);
+ ret = genpd_poweron(link->master);
if (ret) {
genpd_sd_counter_dec(link->master);
goto err;
@@ -266,7 +207,6 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
if (ret)
goto err;
- out:
genpd->status = GPD_STATE_ACTIVE;
return 0;
@@ -282,46 +222,28 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
}
/**
- * pm_genpd_poweron - Restore power to a given PM domain and its masters.
+ * genpd_poweron - Restore power to a given PM domain and its masters.
* @genpd: PM domain to power up.
*/
-int pm_genpd_poweron(struct generic_pm_domain *genpd)
+static int genpd_poweron(struct generic_pm_domain *genpd)
{
int ret;
mutex_lock(&genpd->lock);
- ret = __pm_genpd_poweron(genpd);
+ ret = __genpd_poweron(genpd);
mutex_unlock(&genpd->lock);
return ret;
}
-/**
- * pm_genpd_name_poweron - Restore power to a given PM domain and its masters.
- * @domain_name: Name of the PM domain to power up.
- */
-int pm_genpd_name_poweron(const char *domain_name)
-{
- struct generic_pm_domain *genpd;
-
- genpd = pm_genpd_lookup_name(domain_name);
- return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
-}
-
static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev)
{
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev,
- save_state_latency_ns, "state save");
+ return GENPD_DEV_CALLBACK(genpd, int, save_state, dev);
}
static int genpd_restore_dev(struct generic_pm_domain *genpd,
- struct device *dev, bool timed)
+ struct device *dev)
{
- if (!timed)
- return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
-
- return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev,
- restore_state_latency_ns,
- "state restore");
+ return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev);
}
static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
@@ -365,13 +287,14 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
}
/**
- * pm_genpd_poweroff - Remove power from a given PM domain.
+ * genpd_poweroff - Remove power from a given PM domain.
* @genpd: PM domain to power down.
+ * @is_async: PM domain is powered off from a scheduled work item
*
* If all of the @genpd's devices have been suspended and all of its subdomains
* have been powered down, remove power from @genpd.
*/
-static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
+static int genpd_poweroff(struct generic_pm_domain *genpd, bool is_async)
{
struct pm_domain_data *pdd;
struct gpd_link *link;
@@ -403,7 +326,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
not_suspended++;
}
- if (not_suspended > genpd->in_progress)
+ if (not_suspended > 1 || (not_suspended == 1 && is_async))
return -EBUSY;
if (genpd->gov && genpd->gov->power_down_ok) {
@@ -411,21 +334,6 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
return -EAGAIN;
}
- if (genpd->cpuidle_data) {
- /*
- * If cpuidle_data is set, cpuidle should turn the domain off
- * when the CPU in it is idle. In that case we don't decrement
- * the subdomain counts of the master domains, so that power is
- * not removed from the current domain prematurely as a result
- * of cutting off the masters' power.
- */
- genpd->status = GPD_STATE_POWER_OFF;
- cpuidle_pause_and_lock();
- genpd->cpuidle_data->idle_state->disabled = false;
- cpuidle_resume_and_unlock();
- return 0;
- }
-
if (genpd->power_off) {
int ret;
@@ -434,10 +342,10 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
/*
* If sd_count > 0 at this point, one of the subdomains hasn't
- * managed to call pm_genpd_poweron() for the master yet after
- * incrementing it. In that case pm_genpd_poweron() will wait
+ * managed to call genpd_poweron() for the master yet after
+ * incrementing it. In that case genpd_poweron() will wait
* for us to drop the lock, so we can call .power_off() and let
- * the pm_genpd_poweron() restore power for us (this shouldn't
+ * the genpd_poweron() restore power for us (this shouldn't
* happen very often).
*/
ret = genpd_power_off(genpd, true);
@@ -466,7 +374,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
genpd = container_of(work, struct generic_pm_domain, power_off_work);
mutex_lock(&genpd->lock);
- pm_genpd_poweroff(genpd);
+ genpd_poweroff(genpd, true);
mutex_unlock(&genpd->lock);
}
@@ -482,6 +390,9 @@ static int pm_genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
bool (*stop_ok)(struct device *__dev);
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ ktime_t time_start;
+ s64 elapsed_ns;
int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -494,16 +405,29 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (stop_ok && !stop_ok(dev))
return -EBUSY;
+ /* Measure suspend latency. */
+ time_start = ktime_get();
+
ret = genpd_save_dev(genpd, dev);
if (ret)
return ret;
ret = genpd_stop_dev(genpd, dev);
if (ret) {
- genpd_restore_dev(genpd, dev, true);
+ genpd_restore_dev(genpd, dev);
return ret;
}
+ /* Update suspend latency value if the measured time exceeds it. */
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > td->suspend_latency_ns) {
+ td->suspend_latency_ns = elapsed_ns;
+ dev_dbg(dev, "suspend latency exceeded, %lld ns\n",
+ elapsed_ns);
+ genpd->max_off_time_changed = true;
+ td->constraint_changed = true;
+ }
+
/*
* If power.irq_safe is set, this routine will be run with interrupts
* off, so it can't use mutexes.
@@ -512,9 +436,7 @@ static int pm_genpd_runtime_suspend(struct device *dev)
return 0;
mutex_lock(&genpd->lock);
- genpd->in_progress++;
- pm_genpd_poweroff(genpd);
- genpd->in_progress--;
+ genpd_poweroff(genpd, false);
mutex_unlock(&genpd->lock);
return 0;
@@ -531,6 +453,9 @@ static int pm_genpd_runtime_suspend(struct device *dev)
static int pm_genpd_runtime_resume(struct device *dev)
{
struct generic_pm_domain *genpd;
+ struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
+ ktime_t time_start;
+ s64 elapsed_ns;
int ret;
bool timed = true;
@@ -547,15 +472,31 @@ static int pm_genpd_runtime_resume(struct device *dev)
}
mutex_lock(&genpd->lock);
- ret = __pm_genpd_poweron(genpd);
+ ret = __genpd_poweron(genpd);
mutex_unlock(&genpd->lock);
if (ret)
return ret;
out:
- genpd_start_dev(genpd, dev, timed);
- genpd_restore_dev(genpd, dev, timed);
+ /* Measure resume latency. */
+ if (timed)
+ time_start = ktime_get();
+
+ genpd_start_dev(genpd, dev);
+ genpd_restore_dev(genpd, dev);
+
+ /* Update resume latency value if the measured time exceeds it. */
+ if (timed) {
+ elapsed_ns = ktime_to_ns(ktime_sub(ktime_get(), time_start));
+ if (elapsed_ns > td->resume_latency_ns) {
+ td->resume_latency_ns = elapsed_ns;
+ dev_dbg(dev, "resume latency exceeded, %lld ns\n",
+ elapsed_ns);
+ genpd->max_off_time_changed = true;
+ td->constraint_changed = true;
+ }
+ }
return 0;
}
@@ -569,15 +510,15 @@ static int __init pd_ignore_unused_setup(char *__unused)
__setup("pd_ignore_unused", pd_ignore_unused_setup);
/**
- * pm_genpd_poweroff_unused - Power off all PM domains with no devices in use.
+ * genpd_poweroff_unused - Power off all PM domains with no devices in use.
*/
-void pm_genpd_poweroff_unused(void)
+static int __init genpd_poweroff_unused(void)
{
struct generic_pm_domain *genpd;
if (pd_ignore_unused) {
pr_warn("genpd: Not disabling unused power domains\n");
- return;
+ return 0;
}
mutex_lock(&gpd_list_lock);
@@ -586,11 +527,7 @@ void pm_genpd_poweroff_unused(void)
genpd_queue_power_off_work(genpd);
mutex_unlock(&gpd_list_lock);
-}
-static int __init genpd_poweroff_unused(void)
-{
- pm_genpd_poweroff_unused();
return 0;
}
late_initcall(genpd_poweroff_unused);
@@ -764,7 +701,7 @@ static int pm_genpd_prepare(struct device *dev)
/*
* The PM domain must be in the GPD_STATE_ACTIVE state at this point,
- * so pm_genpd_poweron() will return immediately, but if the device
+ * so genpd_poweron() will return immediately, but if the device
* is suspended (e.g. it's been stopped by genpd_stop_dev()), we need
* to make it operational.
*/
@@ -890,7 +827,7 @@ static int pm_genpd_resume_noirq(struct device *dev)
pm_genpd_sync_poweron(genpd, true);
genpd->suspended_count--;
- return genpd_start_dev(genpd, dev, true);
+ return genpd_start_dev(genpd, dev);
}
/**
@@ -1018,7 +955,8 @@ static int pm_genpd_thaw_noirq(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev, true);
+ return genpd->suspend_power_off ?
+ 0 : genpd_start_dev(genpd, dev);
}
/**
@@ -1112,7 +1050,7 @@ static int pm_genpd_restore_noirq(struct device *dev)
pm_genpd_sync_poweron(genpd, true);
- return genpd_start_dev(genpd, dev, true);
+ return genpd_start_dev(genpd, dev);
}
/**
@@ -1317,18 +1255,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
}
/**
- * __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
- * @domain_name: Name of the PM domain to add the device to.
- * @dev: Device to be added.
- * @td: Set of PM QoS timing parameters to attach to the device.
- */
-int __pm_genpd_name_add_device(const char *domain_name, struct device *dev,
- struct gpd_timing_data *td)
-{
- return __pm_genpd_add_device(pm_genpd_lookup_name(domain_name), dev, td);
-}
-
-/**
* pm_genpd_remove_device - Remove a device from an I/O PM domain.
* @genpd: PM domain to remove the device from.
* @dev: Device to be removed.
@@ -1429,35 +1355,6 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd,
}
/**
- * pm_genpd_add_subdomain_names - Add a subdomain to an I/O PM domain.
- * @master_name: Name of the master PM domain to add the subdomain to.
- * @subdomain_name: Name of the subdomain to be added.
- */
-int pm_genpd_add_subdomain_names(const char *master_name,
- const char *subdomain_name)
-{
- struct generic_pm_domain *master = NULL, *subdomain = NULL, *gpd;
-
- if (IS_ERR_OR_NULL(master_name) || IS_ERR_OR_NULL(subdomain_name))
- return -EINVAL;
-
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (!master && !strcmp(gpd->name, master_name))
- master = gpd;
-
- if (!subdomain && !strcmp(gpd->name, subdomain_name))
- subdomain = gpd;
-
- if (master && subdomain)
- break;
- }
- mutex_unlock(&gpd_list_lock);
-
- return pm_genpd_add_subdomain(master, subdomain);
-}
-
-/**
* pm_genpd_remove_subdomain - Remove a subdomain from an I/O PM domain.
* @genpd: Master PM domain to remove the subdomain from.
* @subdomain: Subdomain to be removed.
@@ -1504,124 +1401,6 @@ out:
return ret;
}
-/**
- * pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
- * @genpd: PM domain to be connected with cpuidle.
- * @state: cpuidle state this domain can disable/enable.
- *
- * Make a PM domain behave as though it contained a CPU core, that is, instead
- * of calling its power down routine it will enable the given cpuidle state so
- * that the cpuidle subsystem can power it down (if possible and desirable).
- */
-int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
-{
- struct cpuidle_driver *cpuidle_drv;
- struct gpd_cpuidle_data *cpuidle_data;
- struct cpuidle_state *idle_state;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(genpd) || state < 0)
- return -EINVAL;
-
- mutex_lock(&genpd->lock);
-
- if (genpd->cpuidle_data) {
- ret = -EEXIST;
- goto out;
- }
- cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
- if (!cpuidle_data) {
- ret = -ENOMEM;
- goto out;
- }
- cpuidle_drv = cpuidle_driver_ref();
- if (!cpuidle_drv) {
- ret = -ENODEV;
- goto err_drv;
- }
- if (cpuidle_drv->state_count <= state) {
- ret = -EINVAL;
- goto err;
- }
- idle_state = &cpuidle_drv->states[state];
- if (!idle_state->disabled) {
- ret = -EAGAIN;
- goto err;
- }
- cpuidle_data->idle_state = idle_state;
- cpuidle_data->saved_exit_latency = idle_state->exit_latency;
- genpd->cpuidle_data = cpuidle_data;
- genpd_recalc_cpu_exit_latency(genpd);
-
- out:
- mutex_unlock(&genpd->lock);
- return ret;
-
- err:
- cpuidle_driver_unref();
-
- err_drv:
- kfree(cpuidle_data);
- goto out;
-}
-
-/**
- * pm_genpd_name_attach_cpuidle - Find PM domain and connect cpuidle to it.
- * @name: Name of the domain to connect to cpuidle.
- * @state: cpuidle state this domain can manipulate.
- */
-int pm_genpd_name_attach_cpuidle(const char *name, int state)
-{
- return pm_genpd_attach_cpuidle(pm_genpd_lookup_name(name), state);
-}
-
-/**
- * pm_genpd_detach_cpuidle - Remove the cpuidle connection from a PM domain.
- * @genpd: PM domain to remove the cpuidle connection from.
- *
- * Remove the cpuidle connection set up by pm_genpd_attach_cpuidle() from the
- * given PM domain.
- */
-int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
-{
- struct gpd_cpuidle_data *cpuidle_data;
- struct cpuidle_state *idle_state;
- int ret = 0;
-
- if (IS_ERR_OR_NULL(genpd))
- return -EINVAL;
-
- mutex_lock(&genpd->lock);
-
- cpuidle_data = genpd->cpuidle_data;
- if (!cpuidle_data) {
- ret = -ENODEV;
- goto out;
- }
- idle_state = cpuidle_data->idle_state;
- if (!idle_state->disabled) {
- ret = -EAGAIN;
- goto out;
- }
- idle_state->exit_latency = cpuidle_data->saved_exit_latency;
- cpuidle_driver_unref();
- genpd->cpuidle_data = NULL;
- kfree(cpuidle_data);
-
- out:
- mutex_unlock(&genpd->lock);
- return ret;
-}
-
-/**
- * pm_genpd_name_detach_cpuidle - Find PM domain and disconnect cpuidle from it.
- * @name: Name of the domain to disconnect cpuidle from.
- */
-int pm_genpd_name_detach_cpuidle(const char *name)
-{
- return pm_genpd_detach_cpuidle(pm_genpd_lookup_name(name));
-}
-
/* Default device callbacks for generic PM domains. */
/**
@@ -1688,7 +1467,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
mutex_init(&genpd->lock);
genpd->gov = gov;
INIT_WORK(&genpd->power_off_work, genpd_power_off_work_fn);
- genpd->in_progress = 0;
atomic_set(&genpd->sd_count, 0);
genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE;
genpd->device_count = 0;
@@ -2023,7 +1801,7 @@ int genpd_dev_pm_attach(struct device *dev)
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- ret = pm_genpd_poweron(pd);
+ ret = genpd_poweron(pd);
out:
return ret ? -EPROBE_DEFER : 0;
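With the name-based lookup helpers (pm_genpd_name_poweron(), __pm_genpd_name_add_device(), pm_genpd_add_subdomain_names() and the cpuidle hooks) removed, platform code is expected to keep hold of its struct generic_pm_domain pointers. A minimal sketch of the remaining pointer-based flow; the "foo" names and the empty callbacks are placeholders:

#include <linux/pm_domain.h>

static int foo_pd_power_on(struct generic_pm_domain *pd)
{
	return 0;	/* platform-specific power-up would go here */
}

static int foo_pd_power_off(struct generic_pm_domain *pd)
{
	return 0;	/* platform-specific power-down would go here */
}

static struct generic_pm_domain foo_pd = {
	.name = "foo",
	.power_on = foo_pd_power_on,
	.power_off = foo_pd_power_off,
};

static int foo_pd_setup(struct device *dev)
{
	/* Initialize the domain (initially powered on, default governor)
	 * and attach the device by pointer rather than by name. */
	pm_genpd_init(&foo_pd, NULL, false);
	return pm_genpd_add_device(&foo_pd, dev);
}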
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index 85e17bacc834..e60dd12e23aa 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -77,10 +77,8 @@ static bool default_stop_ok(struct device *dev)
dev_update_qos_constraint);
if (constraint_ns > 0) {
- constraint_ns -= td->save_state_latency_ns +
- td->stop_latency_ns +
- td->start_latency_ns +
- td->restore_state_latency_ns;
+ constraint_ns -= td->suspend_latency_ns +
+ td->resume_latency_ns;
if (constraint_ns == 0)
return false;
}
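With the four per-callback latencies collapsed into one measured suspend/resume pair, the governor's budget check becomes a single subtraction. A worked example with made-up numbers:

#include <linux/types.h>

/* Example only: a 1.5 ms QoS constraint, with 200 us measured suspend
 * latency and 300 us measured resume latency, leaves 1 ms of slack, so
 * a check like default_stop_ok()'s would still permit stopping the
 * device. */
static bool foo_stop_ok_example(void)
{
	s64 constraint_ns = 1500000;		/* dev_pm_qos limit */

	constraint_ns -= 200000 + 300000;	/* suspend + resume latency */
	return constraint_ns > 0;		/* 1000000 ns left */
}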
diff --git a/drivers/base/power/generic_ops.c b/drivers/base/power/generic_ops.c
index 96a92db83cad..07c3c4a9522d 100644
--- a/drivers/base/power/generic_ops.c
+++ b/drivers/base/power/generic_ops.c
@@ -9,6 +9,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/export.h>
+#include <linux/suspend.h>
#ifdef CONFIG_PM
/**
@@ -296,11 +297,27 @@ void pm_generic_complete(struct device *dev)
if (drv && drv->pm && drv->pm->complete)
drv->pm->complete(dev);
+}
+/**
+ * pm_complete_with_resume_check - Complete a device power transition.
+ * @dev: Device to handle.
+ *
+ * Complete a device power transition during a system-wide power transition and
+ * optionally schedule a runtime resume of the device if the system resume in
+ * progress has been initated by the platform firmware and the device had its
+ * power.direct_complete flag set.
+ */
+void pm_complete_with_resume_check(struct device *dev)
+{
+ pm_generic_complete(dev);
/*
- * Let runtime PM try to suspend devices that haven't been in use before
- * going into the system-wide sleep state we're resuming from.
+	 * If the device had been runtime-suspended before the system went into
+	 * the sleep state it is now leaving and has not been resumed since,
+	 * resume it in case the firmware powered it up.
*/
- pm_request_idle(dev);
+ if (dev->power.direct_complete && pm_resume_via_firmware())
+ pm_request_resume(dev);
}
+EXPORT_SYMBOL_GPL(pm_complete_with_resume_check);
#endif /* CONFIG_PM_SLEEP */
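A driver that relies on the direct_complete optimization can plug the new helper straight into its dev_pm_ops. A minimal sketch, with foo_suspend()/foo_resume() as placeholder callbacks:

#include <linux/pm.h>

static int foo_suspend(struct device *dev) { return 0; }
static int foo_resume(struct device *dev) { return 0; }

/* .complete then runtime-resumes the device if the firmware, rather than
 * the kernel, initiated the resume while power.direct_complete was set. */
static const struct dev_pm_ops foo_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(foo_suspend, foo_resume)
	.complete = pm_complete_with_resume_check,
};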
diff --git a/drivers/base/power/opp/Makefile b/drivers/base/power/opp/Makefile
new file mode 100644
index 000000000000..33c1e18c41a4
--- /dev/null
+++ b/drivers/base/power/opp/Makefile
@@ -0,0 +1,2 @@
+ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
+obj-y += core.o cpu.o
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp/core.c
index 7ae7cd990fbf..d5c1149ff123 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp/core.c
@@ -11,131 +11,14 @@
* published by the Free Software Foundation.
*/
-#include <linux/cpu.h>
-#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/device.h>
-#include <linux/list.h>
-#include <linux/rculist.h>
-#include <linux/rcupdate.h>
-#include <linux/pm_opp.h>
#include <linux/of.h>
#include <linux/export.h>
-/*
- * Internal data structure organization with the OPP layer library is as
- * follows:
- * dev_opp_list (root)
- * |- device 1 (represents voltage domain 1)
- * | |- opp 1 (availability, freq, voltage)
- * | |- opp 2 ..
- * ... ...
- * | `- opp n ..
- * |- device 2 (represents the next voltage domain)
- * ...
- * `- device m (represents mth voltage domain)
- * device 1, 2.. are represented by dev_opp structure while each opp
- * is represented by the opp structure.
- */
-
-/**
- * struct dev_pm_opp - Generic OPP description structure
- * @node: opp list node. The nodes are maintained throughout the lifetime
- * of boot. It is expected only an optimal set of OPPs are
- * added to the library by the SoC framework.
- * RCU usage: opp list is traversed with RCU locks. node
- * modification is possible realtime, hence the modifications
- * are protected by the dev_opp_list_lock for integrity.
- * IMPORTANT: the opp nodes should be maintained in increasing
- * order.
- * @dynamic: not-created from static DT entries.
- * @available: true/false - marks if this OPP as available or not
- * @turbo: true if turbo (boost) OPP
- * @rate: Frequency in hertz
- * @u_volt: Target voltage in microvolts corresponding to this OPP
- * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
- * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
- * @u_amp: Maximum current drawn by the device in microamperes
- * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
- * frequency from any other OPP's frequency.
- * @dev_opp: points back to the device_opp struct this opp belongs to
- * @rcu_head: RCU callback head used for deferred freeing
- * @np: OPP's device node.
- *
- * This structure stores the OPP information for a given device.
- */
-struct dev_pm_opp {
- struct list_head node;
-
- bool available;
- bool dynamic;
- bool turbo;
- unsigned long rate;
-
- unsigned long u_volt;
- unsigned long u_volt_min;
- unsigned long u_volt_max;
- unsigned long u_amp;
- unsigned long clock_latency_ns;
-
- struct device_opp *dev_opp;
- struct rcu_head rcu_head;
-
- struct device_node *np;
-};
-
-/**
- * struct device_list_opp - devices managed by 'struct device_opp'
- * @node: list node
- * @dev: device to which the struct object belongs
- * @rcu_head: RCU callback head used for deferred freeing
- *
- * This is an internal data structure maintaining the list of devices that are
- * managed by 'struct device_opp'.
- */
-struct device_list_opp {
- struct list_head node;
- const struct device *dev;
- struct rcu_head rcu_head;
-};
-
-/**
- * struct device_opp - Device opp structure
- * @node: list node - contains the devices with OPPs that
- * have been registered. Nodes once added are not modified in this
- * list.
- * RCU usage: nodes are not modified in the list of device_opp,
- * however addition is possible and is secured by dev_opp_list_lock
- * @srcu_head: notifier head to notify the OPP availability changes.
- * @rcu_head: RCU callback head used for deferred freeing
- * @dev_list: list of devices that share these OPPs
- * @opp_list: list of opps
- * @np: struct device_node pointer for opp's DT node.
- * @shared_opp: OPP is shared between multiple devices.
- *
- * This is an internal data structure maintaining the link to opps attached to
- * a device. This structure is not meant to be shared to users as it is
- * meant for book keeping and private to OPP library.
- *
- * Because the opp structures can be used from both rcu and srcu readers, we
- * need to wait for the grace period of both of them before freeing any
- * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
- */
-struct device_opp {
- struct list_head node;
-
- struct srcu_notifier_head srcu_head;
- struct rcu_head rcu_head;
- struct list_head dev_list;
- struct list_head opp_list;
-
- struct device_node *np;
- unsigned long clock_latency_ns_max;
- bool shared_opp;
- struct dev_pm_opp *suspend_opp;
-};
+#include "opp.h"
/*
* The root of the list of all devices. All device_opp structures branch off
@@ -200,7 +83,7 @@ static struct device_opp *_managed_opp(const struct device_node *np)
* is a RCU protected pointer. This means that device_opp is valid as long
* as we are under RCU lock.
*/
-static struct device_opp *_find_device_opp(struct device *dev)
+struct device_opp *_find_device_opp(struct device *dev)
{
struct device_opp *dev_opp;
@@ -579,8 +462,8 @@ static void _remove_list_dev(struct device_list_opp *list_dev,
_kfree_list_dev_rcu);
}
-static struct device_list_opp *_add_list_dev(const struct device *dev,
- struct device_opp *dev_opp)
+struct device_list_opp *_add_list_dev(const struct device *dev,
+ struct device_opp *dev_opp)
{
struct device_list_opp *list_dev;
@@ -828,8 +711,8 @@ static int _opp_add(struct device *dev, struct dev_pm_opp *new_opp,
* The opp is made available by default and it can be controlled using
* dev_pm_opp_enable/disable functions and may be removed by dev_pm_opp_remove.
*
- * NOTE: "dynamic" parameter impacts OPPs added by the of_init_opp_table and
- * freed by of_free_opp_table.
+ * NOTE: "dynamic" parameter impacts OPPs added by the dev_pm_opp_of_add_table
+ * and freed by dev_pm_opp_of_remove_table.
*
* Locking: The internal device_opp and opp structures are RCU protected.
* Hence this function internally uses RCU updater strategy with mutex locks
@@ -1220,7 +1103,8 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
#ifdef CONFIG_OF
/**
- * of_free_opp_table() - Free OPP table entries created from static DT entries
+ * dev_pm_opp_of_remove_table() - Free OPP table entries created from static DT
+ * entries
* @dev: device pointer used to lookup device OPPs.
*
* Free OPPs created using static entries present in DT.
@@ -1231,7 +1115,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_notifier);
* that this function is *NOT* called under RCU protection or in contexts where
* mutex cannot be locked.
*/
-void of_free_opp_table(struct device *dev)
+void dev_pm_opp_of_remove_table(struct device *dev)
{
struct device_opp *dev_opp;
struct dev_pm_opp *opp, *tmp;
@@ -1266,91 +1150,34 @@ void of_free_opp_table(struct device *dev)
unlock:
mutex_unlock(&dev_opp_list_lock);
}
-EXPORT_SYMBOL_GPL(of_free_opp_table);
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_remove_table);
-void of_cpumask_free_opp_table(cpumask_var_t cpumask)
+/* Returns opp descriptor node for a device, caller must do of_node_put() */
+struct device_node *_of_get_opp_desc_node(struct device *dev)
{
- struct device *cpu_dev;
- int cpu;
-
- WARN_ON(cpumask_empty(cpumask));
-
- for_each_cpu(cpu, cpumask) {
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- cpu);
- continue;
- }
-
- of_free_opp_table(cpu_dev);
- }
-}
-EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table);
-
-/* Returns opp descriptor node from its phandle. Caller must do of_node_put() */
-static struct device_node *
-_of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop)
-{
- struct device_node *opp_np;
-
- opp_np = of_find_node_by_phandle(be32_to_cpup(prop->value));
- if (!opp_np) {
- dev_err(dev, "%s: Prop: %s contains invalid opp desc phandle\n",
- __func__, prop->name);
- return ERR_PTR(-EINVAL);
- }
-
- return opp_np;
-}
-
-/* Returns opp descriptor node for a device. Caller must do of_node_put() */
-static struct device_node *_of_get_opp_desc_node(struct device *dev)
-{
- const struct property *prop;
-
- prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
- if (!prop)
- return ERR_PTR(-ENODEV);
- if (!prop->value)
- return ERR_PTR(-ENODATA);
-
/*
* TODO: Support for multiple OPP tables.
*
* There should be only ONE phandle present in "operating-points-v2"
* property.
*/
- if (prop->length != sizeof(__be32)) {
- dev_err(dev, "%s: Invalid opp desc phandle\n", __func__);
- return ERR_PTR(-EINVAL);
- }
- return _of_get_opp_desc_node_from_prop(dev, prop);
+ return of_parse_phandle(dev->of_node, "operating-points-v2", 0);
}
/* Initializes OPP tables based on new bindings */
-static int _of_init_opp_table_v2(struct device *dev,
- const struct property *prop)
+static int _of_add_opp_table_v2(struct device *dev, struct device_node *opp_np)
{
- struct device_node *opp_np, *np;
+ struct device_node *np;
struct device_opp *dev_opp;
int ret = 0, count = 0;
- if (!prop->value)
- return -ENODATA;
-
- /* Get opp node */
- opp_np = _of_get_opp_desc_node_from_prop(dev, prop);
- if (IS_ERR(opp_np))
- return PTR_ERR(opp_np);
-
dev_opp = _managed_opp(opp_np);
if (dev_opp) {
/* OPPs are already managed */
if (!_add_list_dev(dev, dev_opp))
ret = -ENOMEM;
- goto put_opp_np;
+ return ret;
}
/* We have opp-list node now, iterate over it and add OPPs */
@@ -1366,10 +1193,8 @@ static int _of_init_opp_table_v2(struct device *dev,
}
/* There should be one or more OPPs defined */
- if (WARN_ON(!count)) {
- ret = -ENOENT;
- goto put_opp_np;
- }
+ if (WARN_ON(!count))
+ return -ENOENT;
dev_opp = _find_device_opp(dev);
if (WARN_ON(IS_ERR(dev_opp))) {
@@ -1380,19 +1205,16 @@ static int _of_init_opp_table_v2(struct device *dev,
dev_opp->np = opp_np;
dev_opp->shared_opp = of_property_read_bool(opp_np, "opp-shared");
- of_node_put(opp_np);
return 0;
free_table:
- of_free_opp_table(dev);
-put_opp_np:
- of_node_put(opp_np);
+ dev_pm_opp_of_remove_table(dev);
return ret;
}
/* Initializes OPP tables based on old-deprecated bindings */
-static int _of_init_opp_table_v1(struct device *dev)
+static int _of_add_opp_table_v1(struct device *dev)
{
const struct property *prop;
const __be32 *val;
@@ -1429,7 +1251,7 @@ static int _of_init_opp_table_v1(struct device *dev)
}
/**
- * of_init_opp_table() - Initialize opp table from device tree
+ * dev_pm_opp_of_add_table() - Initialize opp table from device tree
* @dev: device pointer used to lookup device OPPs.
*
* Register the initial OPP table with the OPP library for given device.
@@ -1451,153 +1273,28 @@ static int _of_init_opp_table_v1(struct device *dev)
* -ENODATA when empty 'operating-points' property is found
* -EINVAL when invalid entries are found in opp-v2 table
*/
-int of_init_opp_table(struct device *dev)
+int dev_pm_opp_of_add_table(struct device *dev)
{
- const struct property *prop;
+ struct device_node *opp_np;
+ int ret;
/*
* OPPs have two versions of bindings now. The older one is deprecated,
* try for the new binding first.
*/
- prop = of_find_property(dev->of_node, "operating-points-v2", NULL);
- if (!prop) {
+ opp_np = _of_get_opp_desc_node(dev);
+ if (!opp_np) {
/*
* Try old-deprecated bindings for backward compatibility with
* older dtbs.
*/
- return _of_init_opp_table_v1(dev);
- }
-
- return _of_init_opp_table_v2(dev, prop);
-}
-EXPORT_SYMBOL_GPL(of_init_opp_table);
-
-int of_cpumask_init_opp_table(cpumask_var_t cpumask)
-{
- struct device *cpu_dev;
- int cpu, ret = 0;
-
- WARN_ON(cpumask_empty(cpumask));
-
- for_each_cpu(cpu, cpumask) {
- cpu_dev = get_cpu_device(cpu);
- if (!cpu_dev) {
- pr_err("%s: failed to get cpu%d device\n", __func__,
- cpu);
- continue;
- }
-
- ret = of_init_opp_table(cpu_dev);
- if (ret) {
- pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
- __func__, cpu, ret);
-
- /* Free all other OPPs */
- of_cpumask_free_opp_table(cpumask);
- break;
- }
+ return _of_add_opp_table_v1(dev);
}
- return ret;
-}
-EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table);
-
-/* Required only for V1 bindings, as v2 can manage it from DT itself */
-int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
-{
- struct device_list_opp *list_dev;
- struct device_opp *dev_opp;
- struct device *dev;
- int cpu, ret = 0;
-
- rcu_read_lock();
-
- dev_opp = _find_device_opp(cpu_dev);
- if (IS_ERR(dev_opp)) {
- ret = -EINVAL;
- goto out_rcu_read_unlock;
- }
-
- for_each_cpu(cpu, cpumask) {
- if (cpu == cpu_dev->id)
- continue;
-
- dev = get_cpu_device(cpu);
- if (!dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
- __func__, cpu);
- continue;
- }
-
- list_dev = _add_list_dev(dev, dev_opp);
- if (!list_dev) {
- dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
- __func__, cpu);
- continue;
- }
- }
-out_rcu_read_unlock:
- rcu_read_unlock();
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(set_cpus_sharing_opps);
-
-/*
- * Works only for OPP v2 bindings.
- *
- * cpumask should be already set to mask of cpu_dev->id.
- * Returns -ENOENT if operating-points-v2 bindings aren't supported.
- */
-int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask)
-{
- struct device_node *np, *tmp_np;
- struct device *tcpu_dev;
- int cpu, ret = 0;
-
- /* Get OPP descriptor node */
- np = _of_get_opp_desc_node(cpu_dev);
- if (IS_ERR(np)) {
- dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__,
- PTR_ERR(np));
- return -ENOENT;
- }
-
- /* OPPs are shared ? */
- if (!of_property_read_bool(np, "opp-shared"))
- goto put_cpu_node;
-
- for_each_possible_cpu(cpu) {
- if (cpu == cpu_dev->id)
- continue;
-
- tcpu_dev = get_cpu_device(cpu);
- if (!tcpu_dev) {
- dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
- __func__, cpu);
- ret = -ENODEV;
- goto put_cpu_node;
- }
-
- /* Get OPP descriptor node */
- tmp_np = _of_get_opp_desc_node(tcpu_dev);
- if (IS_ERR(tmp_np)) {
- dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n",
- __func__, PTR_ERR(tmp_np));
- ret = PTR_ERR(tmp_np);
- goto put_cpu_node;
- }
-
- /* CPUs are sharing opp node */
- if (np == tmp_np)
- cpumask_set_cpu(cpu, cpumask);
-
- of_node_put(tmp_np);
- }
+ ret = _of_add_opp_table_v2(dev, opp_np);
+ of_node_put(opp_np);
-put_cpu_node:
- of_node_put(np);
return ret;
}
-EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps);
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_add_table);
#endif
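For consumers, the rename is mechanical: of_init_opp_table() becomes dev_pm_opp_of_add_table() and of_free_opp_table() becomes dev_pm_opp_of_remove_table(). A minimal sketch of the renamed pair in a hypothetical driver:

#include <linux/pm_opp.h>

/* Hypothetical consumer: populate the device's OPP table from DT and
 * tear it down again when the driver is done with it. */
static int foo_init_opps(struct device *dev)
{
	int ret;

	ret = dev_pm_opp_of_add_table(dev);	/* was of_init_opp_table() */
	if (ret)
		return ret;

	/* ... look up OPPs with dev_pm_opp_find_freq_ceil() etc. ... */

	dev_pm_opp_of_remove_table(dev);	/* was of_free_opp_table() */
	return 0;
}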
diff --git a/drivers/base/power/opp/cpu.c b/drivers/base/power/opp/cpu.c
new file mode 100644
index 000000000000..7654c5606307
--- /dev/null
+++ b/drivers/base/power/opp/cpu.c
@@ -0,0 +1,267 @@
+/*
+ * Generic OPP helper interface for CPU device
+ *
+ * Copyright (C) 2009-2014 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+
+#include "opp.h"
+
+#ifdef CONFIG_CPU_FREQ
+
+/**
+ * dev_pm_opp_init_cpufreq_table() - create a cpufreq table for a device
+ * @dev: device for which we do this operation
+ * @table: Cpufreq table returned back to caller
+ *
+ * Generate a cpufreq table for a provided device - this assumes that the
+ * OPP list is already initialized and ready for use.
+ *
+ * This function allocates the required memory for the cpufreq table. It is
+ * expected that the caller does the required maintenance, such as freeing
+ * the table once it is no longer needed.
+ *
+ * Returns -EINVAL for bad pointers, -ENODEV if the device is not found, -ENOMEM
+ * if no memory is available for the operation (table is not populated), and 0
+ * if successful (table is populated).
+ *
+ * WARNING: It is important for the callers to ensure refreshing their copy of
+ * the table if any of the mentioned functions have been invoked in the interim.
+ *
+ * Locking: The internal device_opp and opp structures are RCU protected.
+ * Since we just use the regular accessor functions to access the internal data
+ * structures, we use RCU read lock inside this function. As a result, users of
+ * this function do not need to take explicit locks when invoking it.
+ */
+int dev_pm_opp_init_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ struct dev_pm_opp *opp;
+ struct cpufreq_frequency_table *freq_table = NULL;
+ int i, max_opps, ret = 0;
+ unsigned long rate;
+
+ rcu_read_lock();
+
+ max_opps = dev_pm_opp_get_opp_count(dev);
+ if (max_opps <= 0) {
+ ret = max_opps ? max_opps : -ENODATA;
+ goto out;
+ }
+
+ freq_table = kcalloc((max_opps + 1), sizeof(*freq_table), GFP_ATOMIC);
+ if (!freq_table) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0, rate = 0; i < max_opps; i++, rate++) {
+ /* find next rate */
+ opp = dev_pm_opp_find_freq_ceil(dev, &rate);
+ if (IS_ERR(opp)) {
+ ret = PTR_ERR(opp);
+ goto out;
+ }
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = rate / 1000;
+
+ /* Is Boost/turbo opp ? */
+ if (dev_pm_opp_is_turbo(opp))
+ freq_table[i].flags = CPUFREQ_BOOST_FREQ;
+ }
+
+ freq_table[i].driver_data = i;
+ freq_table[i].frequency = CPUFREQ_TABLE_END;
+
+ *table = &freq_table[0];
+
+out:
+ rcu_read_unlock();
+ if (ret)
+ kfree(freq_table);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_init_cpufreq_table);
+
+/**
+ * dev_pm_opp_free_cpufreq_table() - free the cpufreq table
+ * @dev: device for which we do this operation
+ * @table: table to free
+ *
+ * Free up the table allocated by dev_pm_opp_init_cpufreq_table
+ */
+void dev_pm_opp_free_cpufreq_table(struct device *dev,
+ struct cpufreq_frequency_table **table)
+{
+ if (!table)
+ return;
+
+ kfree(*table);
+ *table = NULL;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_free_cpufreq_table);
+#endif /* CONFIG_CPU_FREQ */
+
+/* Required only for V1 bindings, as v2 can manage it from DT itself */
+int dev_pm_opp_set_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ struct device_list_opp *list_dev;
+ struct device_opp *dev_opp;
+ struct device *dev;
+ int cpu, ret = 0;
+
+ rcu_read_lock();
+
+ dev_opp = _find_device_opp(cpu_dev);
+ if (IS_ERR(dev_opp)) {
+ ret = -EINVAL;
+ goto out_rcu_read_unlock;
+ }
+
+ for_each_cpu(cpu, cpumask) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ dev = get_cpu_device(cpu);
+ if (!dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+
+ list_dev = _add_list_dev(dev, dev_opp);
+ if (!list_dev) {
+ dev_err(dev, "%s: failed to add list-dev for cpu%d device\n",
+ __func__, cpu);
+ continue;
+ }
+ }
+out_rcu_read_unlock:
+ rcu_read_unlock();
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_set_sharing_cpus);
+
+#ifdef CONFIG_OF
+void dev_pm_opp_of_cpumask_remove_table(cpumask_var_t cpumask)
+{
+ struct device *cpu_dev;
+ int cpu;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ dev_pm_opp_of_remove_table(cpu_dev);
+ }
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_remove_table);
+
+int dev_pm_opp_of_cpumask_add_table(cpumask_var_t cpumask)
+{
+ struct device *cpu_dev;
+ int cpu, ret = 0;
+
+ WARN_ON(cpumask_empty(cpumask));
+
+ for_each_cpu(cpu, cpumask) {
+ cpu_dev = get_cpu_device(cpu);
+ if (!cpu_dev) {
+ pr_err("%s: failed to get cpu%d device\n", __func__,
+ cpu);
+ continue;
+ }
+
+ ret = dev_pm_opp_of_add_table(cpu_dev);
+ if (ret) {
+ pr_err("%s: couldn't find opp table for cpu:%d, %d\n",
+ __func__, cpu, ret);
+
+ /* Free all other OPPs */
+ dev_pm_opp_of_cpumask_remove_table(cpumask);
+ break;
+ }
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_cpumask_add_table);
+
+/*
+ * Works only for OPP v2 bindings.
+ *
+ * cpumask should already be set to the mask of cpu_dev->id.
+ * Returns -ENOENT if operating-points-v2 bindings aren't supported.
+ */
+int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, cpumask_var_t cpumask)
+{
+ struct device_node *np, *tmp_np;
+ struct device *tcpu_dev;
+ int cpu, ret = 0;
+
+ /* Get OPP descriptor node */
+ np = _of_get_opp_desc_node(cpu_dev);
+ if (!np) {
+ dev_dbg(cpu_dev, "%s: Couldn't find cpu_dev node.\n", __func__);
+ return -ENOENT;
+ }
+
+ /* OPPs are shared ? */
+ if (!of_property_read_bool(np, "opp-shared"))
+ goto put_cpu_node;
+
+ for_each_possible_cpu(cpu) {
+ if (cpu == cpu_dev->id)
+ continue;
+
+ tcpu_dev = get_cpu_device(cpu);
+ if (!tcpu_dev) {
+ dev_err(cpu_dev, "%s: failed to get cpu%d device\n",
+ __func__, cpu);
+ ret = -ENODEV;
+ goto put_cpu_node;
+ }
+
+ /* Get OPP descriptor node */
+ tmp_np = _of_get_opp_desc_node(tcpu_dev);
+ if (!tmp_np) {
+ dev_err(tcpu_dev, "%s: Couldn't find tcpu_dev node.\n",
+ __func__);
+ ret = -ENOENT;
+ goto put_cpu_node;
+ }
+
+ /* CPUs are sharing opp node */
+ if (np == tmp_np)
+ cpumask_set_cpu(cpu, cpumask);
+
+ of_node_put(tmp_np);
+ }
+
+put_cpu_node:
+ of_node_put(np);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_opp_of_get_sharing_cpus);
+#endif
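Taken together, the CPU helpers support both binding versions from one init path. A sketch of how a cpufreq driver might combine them; foo_cpufreq_init() and the way its mask is seeded are assumptions, not an API contract:

#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/pm_opp.h>

static int foo_cpufreq_init(struct device *cpu_dev, cpumask_var_t cpus)
{
	int ret;

	ret = dev_pm_opp_of_add_table(cpu_dev);
	if (ret)
		return ret;

	/* The mask must start out containing cpu_dev->id (see above). */
	cpumask_set_cpu(cpu_dev->id, cpus);

	ret = dev_pm_opp_of_get_sharing_cpus(cpu_dev, cpus);
	if (ret == -ENOENT)	/* v1 bindings: declare the sharing by hand,
				 * assuming 'cpus' holds the related CPUs */
		ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpus);

	return ret;
}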
diff --git a/drivers/base/power/opp/opp.h b/drivers/base/power/opp/opp.h
new file mode 100644
index 000000000000..dcb38f78dae4
--- /dev/null
+++ b/drivers/base/power/opp/opp.h
@@ -0,0 +1,143 @@
+/*
+ * Generic OPP Interface
+ *
+ * Copyright (C) 2009-2010 Texas Instruments Incorporated.
+ * Nishanth Menon
+ * Romit Dasgupta
+ * Kevin Hilman
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DRIVER_OPP_H__
+#define __DRIVER_OPP_H__
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/pm_opp.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+
+/*
+ * Internal data structure organization with the OPP layer library is as
+ * follows:
+ * dev_opp_list (root)
+ * |- device 1 (represents voltage domain 1)
+ * | |- opp 1 (availability, freq, voltage)
+ * | |- opp 2 ..
+ * ... ...
+ * | `- opp n ..
+ * |- device 2 (represents the next voltage domain)
+ * ...
+ * `- device m (represents mth voltage domain)
+ * device 1, 2.. are represented by dev_opp structure while each opp
+ * is represented by the opp structure.
+ */
+
+/**
+ * struct dev_pm_opp - Generic OPP description structure
+ * @node: opp list node. The nodes are maintained throughout the lifetime
+ *		of boot. It is expected that only an optimal set of OPPs is
+ *		added to the library by the SoC framework.
+ * RCU usage: opp list is traversed with RCU locks. node
+ * modification is possible realtime, hence the modifications
+ * are protected by the dev_opp_list_lock for integrity.
+ * IMPORTANT: the opp nodes should be maintained in increasing
+ * order.
+ * @dynamic: not-created from static DT entries.
+ * @available:	true/false - marks if this OPP is available or not
+ * @turbo: true if turbo (boost) OPP
+ * @rate: Frequency in hertz
+ * @u_volt: Target voltage in microvolts corresponding to this OPP
+ * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP
+ * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP
+ * @u_amp: Maximum current drawn by the device in microamperes
+ * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's
+ * frequency from any other OPP's frequency.
+ * @dev_opp: points back to the device_opp struct this opp belongs to
+ * @rcu_head: RCU callback head used for deferred freeing
+ * @np: OPP's device node.
+ *
+ * This structure stores the OPP information for a given device.
+ */
+struct dev_pm_opp {
+ struct list_head node;
+
+ bool available;
+ bool dynamic;
+ bool turbo;
+ unsigned long rate;
+
+ unsigned long u_volt;
+ unsigned long u_volt_min;
+ unsigned long u_volt_max;
+ unsigned long u_amp;
+ unsigned long clock_latency_ns;
+
+ struct device_opp *dev_opp;
+ struct rcu_head rcu_head;
+
+ struct device_node *np;
+};
+
+/**
+ * struct device_list_opp - devices managed by 'struct device_opp'
+ * @node: list node
+ * @dev: device to which the struct object belongs
+ * @rcu_head: RCU callback head used for deferred freeing
+ *
+ * This is an internal data structure maintaining the list of devices that are
+ * managed by 'struct device_opp'.
+ */
+struct device_list_opp {
+ struct list_head node;
+ const struct device *dev;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct device_opp - Device opp structure
+ * @node: list node - contains the devices with OPPs that
+ * have been registered. Nodes once added are not modified in this
+ * list.
+ * RCU usage: nodes are not modified in the list of device_opp,
+ * however addition is possible and is secured by dev_opp_list_lock
+ * @srcu_head: notifier head to notify the OPP availability changes.
+ * @rcu_head: RCU callback head used for deferred freeing
+ * @dev_list: list of devices that share these OPPs
+ * @opp_list: list of opps
+ * @np: struct device_node pointer for opp's DT node.
+ * @shared_opp: OPP is shared between multiple devices.
+ *
+ * This is an internal data structure maintaining the link to OPPs attached to
+ * a device. This structure is not meant to be shared with users as it is
+ * meant for bookkeeping and private to the OPP library.
+ *
+ * Because the opp structures can be used from both rcu and srcu readers, we
+ * need to wait for the grace period of both of them before freeing any
+ * resources. And so we have used kfree_rcu() from within call_srcu() handlers.
+ */
+struct device_opp {
+ struct list_head node;
+
+ struct srcu_notifier_head srcu_head;
+ struct rcu_head rcu_head;
+ struct list_head dev_list;
+ struct list_head opp_list;
+
+ struct device_node *np;
+ unsigned long clock_latency_ns_max;
+ bool shared_opp;
+ struct dev_pm_opp *suspend_opp;
+};
+
+/* Routines internal to opp core */
+struct device_opp *_find_device_opp(struct device *dev);
+struct device_list_opp *_add_list_dev(const struct device *dev,
+ struct device_opp *dev_opp);
+struct device_node *_of_get_opp_desc_node(struct device *dev);
+
+#endif /* __DRIVER_OPP_H__ */
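The declarations at the bottom are what core.c and cpu.c now share. An internal-style sketch (not a public API) of how they combine with the RCU-protected lists above; foo_dump_opps() is illustrative only:

#include <linux/err.h>

#include "opp.h"

static void foo_dump_opps(struct device *dev)
{
	struct device_opp *dev_opp;
	struct dev_pm_opp *opp;

	rcu_read_lock();
	dev_opp = _find_device_opp(dev);	/* ERR_PTR if none registered */
	if (!IS_ERR(dev_opp))
		list_for_each_entry_rcu(opp, &dev_opp->opp_list, node)
			dev_dbg(dev, "%lu Hz, %lu uV, available=%d\n",
				opp->rate, opp->u_volt, opp->available);
	rcu_read_unlock();
}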
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index 51f15bc15774..a1e0b9ab847a 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -25,6 +25,9 @@
*/
bool events_check_enabled __read_mostly;
+/* First wakeup IRQ seen by the kernel in the last cycle. */
+unsigned int pm_wakeup_irq __read_mostly;
+
/* If set and the system is suspending, terminate the suspend. */
static bool pm_abort_suspend __read_mostly;
@@ -91,7 +94,7 @@ struct wakeup_source *wakeup_source_create(const char *name)
if (!ws)
return NULL;
- wakeup_source_prepare(ws, name ? kstrdup(name, GFP_KERNEL) : NULL);
+ wakeup_source_prepare(ws, name ? kstrdup_const(name, GFP_KERNEL) : NULL);
return ws;
}
EXPORT_SYMBOL_GPL(wakeup_source_create);
@@ -154,7 +157,7 @@ void wakeup_source_destroy(struct wakeup_source *ws)
wakeup_source_drop(ws);
wakeup_source_record(ws);
- kfree(ws->name);
+ kfree_const(ws->name);
kfree(ws);
}
EXPORT_SYMBOL_GPL(wakeup_source_destroy);
@@ -868,6 +871,15 @@ EXPORT_SYMBOL_GPL(pm_system_wakeup);
void pm_wakeup_clear(void)
{
pm_abort_suspend = false;
+ pm_wakeup_irq = 0;
+}
+
+void pm_system_irq_wakeup(unsigned int irq_number)
+{
+ if (pm_wakeup_irq == 0) {
+ pm_wakeup_irq = irq_number;
+ pm_system_wakeup();
+ }
}
/**
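The new pm_wakeup_irq variable records the first IRQ that woke the system or aborted a suspend, for inspection after resume. A minimal sketch of reading it back, assuming the declaration is exported via linux/suspend.h; foo_report_wakeup_irq() is a hypothetical debugging aid:

#include <linux/kernel.h>
#include <linux/suspend.h>

/* Report which interrupt, if any, was recorded by pm_system_irq_wakeup()
 * above; pm_wakeup_clear() resets it at the start of each suspend cycle. */
static void foo_report_wakeup_irq(void)
{
	if (pm_wakeup_irq)
		pr_info("PM: woken up by IRQ %u\n", pm_wakeup_irq);
}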