From 4c62dbbce902cf2afa88cac89ec67c828160f431 Mon Sep 17 00:00:00 2001 From: Jarkko Nikula Date: Fri, 26 Jun 2015 11:27:41 +0300 Subject: ACPI: Remove FSF mailing addresses There is no need to carry the potentially outdated Free Software Foundation mailing address in file headers, since the COPYING file includes it. Signed-off-by: Jarkko Nikula Signed-off-by: Rafael J. Wysocki --- include/linux/acpi.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'include/linux') diff --git a/include/linux/acpi.h b/include/linux/acpi.h index c471dfc93b71..1c116ee53b1e 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -15,10 +15,6 @@ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ -- cgit v1.2.3 From 019d8817b1b064c2bacfbcf40fc68184438ad05a Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 15 Jul 2015 14:40:06 +0200 Subject: PM / sleep: Allow devices without runtime PM to do direct-complete Don't unset the direct_complete flag on devices that have runtime PM disabled, if they are runtime suspended. This is needed because otherwise ancestor devices wouldn't be able to do direct_complete without adding runtime PM support to all of their descendants. Also remove pm_runtime_suspended_if_enabled(), since it is now unused. Signed-off-by: Tomeu Vizoso Signed-off-by: Alan Stern Signed-off-by: Rafael J. Wysocki --- Documentation/power/devices.txt | 7 +++++++ Documentation/power/runtime_pm.txt | 4 ---- drivers/base/power/main.c | 2 +- include/linux/pm_runtime.h | 6 ------ 4 files changed, 8 insertions(+), 11 deletions(-) (limited to 'include/linux') diff --git a/Documentation/power/devices.txt b/Documentation/power/devices.txt index d172bce0fd49..8ba6625fdd63 100644 --- a/Documentation/power/devices.txt +++ b/Documentation/power/devices.txt @@ -341,6 +341,13 @@ the phases are: and is entirely responsible for bringing the device back to the functional state as appropriate. + Note that this direct-complete procedure applies even if the device is + disabled for runtime PM; only the runtime-PM status matters. It follows + that if a device has system-sleep callbacks but does not support runtime + PM, then its prepare callback must never return a positive value. This + is because all devices are initially set to runtime-suspended with + runtime PM disabled. + 2. The suspend methods should quiesce the device to stop it from performing I/O.
They also may save the device registers and put it into the appropriate low-power state, depending on the bus type the device is on, diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt index e76dc0ad4d2b..0784bc3a2ab5 100644 --- a/Documentation/power/runtime_pm.txt +++ b/Documentation/power/runtime_pm.txt @@ -445,10 +445,6 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h: bool pm_runtime_status_suspended(struct device *dev); - return true if the device's runtime PM status is 'suspended' - bool pm_runtime_suspended_if_enabled(struct device *dev); - - return true if the device's runtime PM status is 'suspended' and its - 'power.disable_depth' field is equal to 1 - void pm_runtime_allow(struct device *dev); - set the power.runtime_auto flag for the device and decrease its usage counter (used by the /sys/devices/.../power/control interface to diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 30b7bbfdc558..1710c26ba097 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -1377,7 +1377,7 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) if (dev->power.direct_complete) { if (pm_runtime_status_suspended(dev)) { pm_runtime_disable(dev); - if (pm_runtime_suspended_if_enabled(dev)) + if (pm_runtime_status_suspended(dev)) goto Complete; pm_runtime_enable(dev); diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 30e84d48bfea..3bdbb4189780 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h @@ -98,11 +98,6 @@ static inline bool pm_runtime_status_suspended(struct device *dev) return dev->power.runtime_status == RPM_SUSPENDED; } -static inline bool pm_runtime_suspended_if_enabled(struct device *dev) -{ - return pm_runtime_status_suspended(dev) && dev->power.disable_depth == 1; -} - static inline bool pm_runtime_enabled(struct device *dev) { return !dev->power.disable_depth; @@ -164,7 +159,6 @@ static inline void device_set_run_wake(struct device *dev, bool enable) {} static inline bool pm_runtime_suspended(struct device *dev) { return false; } static inline bool pm_runtime_active(struct device *dev) { return true; } static inline bool pm_runtime_status_suspended(struct device *dev) { return false; } -static inline bool pm_runtime_suspended_if_enabled(struct device *dev) { return false; } static inline bool pm_runtime_enabled(struct device *dev) { return false; } static inline void pm_runtime_no_callbacks(struct device *dev) {} -- cgit v1.2.3 From 13b2c4a0c3b1cd37ee6bcfbb5b6e2b94e9a75364 Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Mon, 27 Jul 2015 18:03:56 +0300 Subject: PM / QoS: Make it possible to expose device latency tolerance to userspace Typically when a device is created the bus core it belongs to (for example PCI) does not know if the device supports things like latency tolerance. This is left to the driver that binds to the device in question. However, at that time the device has already been created and there is no way to set its dev->power.set_latency_tolerance anymore. So follow what has been done for other PM QoS attributes as well and allow drivers to expose and hide latency tolerance from userspace, if the device supports it. Acked-by: Rafael J. 
Wysocki Signed-off-by: Mika Westerberg Signed-off-by: Andy Shevchenko Signed-off-by: Lee Jones --- drivers/base/power/power.h | 2 ++ drivers/base/power/qos.c | 37 +++++++++++++++++++++++++++++++++++++ drivers/base/power/sysfs.c | 11 +++++++++++ include/linux/pm_qos.h | 5 +++++ 4 files changed, 55 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index f1a5d95e7b20..998fa6b23084 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -73,6 +73,8 @@ extern int pm_qos_sysfs_add_resume_latency(struct device *dev); extern void pm_qos_sysfs_remove_resume_latency(struct device *dev); extern int pm_qos_sysfs_add_flags(struct device *dev); extern void pm_qos_sysfs_remove_flags(struct device *dev); +extern int pm_qos_sysfs_add_latency_tolerance(struct device *dev); +extern void pm_qos_sysfs_remove_latency_tolerance(struct device *dev); #else /* CONFIG_PM */ diff --git a/drivers/base/power/qos.c b/drivers/base/power/qos.c index e56d538d039e..7f3646e459cb 100644 --- a/drivers/base/power/qos.c +++ b/drivers/base/power/qos.c @@ -883,3 +883,40 @@ int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) mutex_unlock(&dev_pm_qos_mtx); return ret; } + +/** + * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace + * @dev: Device whose latency tolerance to expose + */ +int dev_pm_qos_expose_latency_tolerance(struct device *dev) +{ + int ret; + + if (!dev->power.set_latency_tolerance) + return -EINVAL; + + mutex_lock(&dev_pm_qos_sysfs_mtx); + ret = pm_qos_sysfs_add_latency_tolerance(dev); + mutex_unlock(&dev_pm_qos_sysfs_mtx); + + return ret; +} +EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance); + +/** + * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace + * @dev: Device whose latency tolerance to hide + */ +void dev_pm_qos_hide_latency_tolerance(struct device *dev) +{ + mutex_lock(&dev_pm_qos_sysfs_mtx); + pm_qos_sysfs_remove_latency_tolerance(dev); + mutex_unlock(&dev_pm_qos_sysfs_mtx); + + /* Remove the request from user space now */ + pm_runtime_get_sync(dev); + dev_pm_qos_update_user_latency_tolerance(dev, + PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT); + pm_runtime_put(dev); +} +EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance); diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index d2be3f9c211c..a7b46798c81d 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -738,6 +738,17 @@ void pm_qos_sysfs_remove_flags(struct device *dev) sysfs_unmerge_group(&dev->kobj, &pm_qos_flags_attr_group); } +int pm_qos_sysfs_add_latency_tolerance(struct device *dev) +{ + return sysfs_merge_group(&dev->kobj, + &pm_qos_latency_tolerance_attr_group); +} + +void pm_qos_sysfs_remove_latency_tolerance(struct device *dev) +{ + sysfs_unmerge_group(&dev->kobj, &pm_qos_latency_tolerance_attr_group); +} + void rpm_sysfs_remove(struct device *dev) { sysfs_unmerge_group(&dev->kobj, &pm_runtime_attr_group); diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h index 7b3ae0cffc05..0f65d36c2a75 100644 --- a/include/linux/pm_qos.h +++ b/include/linux/pm_qos.h @@ -161,6 +161,8 @@ void dev_pm_qos_hide_flags(struct device *dev); int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set); s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev); int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val); +int dev_pm_qos_expose_latency_tolerance(struct device *dev); +void dev_pm_qos_hide_latency_tolerance(struct 
device *dev); static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { @@ -229,6 +231,9 @@ static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev) { return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; } static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val) { return 0; } +static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev) + { return 0; } +static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {} static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; } static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; } -- cgit v1.2.3 From 2e0fed7f7cdc41679e209c5636ad7537dc6210a9 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 27 Jul 2015 18:03:59 +0300 Subject: klist: implement klist_prev() klist_prev() gets the previous element in the list. It is useful for traversing the list in reverse order, for example to provide a LIFO (last in, first out) variant of access. Signed-off-by: Andy Shevchenko Acked-by: Greg Kroah-Hartman Signed-off-by: Lee Jones --- include/linux/klist.h | 1 + lib/klist.c | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 42 insertions(+) (limited to 'include/linux') diff --git a/include/linux/klist.h b/include/linux/klist.h index 61e5b723ae73..953f283f8451 100644 --- a/include/linux/klist.h +++ b/include/linux/klist.h @@ -63,6 +63,7 @@ extern void klist_iter_init(struct klist *k, struct klist_iter *i); extern void klist_iter_init_node(struct klist *k, struct klist_iter *i, struct klist_node *n); extern void klist_iter_exit(struct klist_iter *i); +extern struct klist_node *klist_prev(struct klist_iter *i); extern struct klist_node *klist_next(struct klist_iter *i); #endif diff --git a/lib/klist.c b/lib/klist.c index 89b485a2a58d..d74cf7a29afd 100644 --- a/lib/klist.c +++ b/lib/klist.c @@ -323,6 +323,47 @@ static struct klist_node *to_klist_node(struct list_head *n) return container_of(n, struct klist_node, n_node); } +/** + * klist_prev - Ante up prev node in list. + * @i: Iterator structure. + * + * First grab list lock. Decrement the reference count of the previous + * node, if there was one. Grab the prev node, increment its reference + * count, drop the lock, and return that prev node. + */ +struct klist_node *klist_prev(struct klist_iter *i) +{ + void (*put)(struct klist_node *) = i->i_klist->put; + struct klist_node *last = i->i_cur; + struct klist_node *prev; + + spin_lock(&i->i_klist->k_lock); + + if (last) { + prev = to_klist_node(last->n_node.prev); + if (!klist_dec_and_del(last)) + put = NULL; + } else + prev = to_klist_node(i->i_klist->k_list.prev); + + i->i_cur = NULL; + while (prev != to_klist_node(&i->i_klist->k_list)) { + if (likely(!knode_dead(prev))) { + kref_get(&prev->n_ref); + i->i_cur = prev; + break; + } + prev = to_klist_node(prev->n_node.prev); + } + + spin_unlock(&i->i_klist->k_lock); + + if (put && last) + put(last); + return i->i_cur; +} +EXPORT_SYMBOL_GPL(klist_prev); + /** * klist_next - Ante up next node in list. * @i: Iterator structure. -- cgit v1.2.3
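To show the intended call pattern, here is a minimal, hypothetical user of the new iterator — not part of the patch above. The struct my_obj wrapper and walk_backwards() are invented names for illustration; because klist_iter_init() starts with no current node, the first klist_prev() call yields the list's tail, so the walk visits nodes newest-first when entries were added with klist_add_tail():

	#include <linux/kernel.h>
	#include <linux/klist.h>

	struct my_obj {
		struct klist_node node;	/* hypothetical embedding */
		int id;
	};

	static void walk_backwards(struct klist *k)
	{
		struct klist_iter iter;
		struct klist_node *n;

		klist_iter_init(k, &iter);
		/* No current node yet, so klist_prev() begins at the tail. */
		while ((n = klist_prev(&iter))) {
			struct my_obj *obj = container_of(n, struct my_obj, node);

			pr_info("visiting object %d\n", obj->id);
		}
		klist_iter_exit(&iter);
	}

Note that klist_iter_exit() is safe to call whether or not the walk ran to completion, since klist_prev() leaves the cursor NULL once the head is reached.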
From 3d060aeb72113cda0acf906bfe26914fc689506a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 27 Jul 2015 18:04:00 +0300 Subject: driver core: implement device_for_each_child_reverse() The new function device_for_each_child_reverse() is helpful for traversing the registered devices in reverse order, e.g. when an operation on each device should be done first on the most recently added device, then on the one added before it, and so on. Signed-off-by: Andy Shevchenko Acked-by: Greg Kroah-Hartman Signed-off-by: Lee Jones --- drivers/base/core.c | 43 +++++++++++++++++++++++++++++++++++++++++++ include/linux/device.h | 2 ++ 2 files changed, 45 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/core.c b/drivers/base/core.c index dafae6d2f7ac..7d6279554afc 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -1252,6 +1252,19 @@ void device_unregister(struct device *dev) } EXPORT_SYMBOL_GPL(device_unregister); +static struct device *prev_device(struct klist_iter *i) +{ + struct klist_node *n = klist_prev(i); + struct device *dev = NULL; + struct device_private *p; + + if (n) { + p = to_device_private_parent(n); + dev = p->device; + } + return dev; +} + static struct device *next_device(struct klist_iter *i) { struct klist_node *n = klist_next(i); @@ -1340,6 +1353,36 @@ int device_for_each_child(struct device *parent, void *data, } EXPORT_SYMBOL_GPL(device_for_each_child); +/** + * device_for_each_child_reverse - device child iterator in reversed order. + * @parent: parent struct device. + * @fn: function to be called for each device. + * @data: data for the callback. + * + * Iterate over @parent's child devices, and call @fn for each, + * passing it @data. + * + * We check the return of @fn each time. If it returns anything + * other than 0, we break out and return that value. + */ +int device_for_each_child_reverse(struct device *parent, void *data, + int (*fn)(struct device *dev, void *data)) +{ + struct klist_iter i; + struct device *child; + int error = 0; + + if (!parent->p) + return 0; + + klist_iter_init(&parent->p->klist_children, &i); + while ((child = prev_device(&i)) && !error) + error = fn(child, data); + klist_iter_exit(&i); + return error; +} +EXPORT_SYMBOL_GPL(device_for_each_child_reverse); + /** * device_find_child - device iterator for locating a particular device. * @parent: parent struct device diff --git a/include/linux/device.h b/include/linux/device.h index 5a31bf3a4024..af6fbc35d8a6 100644 --- a/include/linux/device.h +++ b/include/linux/device.h @@ -958,6 +958,8 @@ extern int __must_check device_add(struct device *dev); extern void device_del(struct device *dev); extern int device_for_each_child(struct device *dev, void *data, int (*fn)(struct device *dev, void *data)); +extern int device_for_each_child_reverse(struct device *dev, void *data, + int (*fn)(struct device *dev, void *data)); extern struct device *device_find_child(struct device *dev, void *data, int (*match)(struct device *dev, void *data)); extern int device_rename(struct device *dev, const char *new_name); -- cgit v1.2.3
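A short usage sketch for the driver-core helper just added — hypothetical bus code, not taken from any commit here; quiesce_child() and mybus_quiesce_children() are invented names. The point is simply that children are visited in the reverse of their registration order, which is handy when later-registered devices depend on earlier ones:

	#include <linux/device.h>

	/* Hypothetical per-child callback; a non-zero return stops the walk. */
	static int quiesce_child(struct device *dev, void *data)
	{
		dev_dbg(dev, "quiescing\n");
		/* bus-specific quiesce work would go here */
		return 0;
	}

	static int mybus_quiesce_children(struct device *parent)
	{
		/* Newest child first, so dependents stop before their providers. */
		return device_for_each_child_reverse(parent, NULL, quiesce_child);
	}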
From ba2bbfbf63075850bb523e2adb815d45e3509995 Mon Sep 17 00:00:00 2001 From: Ulf Hansson Date: Thu, 18 Jun 2015 15:17:53 +0200 Subject: PM / Domains: Remove intermediate states from the power off sequence Genpd's ->runtime_suspend() (assigned to pm_genpd_runtime_suspend()) doesn't immediately walk the hierarchy of ->runtime_suspend() callbacks. Instead, pm_genpd_runtime_suspend() calls pm_genpd_poweroff() which postpones that until *all* the devices in the genpd are runtime suspended. When pm_genpd_poweroff() discovers that the last device in the genpd is about to be runtime suspended, it calls __pm_genpd_save_device() for *all* the devices in the genpd sequentially. Furthermore, __pm_genpd_save_device() invokes the ->start() callback, walks the hierarchy of the ->runtime_suspend() callbacks and invokes the ->stop() callback. This causes a "thundering herd" problem. Let's address this issue by having pm_genpd_runtime_suspend() immediately walk the hierarchy of the ->runtime_suspend() callbacks, instead of postponing that to the power off sequence via pm_genpd_poweroff(). If the selected ->runtime_suspend() callback doesn't return an error code, call pm_genpd_poweroff() to see if it's feasible to also power off the PM domain. Adopting this change enables us to simplify parts of the code in genpd, for example the locking mechanism. Additionally, it gives some positive side effects, as described below. i) One device's ->runtime_resume() latency is no longer affected by other devices' latencies in a genpd. The complexity genpd needs in order to support aborting the power off sequence causes latency issues. More precisely, a device that is requested to be runtime resumed may end up waiting for __pm_genpd_save_device() to complete its operations for *another* device. That's because pm_genpd_poweroff() can't confirm an abort request while it waits for __pm_genpd_save_device() to return. As this patch removes the intermediate states in pm_genpd_poweroff() while powering off the PM domain, we no longer need the ability to abort that sequence. ii) Make pm_runtime[_status]_suspended() reliable when used with genpd. Until the last device in a genpd becomes idle, pm_genpd_runtime_suspend() will return 0 without actually walking the hierarchy of the ->runtime_suspend() callbacks. However, because 0 is returned, the runtime PM core considers the device to be runtime suspended, so pm_runtime[_status]_suspended() will return true, even though the device isn't (yet) runtime suspended. After this patch, since pm_genpd_runtime_suspend() immediately walks the hierarchy of the ->runtime_suspend() callbacks, pm_runtime[_status]_suspended() will accurately reflect the status of the device. iii) Enable fine-grained PM through runtime PM callbacks in drivers/subsystems. There are currently cases where drivers/subsystems implement runtime PM callbacks to deploy fine-grained PM (e.g. gating clocks, moving pinctrl to a power-save state, etc.). When genpd is used, pm_genpd_runtime_suspend() postpones invoking these callbacks until *all* the devices in the genpd are runtime suspended. In essence, one runtime resumed device prevents fine-grained PM for other devices within the same genpd. After this patch, since pm_genpd_runtime_suspend() immediately walks the hierarchy of the ->runtime_suspend() callbacks, fine-grained PM is enabled throughout all the levels of runtime PM callbacks. iv) Enable fine-grained PM for IRQ safe devices. Per the definition of an IRQ safe device, its runtime PM callbacks must be able to execute in atomic context. In the path where genpd walks the hierarchy of the ->runtime_suspend() callbacks for the device, it uses a mutex; therefore, genpd prevents that path from being executed for IRQ safe devices. As this patch changes pm_genpd_runtime_suspend() to immediately walk the hierarchy of the ->runtime_suspend() callbacks without needing to use a mutex, fine-grained PM is enabled throughout all the levels of runtime PM callbacks for IRQ safe devices. Unfortunately, this patch also comes with a drawback, as described in the summary below.
Driver's/subsystem's runtime PM callbacks may be invoked even when the genpd hasn't actually powered off the PM domain, potentially introducing unnecessary latency. However, in most cases, saving/restoring register contexts for devices are typically fast operations or can be optimized in device specific ways (e.g. shadow copies of register contents in memory, device-specific checks to see if context has been lost before restoring context, etc.). Still, in some cases the driver/subsystem may suffer from latency if runtime PM is used in a very fine-grained manner (e.g. for each IO request or xfer). To prevent that extra overhead, the driver/subsystem may deploy the runtime PM autosuspend feature. Signed-off-by: Ulf Hansson Reviewed-by: Kevin Hilman Tested-by: Geert Uytterhoeven Tested-by: Lina Iyer Signed-off-by: Rafael J. Wysocki --- drivers/base/power/domain.c | 363 ++++++++------------------------------------ include/linux/pm_domain.h | 7 - 2 files changed, 62 insertions(+), 308 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 0ee43c1056e0..a1abe16dfe16 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -114,8 +114,12 @@ static int genpd_stop_dev(struct generic_pm_domain *genpd, struct device *dev) stop_latency_ns, "stop"); } -static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_start_dev(struct generic_pm_domain *genpd, struct device *dev, + bool timed) { + if (!timed) + return GENPD_DEV_CALLBACK(genpd, int, start, dev); + return GENPD_DEV_TIMED_CALLBACK(genpd, int, start, dev, start_latency_ns, "start"); } @@ -136,41 +140,6 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } -static void genpd_acquire_lock(struct generic_pm_domain *genpd) -{ - DEFINE_WAIT(wait); - - mutex_lock(&genpd->lock); - /* - * Wait for the domain to transition into either the active, - * or the power off state. - */ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status == GPD_STATE_ACTIVE - || genpd->status == GPD_STATE_POWER_OFF) - break; - mutex_unlock(&genpd->lock); - - schedule(); - - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); -} - -static void genpd_release_lock(struct generic_pm_domain *genpd) -{ - mutex_unlock(&genpd->lock); -} - -static void genpd_set_active(struct generic_pm_domain *genpd) -{ - if (genpd->resume_count == 0) - genpd->status = GPD_STATE_ACTIVE; -} - static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd) { s64 usecs64; @@ -251,35 +220,14 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool timed) * resume a device belonging to it. */ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) { struct gpd_link *link; - DEFINE_WAIT(wait); int ret = 0; - /* If the domain's master is being waited for, we have to wait too. 
*/ - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - if (genpd->status != GPD_STATE_WAIT_MASTER) - break; - mutex_unlock(&genpd->lock); - - schedule(); - - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); - if (genpd->status == GPD_STATE_ACTIVE || (genpd->prepared_count > 0 && genpd->suspend_power_off)) return 0; - if (genpd->status != GPD_STATE_POWER_OFF) { - genpd_set_active(genpd); - return 0; - } - if (genpd->cpuidle_data) { cpuidle_pause_and_lock(); genpd->cpuidle_data->idle_state->disabled = true; @@ -294,20 +242,8 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) */ list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_inc(link->master); - genpd->status = GPD_STATE_WAIT_MASTER; - - mutex_unlock(&genpd->lock); ret = pm_genpd_poweron(link->master); - - mutex_lock(&genpd->lock); - - /* - * The "wait for parent" status is guaranteed not to change - * while the master is powering on. - */ - genpd->status = GPD_STATE_POWER_OFF; - wake_up_all(&genpd->status_wait_queue); if (ret) { genpd_sd_counter_dec(link->master); goto err; @@ -319,8 +255,7 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd) goto err; out: - genpd_set_active(genpd); - + genpd->status = GPD_STATE_ACTIVE; return 0; err: @@ -356,20 +291,18 @@ int pm_genpd_name_poweron(const char *domain_name) return genpd ? pm_genpd_poweron(genpd) : -EINVAL; } -static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd, - struct device *dev) -{ - return GENPD_DEV_CALLBACK(genpd, int, start, dev); -} - static int genpd_save_dev(struct generic_pm_domain *genpd, struct device *dev) { return GENPD_DEV_TIMED_CALLBACK(genpd, int, save_state, dev, save_state_latency_ns, "state save"); } -static int genpd_restore_dev(struct generic_pm_domain *genpd, struct device *dev) +static int genpd_restore_dev(struct generic_pm_domain *genpd, + struct device *dev, bool timed) { + if (!timed) + return GENPD_DEV_CALLBACK(genpd, int, restore_state, dev); + return GENPD_DEV_TIMED_CALLBACK(genpd, int, restore_state, dev, restore_state_latency_ns, "state restore"); @@ -415,89 +348,6 @@ static int genpd_dev_pm_qos_notifier(struct notifier_block *nb, return NOTIFY_DONE; } -/** - * __pm_genpd_save_device - Save the pre-suspend state of a device. - * @pdd: Domain data of the device to save the state of. - * @genpd: PM domain the device belongs to. - */ -static int __pm_genpd_save_device(struct pm_domain_data *pdd, - struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) -{ - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - struct device *dev = pdd->dev; - int ret = 0; - - if (gpd_data->need_restore > 0) - return 0; - - /* - * If the value of the need_restore flag is still unknown at this point, - * we trust that pm_genpd_poweroff() has verified that the device is - * already runtime PM suspended. - */ - if (gpd_data->need_restore < 0) { - gpd_data->need_restore = 1; - return 0; - } - - mutex_unlock(&genpd->lock); - - genpd_start_dev(genpd, dev); - ret = genpd_save_dev(genpd, dev); - genpd_stop_dev(genpd, dev); - - mutex_lock(&genpd->lock); - - if (!ret) - gpd_data->need_restore = 1; - - return ret; -} - -/** - * __pm_genpd_restore_device - Restore the pre-suspend state of a device. - * @pdd: Domain data of the device to restore the state of. - * @genpd: PM domain the device belongs to. 
- */ -static void __pm_genpd_restore_device(struct pm_domain_data *pdd, - struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) -{ - struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); - struct device *dev = pdd->dev; - int need_restore = gpd_data->need_restore; - - gpd_data->need_restore = 0; - mutex_unlock(&genpd->lock); - - genpd_start_dev(genpd, dev); - - /* - * Call genpd_restore_dev() for recently added devices too (need_restore - * is negative then). - */ - if (need_restore) - genpd_restore_dev(genpd, dev); - - mutex_lock(&genpd->lock); -} - -/** - * genpd_abort_poweroff - Check if a PM domain power off should be aborted. - * @genpd: PM domain to check. - * - * Return true if a PM domain's status changed to GPD_STATE_ACTIVE during - * a "power off" operation, which means that a "power on" has occured in the - * meantime, or if its resume_count field is different from zero, which means - * that one of its devices has been resumed in the meantime. - */ -static bool genpd_abort_poweroff(struct generic_pm_domain *genpd) -{ - return genpd->status == GPD_STATE_WAIT_MASTER - || genpd->status == GPD_STATE_ACTIVE || genpd->resume_count > 0; -} - /** * genpd_queue_power_off_work - Queue up the execution of pm_genpd_poweroff(). * @genpd: PM domait to power off. @@ -515,34 +365,26 @@ static void genpd_queue_power_off_work(struct generic_pm_domain *genpd) * @genpd: PM domain to power down. * * If all of the @genpd's devices have been suspended and all of its subdomains - * have been powered down, run the runtime suspend callbacks provided by all of - * the @genpd's devices' drivers and remove power from @genpd. + * have been powered down, remove power from @genpd. */ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) - __releases(&genpd->lock) __acquires(&genpd->lock) { struct pm_domain_data *pdd; struct gpd_link *link; - unsigned int not_suspended; - int ret = 0; + unsigned int not_suspended = 0; - start: /* * Do not try to power off the domain in the following situations: * (1) The domain is already in the "power off" state. - * (2) The domain is waiting for its master to power up. - * (3) One of the domain's devices is being resumed right now. - * (4) System suspend is in progress. + * (2) System suspend is in progress. */ if (genpd->status == GPD_STATE_POWER_OFF - || genpd->status == GPD_STATE_WAIT_MASTER - || genpd->resume_count > 0 || genpd->prepared_count > 0) + || genpd->prepared_count > 0) return 0; if (atomic_read(&genpd->sd_count) > 0) return -EBUSY; - not_suspended = 0; list_for_each_entry(pdd, &genpd->dev_list, list_node) { enum pm_qos_flags_status stat; @@ -560,41 +402,11 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) if (not_suspended > genpd->in_progress) return -EBUSY; - if (genpd->poweroff_task) { - /* - * Another instance of pm_genpd_poweroff() is executing - * callbacks, so tell it to start over and return. - */ - genpd->status = GPD_STATE_REPEAT; - return 0; - } - if (genpd->gov && genpd->gov->power_down_ok) { if (!genpd->gov->power_down_ok(&genpd->domain)) return -EAGAIN; } - genpd->status = GPD_STATE_BUSY; - genpd->poweroff_task = current; - - list_for_each_entry_reverse(pdd, &genpd->dev_list, list_node) { - ret = atomic_read(&genpd->sd_count) == 0 ? 
- __pm_genpd_save_device(pdd, genpd) : -EBUSY; - - if (genpd_abort_poweroff(genpd)) - goto out; - - if (ret) { - genpd_set_active(genpd); - goto out; - } - - if (genpd->status == GPD_STATE_REPEAT) { - genpd->poweroff_task = NULL; - goto start; - } - } - if (genpd->cpuidle_data) { /* * If cpuidle_data is set, cpuidle should turn the domain off @@ -607,14 +419,14 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) cpuidle_pause_and_lock(); genpd->cpuidle_data->idle_state->disabled = false; cpuidle_resume_and_unlock(); - goto out; + return 0; } if (genpd->power_off) { - if (atomic_read(&genpd->sd_count) > 0) { - ret = -EBUSY; - goto out; - } + int ret; + + if (atomic_read(&genpd->sd_count) > 0) + return -EBUSY; /* * If sd_count > 0 at this point, one of the subdomains hasn't @@ -625,10 +437,8 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) * happen very often). */ ret = genpd_power_off(genpd, true); - if (ret == -EBUSY) { - genpd_set_active(genpd); - goto out; - } + if (ret) + return ret; } genpd->status = GPD_STATE_POWER_OFF; @@ -638,10 +448,7 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd) genpd_queue_power_off_work(link->master); } - out: - genpd->poweroff_task = NULL; - wake_up_all(&genpd->status_wait_queue); - return ret; + return 0; } /** @@ -654,9 +461,9 @@ static void genpd_power_off_work_fn(struct work_struct *work) genpd = container_of(work, struct generic_pm_domain, power_off_work); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); pm_genpd_poweroff(genpd); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); } /** @@ -670,7 +477,6 @@ static void genpd_power_off_work_fn(struct work_struct *work) static int pm_genpd_runtime_suspend(struct device *dev) { struct generic_pm_domain *genpd; - struct generic_pm_domain_data *gpd_data; bool (*stop_ok)(struct device *__dev); int ret; @@ -684,10 +490,16 @@ static int pm_genpd_runtime_suspend(struct device *dev) if (stop_ok && !stop_ok(dev)) return -EBUSY; - ret = genpd_stop_dev(genpd, dev); + ret = genpd_save_dev(genpd, dev); if (ret) return ret; + ret = genpd_stop_dev(genpd, dev); + if (ret) { + genpd_restore_dev(genpd, dev, true); + return ret; + } + /* * If power.irq_safe is set, this routine will be run with interrupts * off, so it can't use mutexes. @@ -696,16 +508,6 @@ static int pm_genpd_runtime_suspend(struct device *dev) return 0; mutex_lock(&genpd->lock); - - /* - * If we have an unknown state of the need_restore flag, it means none - * of the runtime PM callbacks has been invoked yet. Let's update the - * flag to reflect that the current state is active. - */ - gpd_data = to_gpd_data(dev->power.subsys_data->domain_data); - if (gpd_data->need_restore < 0) - gpd_data->need_restore = 0; - genpd->in_progress++; pm_genpd_poweroff(genpd); genpd->in_progress--; @@ -725,8 +527,8 @@ static int pm_genpd_runtime_suspend(struct device *dev) static int pm_genpd_runtime_resume(struct device *dev) { struct generic_pm_domain *genpd; - DEFINE_WAIT(wait); int ret; + bool timed = true; dev_dbg(dev, "%s()\n", __func__); @@ -735,39 +537,21 @@ static int pm_genpd_runtime_resume(struct device *dev) return -EINVAL; /* If power.irq_safe, the PM domain is never powered off. 
*/ - if (dev->power.irq_safe) - return genpd_start_dev_no_timing(genpd, dev); + if (dev->power.irq_safe) { + timed = false; + goto out; + } mutex_lock(&genpd->lock); ret = __pm_genpd_poweron(genpd); - if (ret) { - mutex_unlock(&genpd->lock); - return ret; - } - genpd->status = GPD_STATE_BUSY; - genpd->resume_count++; - for (;;) { - prepare_to_wait(&genpd->status_wait_queue, &wait, - TASK_UNINTERRUPTIBLE); - /* - * If current is the powering off task, we have been called - * reentrantly from one of the device callbacks, so we should - * not wait. - */ - if (!genpd->poweroff_task || genpd->poweroff_task == current) - break; - mutex_unlock(&genpd->lock); + mutex_unlock(&genpd->lock); - schedule(); + if (ret) + return ret; - mutex_lock(&genpd->lock); - } - finish_wait(&genpd->status_wait_queue, &wait); - __pm_genpd_restore_device(dev->power.subsys_data->domain_data, genpd); - genpd->resume_count--; - genpd_set_active(genpd); - wake_up_all(&genpd->status_wait_queue); - mutex_unlock(&genpd->lock); + out: + genpd_start_dev(genpd, dev, timed); + genpd_restore_dev(genpd, dev, timed); return 0; } @@ -883,7 +667,7 @@ static void pm_genpd_sync_poweron(struct generic_pm_domain *genpd, { struct gpd_link *link; - if (genpd->status != GPD_STATE_POWER_OFF) + if (genpd->status == GPD_STATE_ACTIVE) return; list_for_each_entry(link, &genpd->slave_links, slave_node) { @@ -960,14 +744,14 @@ static int pm_genpd_prepare(struct device *dev) if (resume_needed(dev, genpd)) pm_runtime_resume(dev); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count++ == 0) { genpd->suspended_count = 0; genpd->suspend_power_off = genpd->status == GPD_STATE_POWER_OFF; } - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (genpd->suspend_power_off) { pm_runtime_put_noidle(dev); @@ -1102,7 +886,7 @@ static int pm_genpd_resume_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); genpd->suspended_count--; - return genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev, true); } /** @@ -1230,7 +1014,7 @@ static int pm_genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - return genpd->suspend_power_off ? 0 : genpd_start_dev(genpd, dev); + return genpd->suspend_power_off ? 
0 : genpd_start_dev(genpd, dev, true); } /** @@ -1324,7 +1108,7 @@ static int pm_genpd_restore_noirq(struct device *dev) pm_genpd_sync_poweron(genpd, true); - return genpd_start_dev(genpd, dev); + return genpd_start_dev(genpd, dev, true); } /** @@ -1440,7 +1224,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, gpd_data->td = *td; gpd_data->base.dev = dev; - gpd_data->need_restore = -1; gpd_data->td.constraint_changed = true; gpd_data->td.effective_constraint_ns = -1; gpd_data->nb.notifier_call = genpd_dev_pm_qos_notifier; @@ -1502,7 +1285,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (IS_ERR(gpd_data)) return PTR_ERR(gpd_data); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1519,7 +1302,7 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); if (ret) genpd_free_dev_data(dev, gpd_data); @@ -1563,7 +1346,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, gpd_data = to_gpd_data(pdd); dev_pm_qos_remove_notifier(dev, &gpd_data->nb); - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->prepared_count > 0) { ret = -EAGAIN; @@ -1578,14 +1361,14 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd, list_del_init(&pdd->list_node); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); genpd_free_dev_data(dev, gpd_data); return 0; out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); dev_pm_qos_add_notifier(dev, &gpd_data->nb); return ret; @@ -1606,17 +1389,9 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, || genpd == subdomain) return -EINVAL; - start: - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); - if (subdomain->status != GPD_STATE_POWER_OFF - && subdomain->status != GPD_STATE_ACTIVE) { - mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - goto start; - } - if (genpd->status == GPD_STATE_POWER_OFF && subdomain->status != GPD_STATE_POWER_OFF) { ret = -EINVAL; @@ -1644,7 +1419,7 @@ int pm_genpd_add_subdomain(struct generic_pm_domain *genpd, out: mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; } @@ -1692,8 +1467,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, if (IS_ERR_OR_NULL(genpd) || IS_ERR_OR_NULL(subdomain)) return -EINVAL; - start: - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); list_for_each_entry(link, &genpd->master_links, master_node) { if (link->slave != subdomain) @@ -1701,13 +1475,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, mutex_lock_nested(&subdomain->lock, SINGLE_DEPTH_NESTING); - if (subdomain->status != GPD_STATE_POWER_OFF - && subdomain->status != GPD_STATE_ACTIVE) { - mutex_unlock(&subdomain->lock); - genpd_release_lock(genpd); - goto start; - } - list_del(&link->master_node); list_del(&link->slave_node); kfree(link); @@ -1720,7 +1487,7 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd, break; } - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; } @@ -1744,7 +1511,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) if (IS_ERR_OR_NULL(genpd) || state < 0) return -EINVAL; - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); if (genpd->cpuidle_data) { ret = -EEXIST; @@ -1775,7 +1542,7 
@@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state) genpd_recalc_cpu_exit_latency(genpd); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; err: @@ -1812,7 +1579,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) if (IS_ERR_OR_NULL(genpd)) return -EINVAL; - genpd_acquire_lock(genpd); + mutex_lock(&genpd->lock); cpuidle_data = genpd->cpuidle_data; if (!cpuidle_data) { @@ -1830,7 +1597,7 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd) kfree(cpuidle_data); out: - genpd_release_lock(genpd); + mutex_unlock(&genpd->lock); return ret; } @@ -1912,9 +1679,6 @@ void pm_genpd_init(struct generic_pm_domain *genpd, genpd->in_progress = 0; atomic_set(&genpd->sd_count, 0); genpd->status = is_off ? GPD_STATE_POWER_OFF : GPD_STATE_ACTIVE; - init_waitqueue_head(&genpd->status_wait_queue); - genpd->poweroff_task = NULL; - genpd->resume_count = 0; genpd->device_count = 0; genpd->max_off_time_ns = -1; genpd->max_off_time_changed = true; @@ -2293,9 +2057,6 @@ static int pm_genpd_summary_one(struct seq_file *s, { static const char * const status_lookup[] = { [GPD_STATE_ACTIVE] = "on", - [GPD_STATE_WAIT_MASTER] = "wait-master", - [GPD_STATE_BUSY] = "busy", - [GPD_STATE_REPEAT] = "off-in-progress", [GPD_STATE_POWER_OFF] = "off" }; struct pm_domain_data *pm_data; diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index 681ccb053f72..b2725e6e8e7b 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -22,9 +22,6 @@ enum gpd_status { GPD_STATE_ACTIVE = 0, /* PM domain is active */ - GPD_STATE_WAIT_MASTER, /* PM domain's master is being waited for */ - GPD_STATE_BUSY, /* Something is happening to the PM domain */ - GPD_STATE_REPEAT, /* Power off in progress, to be repeated */ GPD_STATE_POWER_OFF, /* PM domain is off */ }; @@ -59,9 +56,6 @@ struct generic_pm_domain { unsigned int in_progress; /* Number of devices being suspended now */ atomic_t sd_count; /* Number of subdomains with power "on" */ enum gpd_status status; /* Current state of the domain */ - wait_queue_head_t status_wait_queue; - struct task_struct *poweroff_task; /* Powering off task */ - unsigned int resume_count; /* Number of devices being resumed */ unsigned int device_count; /* Number of devices */ unsigned int suspended_count; /* System suspend device counter */ unsigned int prepared_count; /* Suspend counter of prepared devices */ @@ -113,7 +107,6 @@ struct generic_pm_domain_data { struct pm_domain_data base; struct gpd_timing_data td; struct notifier_block nb; - int need_restore; }; #ifdef CONFIG_PM_GENERIC_DOMAINS -- cgit v1.2.3 From 3ca9bb33c627f22640cfca97fdf88eec0a120dfd Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 29 Jul 2015 16:23:03 +0530 Subject: PM / OPP: Add clock-latency-ns support With "operating-points-v2" bindings, clock-latency is defined per OPP. Users of this value expect a single value which defines the latency to switch to any clock rate. Find maximum clock-latency-ns from the OPP table to service requests from such users. Reviewed-by: Stephen Boyd Reviewed-by: Bartlomiej Zolnierkiewicz Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. 
Wysocki --- drivers/base/power/opp.c | 41 +++++++++++++++++++++++++++++++++++++++-- include/linux/pm_opp.h | 6 ++++++ 2 files changed, 45 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 0e0eff4d9299..8638204c457e 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -57,6 +57,8 @@ * @u_volt_min: Minimum voltage in microvolts corresponding to this OPP * @u_volt_max: Maximum voltage in microvolts corresponding to this OPP * @u_amp: Maximum current drawn by the device in microamperes + * @clock_latency_ns: Latency (in nanoseconds) of switching to this OPP's + * frequency from any other OPP's frequency. * @dev_opp: points back to the device_opp struct this opp belongs to * @rcu_head: RCU callback head used for deferred freeing * @np: OPP's device node. @@ -75,6 +77,7 @@ struct dev_pm_opp { unsigned long u_volt_min; unsigned long u_volt_max; unsigned long u_amp; + unsigned long clock_latency_ns; struct device_opp *dev_opp; struct rcu_head rcu_head; @@ -109,6 +112,8 @@ struct device_opp { struct srcu_notifier_head srcu_head; struct rcu_head rcu_head; struct list_head opp_list; + + unsigned long clock_latency_ns_max; }; /* @@ -225,6 +230,32 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) } EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); +/** + * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds + * @dev: device for which we do this operation + * + * Return: This function returns the max clock latency in nanoseconds. + * + * Locking: This function takes rcu_read_lock(). + */ +unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) +{ + struct device_opp *dev_opp; + unsigned long clock_latency_ns; + + rcu_read_lock(); + + dev_opp = _find_device_opp(dev); + if (IS_ERR(dev_opp)) + clock_latency_ns = 0; + else + clock_latency_ns = dev_opp->clock_latency_ns_max; + + rcu_read_unlock(); + return clock_latency_ns; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_get_max_clock_latency); + /** * dev_pm_opp_get_opp_count() - Get number of opps available in the opp list * @dev: device for which we do this operation @@ -779,6 +810,8 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) new_opp->np = np; new_opp->dynamic = false; new_opp->available = true; + of_property_read_u32(np, "clock-latency-ns", + (u32 *)&new_opp->clock_latency_ns); ret = opp_get_microvolt(new_opp, dev); if (ret) @@ -790,11 +823,15 @@ static int _opp_add_static_v2(struct device *dev, struct device_node *np) if (ret) goto free_opp; + if (new_opp->clock_latency_ns > dev_opp->clock_latency_ns_max) + dev_opp->clock_latency_ns_max = new_opp->clock_latency_ns; + mutex_unlock(&dev_opp_list_lock); - pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu\n", + pr_debug("%s: turbo:%d rate:%lu uv:%lu uvmin:%lu uvmax:%lu latency:%lu\n", __func__, new_opp->turbo, new_opp->rate, new_opp->u_volt, - new_opp->u_volt_min, new_opp->u_volt_max); + new_opp->u_volt_min, new_opp->u_volt_max, + new_opp->clock_latency_ns); /* * Notify the changes in the availability of the operable diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index cec2d4540914..20324b579adc 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -31,6 +31,7 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); int dev_pm_opp_get_opp_count(struct device *dev); +unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); struct dev_pm_opp 
*dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, @@ -67,6 +68,11 @@ static inline int dev_pm_opp_get_opp_count(struct device *dev) return 0; } +static inline unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev) +{ + return 0; +} + static inline struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev, unsigned long freq, bool available) { -- cgit v1.2.3 From 8d4d4e98acd68c31435ebb7beea591dbf60b9eb2 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Fri, 12 Jun 2015 17:10:38 +0530 Subject: PM / OPP: Add helpers for initializing CPU OPPs With "operating-points-v2" it's possible to tell which devices share OPPs. We already have infrastructure to decode that information. This patch adds the following APIs: - of_get_cpus_sharing_opps: Returns cpumask of CPUs sharing OPPs (only valid with v2 bindings). - of_cpumask_init_opp_table: Initializes OPPs for all CPUs present in cpumask. - of_cpumask_free_opp_table: Frees OPPs for all CPUs present in cpumask. - set_cpus_sharing_opps: Sets which CPUs share OPPs (only valid with old OPP bindings, as this information isn't present in DT). Reviewed-by: Stephen Boyd Reviewed-by: Bartlomiej Zolnierkiewicz Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/base/power/opp.c | 175 +++++++++++++++++++++++++++++++++++++++++++++++ include/linux/pm_opp.h | 23 +++++++ 2 files changed, 198 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 0ebcea49145a..663aae1c9834 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -11,6 +11,7 @@ * published by the Free Software Foundation. */ +#include #include #include #include @@ -1195,6 +1196,26 @@ unlock: } EXPORT_SYMBOL_GPL(of_free_opp_table); +void of_cpumask_free_opp_table(cpumask_var_t cpumask) +{ + struct device *cpu_dev; + int cpu; + + WARN_ON(cpumask_empty(cpumask)); + + for_each_cpu(cpu, cpumask) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_err("%s: failed to get cpu%d device\n", __func__, + cpu); + continue; + } + + of_free_opp_table(cpu_dev); + } +} +EXPORT_SYMBOL_GPL(of_cpumask_free_opp_table); + /* Returns opp descriptor node from its phandle. Caller must do of_node_put() */ static struct device_node * _of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop) @@ -1211,6 +1232,31 @@ _of_get_opp_desc_node_from_prop(struct device *dev, const struct property *prop) return opp_np; } +/* Returns opp descriptor node for a device. Caller must do of_node_put() */ +static struct device_node *_of_get_opp_desc_node(struct device *dev) +{ + const struct property *prop; + + prop = of_find_property(dev->of_node, "operating-points-v2", NULL); + if (!prop) + return ERR_PTR(-ENODEV); + if (!prop->value) + return ERR_PTR(-ENODATA); + + /* + * TODO: Support for multiple OPP tables. + * + * There should be only ONE phandle present in "operating-points-v2" + * property.
+ */ + if (prop->length != sizeof(__be32)) { + dev_err(dev, "%s: Invalid opp desc phandle\n", __func__); + return ERR_PTR(-EINVAL); + } + + return _of_get_opp_desc_node_from_prop(dev, prop); +} + /* Initializes OPP tables based on new bindings */ static int _of_init_opp_table_v2(struct device *dev, const struct property *prop) @@ -1351,4 +1397,133 @@ int of_init_opp_table(struct device *dev) return _of_init_opp_table_v2(dev, prop); } EXPORT_SYMBOL_GPL(of_init_opp_table); + +int of_cpumask_init_opp_table(cpumask_var_t cpumask) +{ + struct device *cpu_dev; + int cpu, ret = 0; + + WARN_ON(cpumask_empty(cpumask)); + + for_each_cpu(cpu, cpumask) { + cpu_dev = get_cpu_device(cpu); + if (!cpu_dev) { + pr_err("%s: failed to get cpu%d device\n", __func__, + cpu); + continue; + } + + ret = of_init_opp_table(cpu_dev); + if (ret) { + pr_err("%s: couldn't find opp table for cpu:%d, %d\n", + __func__, cpu, ret); + + /* Free all other OPPs */ + of_cpumask_free_opp_table(cpumask); + break; + } + } + + return ret; +} +EXPORT_SYMBOL_GPL(of_cpumask_init_opp_table); + +/* Required only for V1 bindings, as v2 can manage it from DT itself */ +int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) +{ + struct device_list_opp *list_dev; + struct device_opp *dev_opp; + struct device *dev; + int cpu, ret = 0; + + rcu_read_lock(); + + dev_opp = _find_device_opp(cpu_dev); + if (IS_ERR(dev_opp)) { + ret = -EINVAL; + goto out_rcu_read_unlock; + } + + for_each_cpu(cpu, cpumask) { + if (cpu == cpu_dev->id) + continue; + + dev = get_cpu_device(cpu); + if (!dev) { + dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + __func__, cpu); + continue; + } + + list_dev = _add_list_dev(dev, dev_opp); + if (!list_dev) { + dev_err(dev, "%s: failed to add list-dev for cpu%d device\n", + __func__, cpu); + continue; + } + } +out_rcu_read_unlock: + rcu_read_unlock(); + + return 0; +} +EXPORT_SYMBOL_GPL(set_cpus_sharing_opps); + +/* + * Works only for OPP v2 bindings. + * + * cpumask should be already set to mask of cpu_dev->id. + * Returns -ENOENT if operating-points-v2 bindings aren't supported. + */ +int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) +{ + struct device_node *np, *tmp_np; + struct device *tcpu_dev; + int cpu, ret = 0; + + /* Get OPP descriptor node */ + np = _of_get_opp_desc_node(cpu_dev); + if (IS_ERR(np)) { + dev_dbg(cpu_dev, "%s: Couldn't find opp node: %ld\n", __func__, + PTR_ERR(np)); + return -ENOENT; + } + + /* OPPs are shared ? 
*/ + if (!of_property_read_bool(np, "opp-shared")) + goto put_cpu_node; + + for_each_possible_cpu(cpu) { + if (cpu == cpu_dev->id) + continue; + + tcpu_dev = get_cpu_device(cpu); + if (!tcpu_dev) { + dev_err(cpu_dev, "%s: failed to get cpu%d device\n", + __func__, cpu); + ret = -ENODEV; + goto put_cpu_node; + } + + /* Get OPP descriptor node */ + tmp_np = _of_get_opp_desc_node(tcpu_dev); + if (IS_ERR(tmp_np)) { + dev_err(tcpu_dev, "%s: Couldn't find opp node: %ld\n", + __func__, PTR_ERR(tmp_np)); + ret = PTR_ERR(tmp_np); + goto put_cpu_node; + } + + /* CPUs are sharing opp node */ + if (np == tmp_np) + cpumask_set_cpu(cpu, cpumask); + + of_node_put(tmp_np); + } + +put_cpu_node: + of_node_put(np); + return ret; +} +EXPORT_SYMBOL_GPL(of_get_cpus_sharing_opps); #endif diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 20324b579adc..bb52fae5b921 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -121,6 +121,10 @@ static inline struct srcu_notifier_head *dev_pm_opp_get_notifier( #if defined(CONFIG_PM_OPP) && defined(CONFIG_OF) int of_init_opp_table(struct device *dev); void of_free_opp_table(struct device *dev); +int of_cpumask_init_opp_table(cpumask_var_t cpumask); +void of_cpumask_free_opp_table(cpumask_var_t cpumask); +int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask); +int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask); #else static inline int of_init_opp_table(struct device *dev) { @@ -130,6 +134,25 @@ static inline int of_init_opp_table(struct device *dev) static inline void of_free_opp_table(struct device *dev) { } + +static inline int of_cpumask_init_opp_table(cpumask_var_t cpumask) +{ + return -ENOSYS; +} + +static inline void of_cpumask_free_opp_table(cpumask_var_t cpumask) +{ +} + +static inline int of_get_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) +{ + return -ENOSYS; +} + +static inline int set_cpus_sharing_opps(struct device *cpu_dev, cpumask_var_t cpumask) +{ + return -ENOSYS; +} #endif #endif /* __LINUX_OPP_H__ */ -- cgit v1.2.3 From 19445b25e350ebebaa304bb2135619f643302947 Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 9 Jul 2015 17:43:35 +0200 Subject: PM / OPP: add dev_pm_opp_is_turbo() helper Add dev_pm_opp_is_turbo() helper to verify if an opp is to be used only for turbo mode or not. Reviewed-by: Stephen Boyd Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/base/power/opp.c | 34 ++++++++++++++++++++++++++++++++++ include/linux/pm_opp.h | 7 +++++++ 2 files changed, 41 insertions(+) (limited to 'include/linux') diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c index 663aae1c9834..204c6c945168 100644 --- a/drivers/base/power/opp.c +++ b/drivers/base/power/opp.c @@ -280,6 +280,40 @@ unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) } EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq); +/** + * dev_pm_opp_is_turbo() - Returns if opp is turbo OPP or not + * @opp: opp for which turbo mode is being verified + * + * Turbo OPPs are not for normal use, and can be enabled (under certain + * conditions) for short duration of times to finish high throughput work + * quickly. Running on them for longer times may overheat the chip. + * + * Return: true if opp is turbo opp, else false. + * + * Locking: This function must be called under rcu_read_lock(). opp is a rcu + * protected pointer. 
This means that opp which could have been fetched by + * opp_find_freq_{exact,ceil,floor} functions is valid as long as we are + * under RCU lock. The pointer returned by the opp_find_freq family must be + * used in the same section as the usage of this function with the pointer + * prior to unlocking with rcu_read_unlock() to maintain the integrity of the + * pointer. + */ +bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) +{ + struct dev_pm_opp *tmp_opp; + + opp_rcu_lockdep_assert(); + + tmp_opp = rcu_dereference(opp); + if (IS_ERR_OR_NULL(tmp_opp) || !tmp_opp->available) { + pr_err("%s: Invalid parameters\n", __func__); + return false; + } + + return tmp_opp->turbo; +} +EXPORT_SYMBOL_GPL(dev_pm_opp_is_turbo); + /** * dev_pm_opp_get_max_clock_latency() - Get max clock latency in nanoseconds * @dev: device for which we do this operation diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index bb52fae5b921..cab7ba55bedb 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h @@ -30,6 +30,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); +bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); + int dev_pm_opp_get_opp_count(struct device *dev); unsigned long dev_pm_opp_get_max_clock_latency(struct device *dev); @@ -63,6 +65,11 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) return 0; } +static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) +{ + return false; +} + static inline int dev_pm_opp_get_opp_count(struct device *dev) { return 0; } -- cgit v1.2.3
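A short, illustrative caller for dev_pm_opp_is_turbo() — not taken from any commit here; freq_is_turbo() is an invented helper name. Per the kernel-doc above, the OPP pointer is only valid under RCU, so the lookup and the turbo test must share one rcu_read_lock() section:

	#include <linux/err.h>
	#include <linux/pm_opp.h>
	#include <linux/rcupdate.h>

	/* Sketch: check whether an exact frequency maps to a turbo OPP. */
	static bool freq_is_turbo(struct device *dev, unsigned long freq)
	{
		struct dev_pm_opp *opp;
		bool turbo = false;

		rcu_read_lock();	/* OPP pointers are RCU-protected */
		opp = dev_pm_opp_find_freq_exact(dev, freq, true);
		if (!IS_ERR(opp))
			turbo = dev_pm_opp_is_turbo(opp);
		rcu_read_unlock();

		return turbo;
	}

A governor or cpufreq driver could use such a check to avoid treating turbo OPPs as sustainable operating points.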
From 44139ed4943ee8ec186eea3e9072ca16d2b48133 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Wed, 29 Jul 2015 16:23:09 +0530 Subject: cpufreq: Allow drivers to enable boost support after registering driver In some cases it wouldn't be known at the time of driver registration whether the driver needs to support boost frequencies. For example, while getting boost information from DT with opp-v2 bindings, we need to parse the bindings for all the CPUs to know if turbo/boost OPPs are supported or not. One way to do that efficiently is to delay supporting boost mode (i.e. creating the /sys/devices/system/cpu/cpufreq/boost file) until the OPP bindings are parsed. At that point, the driver can enable boost support. This can be done at ->init(), where the frequency table is created. To do that, the driver requires a few APIs from the cpufreq core that let it do this. This patch provides these APIs. Signed-off-by: Viresh Kumar Reviewed-by: Stephen Boyd Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 68 +++++++++++++++++++++++++++++++------------- drivers/cpufreq/freq_table.c | 15 ++++++++++ include/linux/cpufreq.h | 12 ++++++++ 3 files changed, 75 insertions(+), 20 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 76a26609d96b..e48242119d77 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2437,6 +2437,49 @@ int cpufreq_boost_supported(void) } EXPORT_SYMBOL_GPL(cpufreq_boost_supported); +static int create_boost_sysfs_file(void) +{ + int ret; + + if (!cpufreq_boost_supported()) + return 0; + + /* + * Check if driver provides function to enable boost - + * if not, use cpufreq_boost_set_sw as default + */ + if (!cpufreq_driver->set_boost) + cpufreq_driver->set_boost = cpufreq_boost_set_sw; + + ret = cpufreq_sysfs_create_file(&boost.attr); + if (ret) + pr_err("%s: cannot register global BOOST sysfs file\n", + __func__); + + return ret; +} + +static void remove_boost_sysfs_file(void) +{ + if (cpufreq_boost_supported()) + cpufreq_sysfs_remove_file(&boost.attr); +} + +int cpufreq_enable_boost_support(void) +{ + if (!cpufreq_driver) + return -EINVAL; + + if (cpufreq_boost_supported()) + return 0; + + cpufreq_driver->boost_supported = true; + + /* This will get removed on driver unregister */ + return create_boost_sysfs_file(); +} +EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support); + int cpufreq_boost_enabled(void) { return cpufreq_driver->boost_enabled; @@ -2490,21 +2533,9 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data) if (driver_data->setpolicy) driver_data->flags |= CPUFREQ_CONST_LOOPS; - if (cpufreq_boost_supported()) { - /* - * Check if driver provides function to enable boost - - * if not, use cpufreq_boost_set_sw as default - */ - if (!cpufreq_driver->set_boost) - cpufreq_driver->set_boost = cpufreq_boost_set_sw; - - ret = cpufreq_sysfs_create_file(&boost.attr); - if (ret) { - pr_err("%s: cannot register global BOOST sysfs file\n", - __func__); - goto err_null_driver; - } - } + ret = create_boost_sysfs_file(); + if (ret) + goto err_null_driver; ret = subsys_interface_register(&cpufreq_interface); if (ret) @@ -2528,8 +2559,7 @@ out: err_if_unreg: subsys_interface_unregister(&cpufreq_interface); err_boost_unreg: - if (cpufreq_boost_supported()) - cpufreq_sysfs_remove_file(&boost.attr); + remove_boost_sysfs_file(); err_null_driver: write_lock_irqsave(&cpufreq_driver_lock, flags); cpufreq_driver = NULL; @@ -2558,9 +2588,7 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver) /* Protect against concurrent cpu hotplug */ get_online_cpus(); subsys_interface_unregister(&cpufreq_interface); - if (cpufreq_boost_supported()) - cpufreq_sysfs_remove_file(&boost.attr); - + remove_boost_sysfs_file(); unregister_hotcpu_notifier(&cpufreq_cpu_notifier); write_lock_irqsave(&cpufreq_driver_lock, flags); diff --git a/drivers/cpufreq/freq_table.c b/drivers/cpufreq/freq_table.c index dfbbf981ed56..a8f1daffc9bc 100644 --- a/drivers/cpufreq/freq_table.c +++ b/drivers/cpufreq/freq_table.c @@ -18,6 +18,21 @@ * FREQUENCY TABLE HELPERS * *********************************************************************/ +bool policy_has_boost_freq(struct cpufreq_policy *policy) +{ + struct cpufreq_frequency_table *pos, *table = policy->freq_table; + + if (!table) + return false; + + cpufreq_for_each_valid_entry(pos, table) + if (pos->flags & CPUFREQ_BOOST_FREQ) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(policy_has_boost_freq); + int
cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table) { diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index bde1e567b3a9..95f018649abf 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -578,6 +578,8 @@ ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf); int cpufreq_boost_trigger_state(int state); int cpufreq_boost_supported(void); int cpufreq_boost_enabled(void); +int cpufreq_enable_boost_support(void); +bool policy_has_boost_freq(struct cpufreq_policy *policy); #else static inline int cpufreq_boost_trigger_state(int state) { @@ -591,6 +593,16 @@ static inline int cpufreq_boost_enabled(void) { return 0; } + +static inline int cpufreq_enable_boost_support(void) +{ + return -EINVAL; +} + +static inline bool policy_has_boost_freq(struct cpufreq_policy *policy) +{ + return false; +} #endif /* the following function is for cpufreq core use only */ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); -- cgit v1.2.3 From 21c36d35711d24a7689b7fb9606ce78f3b4c3d3b Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Fri, 7 Aug 2015 13:59:16 +0200 Subject: cpufreq-dt: make scaling_boost_freqs sysfs attr available when boost is enabled Make the scaling_boost_freqs sysfs attribute available when the cpufreq-dt driver is used and boost support is enabled. Suggested-by: Viresh Kumar Acked-by: Viresh Kumar Signed-off-by: Bartlomiej Zolnierkiewicz Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq-dt.c | 9 ++++++++- include/linux/cpufreq.h | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index b9259abd25d4..c3583cdfadbd 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -36,6 +36,12 @@ struct private_data { unsigned int voltage_tolerance; /* in percentage */ }; +static struct freq_attr *cpufreq_dt_attr[] = { + &cpufreq_freq_attr_scaling_available_freqs, + NULL, /* Extra space for boost-attr if required */ + NULL, +}; + static int set_target(struct cpufreq_policy *policy, unsigned int index) { struct dev_pm_opp *opp; @@ -336,6 +342,7 @@ static int cpufreq_init(struct cpufreq_policy *policy) ret = cpufreq_enable_boost_support(); if (ret) goto out_free_cpufreq_table; + cpufreq_dt_attr[1] = &cpufreq_freq_attr_scaling_boost_freqs; } policy->cpuinfo.transition_latency = transition_latency; @@ -411,7 +418,7 @@ static struct cpufreq_driver dt_cpufreq_driver = { .exit = cpufreq_exit, .ready = cpufreq_ready, .name = "cpufreq-dt", - .attr = cpufreq_generic_attr, + .attr = cpufreq_dt_attr, }; static int dt_cpufreq_probe(struct platform_device *pdev) diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 95f018649abf..657542d3e23b 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -609,6 +609,7 @@ struct cpufreq_frequency_table *cpufreq_frequency_get_table(unsigned int cpu); /* the following are really really optional */ extern struct freq_attr cpufreq_freq_attr_scaling_available_freqs; +extern struct freq_attr cpufreq_freq_attr_scaling_boost_freqs; extern struct freq_attr *cpufreq_generic_attr[]; int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, struct cpufreq_frequency_table *table); -- cgit v1.2.3 From 4bf011815f2e093c7f60004f4f5683cf40b905b9 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 10 Aug 2015 19:56:46 +0300 Subject: device property: check fwnode type in to_of_node() Potentially
a platform can support both ACPI and OF. In that case, calling to_of_node() for a non-OF fwnode type will return a non-NULL result, which is wrong. Check the fwnode type and return a corresponding result. Signed-off-by: Andy Shevchenko Reviewed-by: Mika Westerberg Signed-off-by: Rafael J. Wysocki --- include/linux/of.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/include/linux/of.h b/include/linux/of.h index edc068d19c79..2194b8ca41f9 100644 --- a/include/linux/of.h +++ b/include/linux/of.h @@ -136,7 +136,8 @@ static inline bool is_of_node(struct fwnode_handle *fwnode) static inline struct device_node *to_of_node(struct fwnode_handle *fwnode) { - return fwnode ? container_of(fwnode, struct device_node, fwnode) : NULL; + return is_of_node(fwnode) ? + container_of(fwnode, struct device_node, fwnode) : NULL; } static inline bool of_have_populated_dt(void) -- cgit v1.2.3 From 5d0ddfebb93069061880fc57ee4ba7246bd1e1ee Mon Sep 17 00:00:00 2001 From: Jiang Liu Date: Fri, 21 Aug 2015 15:36:23 +0800 Subject: ACPI, PCI: Penalize legacy IRQ used by ACPI SCI Nick Meier reported a regression with HyperV that " After rebooting the VM, the following messages are logged in syslog when trying to load the tulip driver: tulip: Linux Tulip drivers version 1.1.15 (Feb 27, 2007) tulip: 0000:00:0a.0: PCI INT A: failed to register GSI tulip: Cannot enable tulip board #0, aborting tulip: probe of 0000:00:0a.0 failed with error -16 Errors occur in 3.19.0 kernel Works in 3.17 kernel. " According to the ACPI dump file posted by Nick at https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1440072 the ACPI MADT table includes an interrupt source override entry for the ACPI SCI: [236h 0566 1] Subtable Type : 02 [237h 0567 1] Length : 0A [238h 0568 1] Bus : 00 [239h 0569 1] Source : 09 [23Ah 0570 4] Interrupt : 00000009 [23Eh 0574 2] Flags (decoded below) : 000D Polarity : 1 Trigger Mode : 3 And in the DSDT table, we have a _PRT method to define PCI interrupts, which eventually goes to: Name (PRSA, ResourceTemplate () { IRQ (Level, ActiveLow, Shared, ) {3,4,5,7,9,10,11,12,14,15} }) Name (PRSB, ResourceTemplate () { IRQ (Level, ActiveLow, Shared, ) {3,4,5,7,9,10,11,12,14,15} }) Name (PRSC, ResourceTemplate () { IRQ (Level, ActiveLow, Shared, ) {3,4,5,7,9,10,11,12,14,15} }) Name (PRSD, ResourceTemplate () { IRQ (Level, ActiveLow, Shared, ) {3,4,5,7,9,10,11,12,14,15} }) According to the MADT and DSDT tables, IRQ 9 may be used for: 1) ACPI SCI in level, high mode 2) PCI legacy IRQ in level, low mode So there's a conflict in the polarity setting for IRQ 9. Prior to commit cd68f6bd53cf ("x86, irq, acpi: Get rid of special handling of GSI for ACPI SCI"), the ACPI SCI was handled specially and there was no check for conflicts between the ACPI SCI and PCI legacy IRQs. And it seems that the HyperV hypervisor doesn't make use of the polarity configuration in the IOAPIC entry, so it just worked. Commit cd68f6bd53cf gets rid of the special handling of the ACPI SCI, and then the pin attribute checking code exposes the conflict between the ACPI SCI and PCI legacy IRQs on the HyperV virtual machine, and rejects the request to assign IRQ 9 to PCI devices. So penalize the legacy IRQ used by the ACPI SCI and mark it unusable if the ACPI SCI attributes conflict with the PCI IRQ attributes.
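The Polarity and Trigger Mode values in the MADT dump above follow directly from the Flags word. A minimal user-space sketch, assuming only the ACPI_MADT_* mask values from the kernel's include/acpi/actbl1.h, decodes 000D the same way:

    /* Decode an MADT interrupt source override Flags word. */
    #include <stdio.h>

    #define ACPI_MADT_POLARITY_MASK 0x0003  /* bits 0-1 */
    #define ACPI_MADT_TRIGGER_MASK  0x000c  /* bits 2-3 */

    int main(void)
    {
            unsigned int flags = 0x000d;    /* Flags value from the dump above */
            unsigned int polarity = flags & ACPI_MADT_POLARITY_MASK;
            unsigned int trigger = (flags & ACPI_MADT_TRIGGER_MASK) >> 2;

            /* Prints "polarity=1 trigger=3": active high, level triggered,
             * while the _PRT links above are level, active low. */
            printf("polarity=%u trigger=%u\n", polarity, trigger);
            return 0;
    }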
Please refer to the following links for more information: https://bugzilla.kernel.org/show_bug.cgi?id=101301 https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1440072 Fixes: cd68f6bd53cf ("x86, irq, acpi: Get rid of special handling of GSI for ACPI SCI") Reported-and-tested-by: Nick Meier Acked-by: Thomas Gleixner Cc: 3.19+ # 3.19+ Signed-off-by: Jiang Liu Signed-off-by: Rafael J. Wysocki --- arch/x86/kernel/acpi/boot.c | 1 + drivers/acpi/pci_link.c | 16 ++++++++++++++++ include/linux/acpi.h | 2 +- 3 files changed, 18 insertions(+), 1 deletion(-) (limited to 'include/linux') diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index e49ee24da85e..9393896717d0 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -445,6 +445,7 @@ static void __init acpi_sci_ioapic_setup(u8 bus_irq, u16 polarity, u16 trigger, polarity = acpi_sci_flags & ACPI_MADT_POLARITY_MASK; mp_override_legacy_irq(bus_irq, polarity, trigger, gsi); + acpi_penalize_sci_irq(bus_irq, trigger, polarity); /* * stash over-ride to indicate we've been here diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c index cfd7581cc19f..b09ad554430a 100644 --- a/drivers/acpi/pci_link.c +++ b/drivers/acpi/pci_link.c @@ -825,6 +825,22 @@ void acpi_penalize_isa_irq(int irq, int active) } } +/* + * Penalize the IRQ used by the ACPI SCI. If the ACPI SCI pin attributes + * conflict with the PCI IRQ attributes, mark the ACPI SCI as ISA_ALWAYS so + * it won't be used for PCI IRQs. + */ +void acpi_penalize_sci_irq(int irq, int trigger, int polarity) +{ + if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) { + if (trigger != ACPI_MADT_TRIGGER_LEVEL || + polarity != ACPI_MADT_POLARITY_ACTIVE_LOW) + acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS; + else + acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING; + } +} + /* * Over-ride default table to reserve additional IRQs for use by ISA * e.g. acpi_irq_isa=5 diff --git a/include/linux/acpi.h b/include/linux/acpi.h index d2445fa9999f..0b2394f61af4 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -221,7 +221,7 @@ struct pci_dev; int acpi_pci_irq_enable (struct pci_dev *dev); void acpi_penalize_isa_irq(int irq, int active); - +void acpi_penalize_sci_irq(int irq, int trigger, int polarity); void acpi_pci_irq_disable (struct pci_dev *dev); extern int ec_read(u8 addr, u8 *val); -- cgit v1.2.3 From ba6a860d41ed3a377d61d59d7c7b08dd7455c686 Mon Sep 17 00:00:00 2001 From: Xunlei Pang Date: Tue, 4 Aug 2015 13:48:55 +0800 Subject: cpuidle/coupled: Remove cpuidle_device::safe_state_index cpuidle_device::safe_state_index needs to be initialized before use; it should be the same as cpuidle_driver::safe_state_index. Tackle this by removing safe_state_index from the cpuidle_device structure and using the one in the cpuidle_driver structure instead. Suggested-by: Daniel Lezcano Signed-off-by: Xunlei Pang Signed-off-by: Rafael J.
Wysocki --- drivers/cpuidle/coupled.c | 4 ++-- include/linux/cpuidle.h | 1 - 2 files changed, 2 insertions(+), 3 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpuidle/coupled.c b/drivers/cpuidle/coupled.c index 7936dce4b878..6493e4055abe 100644 --- a/drivers/cpuidle/coupled.c +++ b/drivers/cpuidle/coupled.c @@ -473,7 +473,7 @@ int cpuidle_enter_state_coupled(struct cpuidle_device *dev, return entered_state; } entered_state = cpuidle_enter_state(dev, drv, - dev->safe_state_index); + drv->safe_state_index); local_irq_disable(); } @@ -521,7 +521,7 @@ retry: } entered_state = cpuidle_enter_state(dev, drv, - dev->safe_state_index); + drv->safe_state_index); local_irq_disable(); } diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index d075d34279df..786ad32631a6 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -84,7 +84,6 @@ struct cpuidle_device { struct list_head device_list; #ifdef CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED - int safe_state_index; cpumask_t coupled_cpus; struct cpuidle_coupled *coupled; #endif -- cgit v1.2.3 From ad440bf40e2846966da44e885bb7d8a1f8384fa6 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Tue, 18 Aug 2015 13:38:02 +0200 Subject: PM / Domains: Remove unusable governor dummies The governor dummies for the !CONFIG_PM_GENERIC_DOMAINS case are unusable, as a governor is always referred to by taking its address, which you can't do with a literal NULL pointer. I.e. pm_genpd_init(genpd, &simple_qos_governor, false); fails to compile with: error: lvalue required as unary '&' operand Hence just remove the governor dummies. Signed-off-by: Geert Uytterhoeven Acked-by: Ulf Hansson Signed-off-by: Rafael J. Wysocki --- include/linux/pm_domain.h | 2 -- 1 file changed, 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index b2725e6e8e7b..b1cf7e797892 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h @@ -221,8 +221,6 @@ static inline int pm_genpd_name_poweron(const char *domain_name) return -ENOSYS; } static inline void pm_genpd_poweroff_unused(void) {} -#define simple_qos_governor NULL -#define pm_domain_always_on_gov NULL #endif static inline int pm_genpd_add_device(struct generic_pm_domain *genpd, -- cgit v1.2.3 From 6bfb7c7434f75d29241413dc7e784295ba56de98 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 3 Aug 2015 08:36:14 +0530 Subject: cpufreq: remove redundant CPUFREQ_INCOMPATIBLE notifier event What's being done from CPUFREQ_INCOMPATIBLE can also be done with CPUFREQ_ADJUST. There is nothing special about the CPUFREQ_INCOMPATIBLE notifier. Kill CPUFREQ_INCOMPATIBLE and fix its usage sites. This also updates the numbering of notifier events to remove holes. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- Documentation/cpu-freq/core.txt | 7 ++----- drivers/acpi/processor_perflib.c | 2 +- drivers/cpufreq/cpufreq.c | 4 ---- drivers/cpufreq/ppc_cbe_cpufreq_pmi.c | 4 ++-- drivers/video/fbdev/pxafb.c | 1 - drivers/video/fbdev/sa1100fb.c | 1 - include/linux/cpufreq.h | 9 ++++----- 7 files changed, 9 insertions(+), 19 deletions(-) (limited to 'include/linux') diff --git a/Documentation/cpu-freq/core.txt b/Documentation/cpu-freq/core.txt index 70933eadc308..ba78e7c2a069 100644 --- a/Documentation/cpu-freq/core.txt +++ b/Documentation/cpu-freq/core.txt @@ -55,16 +55,13 @@ transition notifiers. ---------------------------- These are notified when a new policy is intended to be set.
Each -CPUFreq policy notifier is called three times for a policy transition: +CPUFreq policy notifier is called twice for a policy transition: 1.) During CPUFREQ_ADJUST all CPUFreq notifiers may change the limit if they see a need for this - may it be thermal considerations or hardware limitations. -2.) During CPUFREQ_INCOMPATIBLE only changes may be done in order to avoid - hardware failure. - -3.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy +2.) And during CPUFREQ_NOTIFY all notifiers are informed of the new policy - if two hardware drivers failed to agree on a new policy before this stage, the incompatible hardware shall be shut down, and the user informed of this. diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 36b6da2918a6..91941f1cdecb 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -87,7 +87,7 @@ static int acpi_processor_ppc_notifier(struct notifier_block *nb, if (ignore_ppc) return 0; - if (event != CPUFREQ_INCOMPATIBLE) + if (event != CPUFREQ_ADJUST) return 0; mutex_lock(&performance_mutex); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 76a26609d96b..293f47b814bf 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -2206,10 +2206,6 @@ static int cpufreq_set_policy(struct cpufreq_policy *policy, blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_ADJUST, new_policy); - /* adjust if necessary - hardware incompatibility*/ - blocking_notifier_call_chain(&cpufreq_policy_notifier_list, - CPUFREQ_INCOMPATIBLE, new_policy); - /* * verify the cpu speed can be set within this limit, which might be * different to the first one diff --git a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c index d29e8da396a0..7969f7690498 100644 --- a/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c +++ b/drivers/cpufreq/ppc_cbe_cpufreq_pmi.c @@ -97,8 +97,8 @@ static int pmi_notifier(struct notifier_block *nb, struct cpufreq_frequency_table *cbe_freqs; u8 node; - /* Should this really be called for CPUFREQ_ADJUST, CPUFREQ_INCOMPATIBLE - * and CPUFREQ_NOTIFY policy events?) + /* Should this really be called for CPUFREQ_ADJUST and CPUFREQ_NOTIFY + * policy events?) 
*/ if (event == CPUFREQ_START) return 0; diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c index 7245611ec963..94813af97f09 100644 --- a/drivers/video/fbdev/pxafb.c +++ b/drivers/video/fbdev/pxafb.c @@ -1668,7 +1668,6 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data) switch (val) { case CPUFREQ_ADJUST: - case CPUFREQ_INCOMPATIBLE: pr_debug("min dma period: %d ps, " "new clock %d kHz\n", pxafb_display_dma_period(var), policy->max); diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c index 89dd7e02197f..dcf774c15889 100644 --- a/drivers/video/fbdev/sa1100fb.c +++ b/drivers/video/fbdev/sa1100fb.c @@ -1042,7 +1042,6 @@ sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val, switch (val) { case CPUFREQ_ADJUST: - case CPUFREQ_INCOMPATIBLE: dev_dbg(fbi->dev, "min dma period: %d ps, " "new clock %d kHz\n", sa1100fb_min_dma_period(fbi), policy->max); diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index bde1e567b3a9..bedcc90c0757 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -369,11 +369,10 @@ static inline void cpufreq_resume(void) {} /* Policy Notifiers */ #define CPUFREQ_ADJUST (0) -#define CPUFREQ_INCOMPATIBLE (1) -#define CPUFREQ_NOTIFY (2) -#define CPUFREQ_START (3) -#define CPUFREQ_CREATE_POLICY (4) -#define CPUFREQ_REMOVE_POLICY (5) +#define CPUFREQ_NOTIFY (1) +#define CPUFREQ_START (2) +#define CPUFREQ_CREATE_POLICY (3) +#define CPUFREQ_REMOVE_POLICY (4) #ifdef CONFIG_CPU_FREQ int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list); -- cgit v1.2.3 From e27f8bd248756310a6df8b67f96d41d5a693642c Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 3 Aug 2015 08:36:17 +0530 Subject: cpufreq: remove redundant 'governor' field from user_policy It's always the same as policy->governor, and there is no need to keep another copy of it. Remove it. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J.
Wysocki --- drivers/cpufreq/cpufreq.c | 7 ++----- include/linux/cpufreq.h | 1 - 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 8e71d8e08439..3033952391fe 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -675,7 +675,6 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, return ret; policy->user_policy.policy = policy->policy; - policy->user_policy.governor = policy->governor; return count; } @@ -1323,10 +1322,9 @@ static int cpufreq_online(unsigned int cpu) goto out_exit_policy; } - if (new_policy) { + if (new_policy) policy->user_policy.policy = policy->policy; - policy->user_policy.governor = policy->governor; - } + up_write(&policy->rwsem); kobject_uevent(&policy->kobj, KOBJ_ADD); @@ -2305,7 +2303,6 @@ int cpufreq_update_policy(unsigned int cpu) new_policy.min = policy->user_policy.min; new_policy.max = policy->user_policy.max; new_policy.policy = policy->user_policy.policy; - new_policy.governor = policy->user_policy.governor; /* * BIOS might change freq behind our back diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index bedcc90c0757..752bf8a5c314 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -55,7 +55,6 @@ struct cpufreq_real_policy { unsigned int min; /* in kHz */ unsigned int max; /* in kHz */ unsigned int policy; /* see above */ - struct cpufreq_governor *governor; /* see below */ }; struct cpufreq_policy { -- cgit v1.2.3 From 88dc4384958759510db248bf9158434ac783d407 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 3 Aug 2015 08:36:18 +0530 Subject: cpufreq: remove redundant 'policy' field from user_policy It's always the same as policy->policy, and there is no need to keep another copy of it. Remove it. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- drivers/cpufreq/cpufreq.c | 10 +--------- include/linux/cpufreq.h | 1 - 2 files changed, 1 insertion(+), 10 deletions(-) (limited to 'include/linux') diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 3033952391fe..a80bd68bbd74 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -671,11 +671,7 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, return -EINVAL; ret = cpufreq_set_policy(policy, &new_policy); - return ret ?
ret : count; } /** @@ -1322,9 +1318,6 @@ static int cpufreq_online(unsigned int cpu) goto out_exit_policy; } - if (new_policy) - policy->user_policy.policy = policy->policy; - up_write(&policy->rwsem); kobject_uevent(&policy->kobj, KOBJ_ADD); @@ -2302,7 +2295,6 @@ int cpufreq_update_policy(unsigned int cpu) memcpy(&new_policy, policy, sizeof(*policy)); new_policy.min = policy->user_policy.min; new_policy.max = policy->user_policy.max; - new_policy.policy = policy->user_policy.policy; /* * BIOS might change freq behind our back diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 752bf8a5c314..54dbbff0a55e 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -54,7 +54,6 @@ struct cpufreq_cpuinfo { struct cpufreq_real_policy { unsigned int min; /* in kHz */ unsigned int max; /* in kHz */ - unsigned int policy; /* see above */ }; struct cpufreq_policy { -- cgit v1.2.3 From c7a7b418dd1991079dd7ef03fec7d1863ef96154 Mon Sep 17 00:00:00 2001 From: Viresh Kumar Date: Mon, 3 Aug 2015 08:36:19 +0530 Subject: cpufreq: rename cpufreq_real_policy as cpufreq_user_policy It's all about caching the min/max freq requested by userspace, and the name 'cpufreq_real_policy' doesn't fit that well. Rename it to cpufreq_user_policy. Signed-off-by: Viresh Kumar Signed-off-by: Rafael J. Wysocki --- include/linux/cpufreq.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux') diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 54dbbff0a55e..6ff6a4d95eea 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h @@ -51,7 +51,7 @@ struct cpufreq_cpuinfo { unsigned int transition_latency; }; -struct cpufreq_real_policy { +struct cpufreq_user_policy { unsigned int min; /* in kHz */ unsigned int max; /* in kHz */ }; @@ -86,7 +86,7 @@ struct cpufreq_policy { struct work_struct update; /* if update_policy() needs to be * called, but you're in IRQ context */ - struct cpufreq_real_policy user_policy; + struct cpufreq_user_policy user_policy; struct cpufreq_frequency_table *freq_table; struct list_head policy_list; -- cgit v1.2.3
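With CPUFREQ_INCOMPATIBLE gone (see the notifier-event cleanup above), a policy notifier that needs to cap frequencies hooks only CPUFREQ_ADJUST. A minimal sketch, where the notifier name and the THERMAL_CAP_KHZ value are made up for illustration:

    #include <linux/cpufreq.h>
    #include <linux/notifier.h>

    #define THERMAL_CAP_KHZ 1200000         /* hypothetical thermal limit */

    static int cap_policy_notifier(struct notifier_block *nb,
                                   unsigned long event, void *data)
    {
            struct cpufreq_policy *policy = data;

            /* All limit adjustments now happen at CPUFREQ_ADJUST. */
            if (event != CPUFREQ_ADJUST)
                    return NOTIFY_DONE;

            cpufreq_verify_within_limits(policy, 0, THERMAL_CAP_KHZ);
            return NOTIFY_OK;
    }

    static struct notifier_block cap_nb = {
            .notifier_call = cap_policy_notifier,
    };

    /* Registered once at init time with:
     * cpufreq_register_notifier(&cap_nb, CPUFREQ_POLICY_NOTIFIER);
     */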