Diffstat (limited to 'drivers')
44 files changed, 1977 insertions, 871 deletions
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index a4c8ad98560d..c4d0a1c912f0 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -990,7 +990,7 @@ void acpi_subsys_complete(struct device *dev) * the sleep state it is going out of and it has never been resumed till * now, resume it in case the firmware powered it up. */ - if (dev->power.direct_complete && pm_resume_via_firmware()) + if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) pm_request_resume(dev); } EXPORT_SYMBOL_GPL(acpi_subsys_complete); @@ -1039,10 +1039,28 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_late); */ int acpi_subsys_suspend_noirq(struct device *dev) { - if (dev_pm_smart_suspend_and_suspended(dev)) + int ret; + + if (dev_pm_smart_suspend_and_suspended(dev)) { + dev->power.may_skip_resume = true; return 0; + } + + ret = pm_generic_suspend_noirq(dev); + if (ret) + return ret; + + /* + * If the target system sleep state is suspend-to-idle, it is sufficient + * to check whether or not the device's wakeup settings are good for + * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause + * acpi_subsys_complete() to take care of fixing up the device's state + * anyway, if need be. + */ + dev->power.may_skip_resume = device_may_wakeup(dev) || + !device_can_wakeup(dev); - return pm_generic_suspend_noirq(dev); + return 0; } EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq); @@ -1052,6 +1070,9 @@ EXPORT_SYMBOL_GPL(acpi_subsys_suspend_noirq); */ int acpi_subsys_resume_noirq(struct device *dev) { + if (dev_pm_may_skip_resume(dev)) + return 0; + /* * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend * during system suspend, so update their runtime PM status to "active" diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 8082871b409a..46cde0912762 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -367,10 +367,20 @@ static const struct dmi_system_id acpisleep_dmi_table[] __initconst = { {}, }; +static bool ignore_blacklist; + +void __init acpi_sleep_no_blacklist(void) +{ + ignore_blacklist = true; +} + static void __init acpi_sleep_dmi_check(void) { int year; + if (ignore_blacklist) + return; + if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year >= 2012) acpi_nvs_nosave_s3(); @@ -697,7 +707,8 @@ static const struct acpi_device_id lps0_device_ids[] = { #define ACPI_LPS0_ENTRY 5 #define ACPI_LPS0_EXIT 6 -#define ACPI_S2IDLE_FUNC_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT)) +#define ACPI_LPS0_SCREEN_MASK ((1 << ACPI_LPS0_SCREEN_OFF) | (1 << ACPI_LPS0_SCREEN_ON)) +#define ACPI_LPS0_PLATFORM_MASK ((1 << ACPI_LPS0_ENTRY) | (1 << ACPI_LPS0_EXIT)) static acpi_handle lps0_device_handle; static guid_t lps0_dsm_guid; @@ -900,7 +911,8 @@ static int lps0_device_attach(struct acpi_device *adev, if (out_obj && out_obj->type == ACPI_TYPE_BUFFER) { char bitmask = *(char *)out_obj->buffer.pointer; - if ((bitmask & ACPI_S2IDLE_FUNC_MASK) == ACPI_S2IDLE_FUNC_MASK) { + if ((bitmask & ACPI_LPS0_PLATFORM_MASK) == ACPI_LPS0_PLATFORM_MASK || + (bitmask & ACPI_LPS0_SCREEN_MASK) == ACPI_LPS0_SCREEN_MASK) { lps0_dsm_func_mask = bitmask; lps0_device_handle = adev->handle; /* diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 0c80bea05bcb..528b24149bc7 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -1032,15 +1032,12 @@ static int genpd_prepare(struct device *dev) static int genpd_finish_suspend(struct device *dev, bool poweroff) { struct generic_pm_domain *genpd; - int ret; + int ret = 0; genpd = 
dev_to_genpd(dev); if (IS_ERR(genpd)) return -EINVAL; - if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) - return 0; - if (poweroff) ret = pm_generic_poweroff_noirq(dev); else @@ -1048,10 +1045,19 @@ static int genpd_finish_suspend(struct device *dev, bool poweroff) if (ret) return ret; - if (genpd->dev_ops.stop && genpd->dev_ops.start) { - ret = pm_runtime_force_suspend(dev); - if (ret) + if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) + return 0; + + if (genpd->dev_ops.stop && genpd->dev_ops.start && + !pm_runtime_status_suspended(dev)) { + ret = genpd_stop_dev(genpd, dev); + if (ret) { + if (poweroff) + pm_generic_restore_noirq(dev); + else + pm_generic_resume_noirq(dev); return ret; + } } genpd_lock(genpd); @@ -1085,7 +1091,7 @@ static int genpd_suspend_noirq(struct device *dev) static int genpd_resume_noirq(struct device *dev) { struct generic_pm_domain *genpd; - int ret = 0; + int ret; dev_dbg(dev, "%s()\n", __func__); @@ -1094,21 +1100,21 @@ static int genpd_resume_noirq(struct device *dev) return -EINVAL; if (dev->power.wakeup_path && genpd_is_active_wakeup(genpd)) - return 0; + return pm_generic_resume_noirq(dev); genpd_lock(genpd); genpd_sync_power_on(genpd, true, 0); genpd->suspended_count--; genpd_unlock(genpd); - if (genpd->dev_ops.stop && genpd->dev_ops.start) - ret = pm_runtime_force_resume(dev); - - ret = pm_generic_resume_noirq(dev); - if (ret) - return ret; + if (genpd->dev_ops.stop && genpd->dev_ops.start && + !pm_runtime_status_suspended(dev)) { + ret = genpd_start_dev(genpd, dev); + if (ret) + return ret; + } - return ret; + return pm_generic_resume_noirq(dev); } /** @@ -1135,8 +1141,9 @@ static int genpd_freeze_noirq(struct device *dev) if (ret) return ret; - if (genpd->dev_ops.stop && genpd->dev_ops.start) - ret = pm_runtime_force_suspend(dev); + if (genpd->dev_ops.stop && genpd->dev_ops.start && + !pm_runtime_status_suspended(dev)) + ret = genpd_stop_dev(genpd, dev); return ret; } @@ -1159,8 +1166,9 @@ static int genpd_thaw_noirq(struct device *dev) if (IS_ERR(genpd)) return -EINVAL; - if (genpd->dev_ops.stop && genpd->dev_ops.start) { - ret = pm_runtime_force_resume(dev); + if (genpd->dev_ops.stop && genpd->dev_ops.start && + !pm_runtime_status_suspended(dev)) { + ret = genpd_start_dev(genpd, dev); if (ret) return ret; } @@ -1217,8 +1225,9 @@ static int genpd_restore_noirq(struct device *dev) genpd_sync_power_on(genpd, true, 0); genpd_unlock(genpd); - if (genpd->dev_ops.stop && genpd->dev_ops.start) { - ret = pm_runtime_force_resume(dev); + if (genpd->dev_ops.stop && genpd->dev_ops.start && + !pm_runtime_status_suspended(dev)) { + ret = genpd_start_dev(genpd, dev); if (ret) return ret; } @@ -2199,20 +2208,8 @@ int genpd_dev_pm_attach(struct device *dev) ret = of_parse_phandle_with_args(dev->of_node, "power-domains", "#power-domain-cells", 0, &pd_args); - if (ret < 0) { - if (ret != -ENOENT) - return ret; - - /* - * Try legacy Samsung-specific bindings - * (for backwards compatibility of DT ABI) - */ - pd_args.args_count = 0; - pd_args.np = of_parse_phandle(dev->of_node, - "samsung,power-domain", 0); - if (!pd_args.np) - return -ENOENT; - } + if (ret < 0) + return ret; mutex_lock(&gpd_list_lock); pd = genpd_get_from_provider(&pd_args); diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 08744b572af6..02a497e7c785 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -18,7 +18,6 @@ */ #include <linux/device.h> -#include <linux/kallsyms.h> #include <linux/export.h> #include <linux/mutex.h> #include 
<linux/pm.h> @@ -541,6 +540,73 @@ void dev_pm_skip_next_resume_phases(struct device *dev) } /** + * suspend_event - Return a "suspend" message for given "resume" one. + * @resume_msg: PM message representing a system-wide resume transition. + */ +static pm_message_t suspend_event(pm_message_t resume_msg) +{ + switch (resume_msg.event) { + case PM_EVENT_RESUME: + return PMSG_SUSPEND; + case PM_EVENT_THAW: + case PM_EVENT_RESTORE: + return PMSG_FREEZE; + case PM_EVENT_RECOVER: + return PMSG_HIBERNATE; + } + return PMSG_ON; +} + +/** + * dev_pm_may_skip_resume - System-wide device resume optimization check. + * @dev: Target device. + * + * Checks whether or not the device may be left in suspend after a system-wide + * transition to the working state. + */ +bool dev_pm_may_skip_resume(struct device *dev) +{ + return !dev->power.must_resume && pm_transition.event != PM_EVENT_RESTORE; +} + +static pm_callback_t dpm_subsys_resume_noirq_cb(struct device *dev, + pm_message_t state, + const char **info_p) +{ + pm_callback_t callback; + const char *info; + + if (dev->pm_domain) { + info = "noirq power domain "; + callback = pm_noirq_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "noirq type "; + callback = pm_noirq_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "noirq class "; + callback = pm_noirq_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "noirq bus "; + callback = pm_noirq_op(dev->bus->pm, state); + } else { + return NULL; + } + + if (info_p) + *info_p = info; + + return callback; +} + +static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev, + pm_message_t state, + const char **info_p); + +static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev, + pm_message_t state, + const char **info_p); + +/** * device_resume_noirq - Execute a "noirq resume" callback for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. @@ -551,8 +617,9 @@ void dev_pm_skip_next_resume_phases(struct device *dev) */ static int device_resume_noirq(struct device *dev, pm_message_t state, bool async) { - pm_callback_t callback = NULL; - const char *info = NULL; + pm_callback_t callback; + const char *info; + bool skip_resume; int error = 0; TRACE_DEVICE(dev); @@ -566,29 +633,61 @@ static int device_resume_noirq(struct device *dev, pm_message_t state, bool asyn dpm_wait_for_superior(dev, async); - if (dev->pm_domain) { - info = "noirq power domain "; - callback = pm_noirq_op(&dev->pm_domain->ops, state); - } else if (dev->type && dev->type->pm) { - info = "noirq type "; - callback = pm_noirq_op(dev->type->pm, state); - } else if (dev->class && dev->class->pm) { - info = "noirq class "; - callback = pm_noirq_op(dev->class->pm, state); - } else if (dev->bus && dev->bus->pm) { - info = "noirq bus "; - callback = pm_noirq_op(dev->bus->pm, state); + skip_resume = dev_pm_may_skip_resume(dev); + + callback = dpm_subsys_resume_noirq_cb(dev, state, &info); + if (callback) + goto Run; + + if (skip_resume) + goto Skip; + + if (dev_pm_smart_suspend_and_suspended(dev)) { + pm_message_t suspend_msg = suspend_event(state); + + /* + * If "freeze" callbacks have been skipped during a transition + * related to hibernation, the subsequent "thaw" callbacks must + * be skipped too or bad things may happen. Otherwise, resume + * callbacks are going to be run for the device, so its runtime + * PM status must be changed to reflect the new state after the + * transition under way. 
+ */ + if (!dpm_subsys_suspend_late_cb(dev, suspend_msg, NULL) && + !dpm_subsys_suspend_noirq_cb(dev, suspend_msg, NULL)) { + if (state.event == PM_EVENT_THAW) { + skip_resume = true; + goto Skip; + } else { + pm_runtime_set_active(dev); + } + } } - if (!callback && dev->driver && dev->driver->pm) { + if (dev->driver && dev->driver->pm) { info = "noirq driver "; callback = pm_noirq_op(dev->driver->pm, state); } +Run: error = dpm_run_callback(callback, dev, state, info); + +Skip: dev->power.is_noirq_suspended = false; - Out: + if (skip_resume) { + /* + * The device is going to be left in suspend, but it might not + * have been in runtime suspend before the system suspended, so + * its runtime PM status needs to be updated to avoid confusing + * the runtime PM framework when runtime PM is enabled for the + * device again. + */ + pm_runtime_set_suspended(dev); + dev_pm_skip_next_resume_phases(dev); + } + +Out: complete_all(&dev->power.completion); TRACE_RESUME(error); return error; @@ -681,6 +780,35 @@ void dpm_resume_noirq(pm_message_t state) dpm_noirq_end(); } +static pm_callback_t dpm_subsys_resume_early_cb(struct device *dev, + pm_message_t state, + const char **info_p) +{ + pm_callback_t callback; + const char *info; + + if (dev->pm_domain) { + info = "early power domain "; + callback = pm_late_early_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "early type "; + callback = pm_late_early_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "early class "; + callback = pm_late_early_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "early bus "; + callback = pm_late_early_op(dev->bus->pm, state); + } else { + return NULL; + } + + if (info_p) + *info_p = info; + + return callback; +} + /** * device_resume_early - Execute an "early resume" callback for given device. * @dev: Device to handle. 
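(Aside: from a driver's point of view, the may_skip_resume / must_resume machinery above is opted into through the driver-flags interface. A minimal, hypothetical sketch of a platform driver that asks the core to leave it suspended across system resume; the foo_* names are illustrative only, and the runtime PM callbacks are assumed to fully quiesce and restore the hardware.)

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
        /* Put the device into its lowest-power state. */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        /* Bring the device back to full power. */
        return 0;
}

static int foo_probe(struct platform_device *pdev)
{
        /*
         * DPM_FLAG_SMART_SUSPEND lets the core skip suspend callbacks for a
         * device that is already runtime-suspended; DPM_FLAG_LEAVE_SUSPENDED
         * additionally allows the core to leave it suspended on the way back
         * to the working state (see dev_pm_may_skip_resume() above).
         */
        dev_pm_set_driver_flags(&pdev->dev, DPM_FLAG_SMART_SUSPEND |
                                            DPM_FLAG_LEAVE_SUSPENDED);
        pm_runtime_enable(&pdev->dev);
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};

static struct platform_driver foo_driver = {
        .probe = foo_probe,
        .driver = {
                .name = "foo",
                .pm = &foo_pm_ops,
        },
};
module_platform_driver(foo_driver);
MODULE_LICENSE("GPL");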
@@ -691,8 +819,8 @@ void dpm_resume_noirq(pm_message_t state) */ static int device_resume_early(struct device *dev, pm_message_t state, bool async) { - pm_callback_t callback = NULL; - const char *info = NULL; + pm_callback_t callback; + const char *info; int error = 0; TRACE_DEVICE(dev); @@ -706,19 +834,7 @@ static int device_resume_early(struct device *dev, pm_message_t state, bool asyn dpm_wait_for_superior(dev, async); - if (dev->pm_domain) { - info = "early power domain "; - callback = pm_late_early_op(&dev->pm_domain->ops, state); - } else if (dev->type && dev->type->pm) { - info = "early type "; - callback = pm_late_early_op(dev->type->pm, state); - } else if (dev->class && dev->class->pm) { - info = "early class "; - callback = pm_late_early_op(dev->class->pm, state); - } else if (dev->bus && dev->bus->pm) { - info = "early bus "; - callback = pm_late_early_op(dev->bus->pm, state); - } + callback = dpm_subsys_resume_early_cb(dev, state, &info); if (!callback && dev->driver && dev->driver->pm) { info = "early driver "; @@ -1089,6 +1205,77 @@ static pm_message_t resume_event(pm_message_t sleep_state) return PMSG_ON; } +static void dpm_superior_set_must_resume(struct device *dev) +{ + struct device_link *link; + int idx; + + if (dev->parent) + dev->parent->power.must_resume = true; + + idx = device_links_read_lock(); + + list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) + link->supplier->power.must_resume = true; + + device_links_read_unlock(idx); +} + +static pm_callback_t dpm_subsys_suspend_noirq_cb(struct device *dev, + pm_message_t state, + const char **info_p) +{ + pm_callback_t callback; + const char *info; + + if (dev->pm_domain) { + info = "noirq power domain "; + callback = pm_noirq_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "noirq type "; + callback = pm_noirq_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "noirq class "; + callback = pm_noirq_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "noirq bus "; + callback = pm_noirq_op(dev->bus->pm, state); + } else { + return NULL; + } + + if (info_p) + *info_p = info; + + return callback; +} + +static bool device_must_resume(struct device *dev, pm_message_t state, + bool no_subsys_suspend_noirq) +{ + pm_message_t resume_msg = resume_event(state); + + /* + * If all of the device driver's "noirq", "late" and "early" callbacks + * are invoked directly by the core, the decision to allow the device to + * stay in suspend can be based on its current runtime PM status and its + * wakeup settings. + */ + if (no_subsys_suspend_noirq && + !dpm_subsys_suspend_late_cb(dev, state, NULL) && + !dpm_subsys_resume_early_cb(dev, resume_msg, NULL) && + !dpm_subsys_resume_noirq_cb(dev, resume_msg, NULL)) + return !pm_runtime_status_suspended(dev) && + (resume_msg.event != PM_EVENT_RESUME || + (device_can_wakeup(dev) && !device_may_wakeup(dev))); + + /* + * The only safe strategy here is to require that if the device may not + * be left in suspend, resume callbacks must be invoked for it. + */ + return !dev->power.may_skip_resume; +} + /** * __device_suspend_noirq - Execute a "noirq suspend" callback for given device. * @dev: Device to handle. 
@@ -1100,8 +1287,9 @@ static pm_message_t resume_event(pm_message_t sleep_state) */ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool async) { - pm_callback_t callback = NULL; - const char *info = NULL; + pm_callback_t callback; + const char *info; + bool no_subsys_cb = false; int error = 0; TRACE_DEVICE(dev); @@ -1120,30 +1308,40 @@ static int __device_suspend_noirq(struct device *dev, pm_message_t state, bool a if (dev->power.syscore || dev->power.direct_complete) goto Complete; - if (dev->pm_domain) { - info = "noirq power domain "; - callback = pm_noirq_op(&dev->pm_domain->ops, state); - } else if (dev->type && dev->type->pm) { - info = "noirq type "; - callback = pm_noirq_op(dev->type->pm, state); - } else if (dev->class && dev->class->pm) { - info = "noirq class "; - callback = pm_noirq_op(dev->class->pm, state); - } else if (dev->bus && dev->bus->pm) { - info = "noirq bus "; - callback = pm_noirq_op(dev->bus->pm, state); - } + callback = dpm_subsys_suspend_noirq_cb(dev, state, &info); + if (callback) + goto Run; - if (!callback && dev->driver && dev->driver->pm) { + no_subsys_cb = !dpm_subsys_suspend_late_cb(dev, state, NULL); + + if (dev_pm_smart_suspend_and_suspended(dev) && no_subsys_cb) + goto Skip; + + if (dev->driver && dev->driver->pm) { info = "noirq driver "; callback = pm_noirq_op(dev->driver->pm, state); } +Run: error = dpm_run_callback(callback, dev, state, info); - if (!error) - dev->power.is_noirq_suspended = true; - else + if (error) { async_error = error; + goto Complete; + } + +Skip: + dev->power.is_noirq_suspended = true; + + if (dev_pm_test_driver_flags(dev, DPM_FLAG_LEAVE_SUSPENDED)) { + dev->power.must_resume = dev->power.must_resume || + atomic_read(&dev->power.usage_count) > 1 || + device_must_resume(dev, state, no_subsys_cb); + } else { + dev->power.must_resume = true; + } + + if (dev->power.must_resume) + dpm_superior_set_must_resume(dev); Complete: complete_all(&dev->power.completion); @@ -1249,6 +1447,50 @@ int dpm_suspend_noirq(pm_message_t state) return ret; } +static void dpm_propagate_wakeup_to_parent(struct device *dev) +{ + struct device *parent = dev->parent; + + if (!parent) + return; + + spin_lock_irq(&parent->power.lock); + + if (dev->power.wakeup_path && !parent->power.ignore_children) + parent->power.wakeup_path = true; + + spin_unlock_irq(&parent->power.lock); +} + +static pm_callback_t dpm_subsys_suspend_late_cb(struct device *dev, + pm_message_t state, + const char **info_p) +{ + pm_callback_t callback; + const char *info; + + if (dev->pm_domain) { + info = "late power domain "; + callback = pm_late_early_op(&dev->pm_domain->ops, state); + } else if (dev->type && dev->type->pm) { + info = "late type "; + callback = pm_late_early_op(dev->type->pm, state); + } else if (dev->class && dev->class->pm) { + info = "late class "; + callback = pm_late_early_op(dev->class->pm, state); + } else if (dev->bus && dev->bus->pm) { + info = "late bus "; + callback = pm_late_early_op(dev->bus->pm, state); + } else { + return NULL; + } + + if (info_p) + *info_p = info; + + return callback; +} + /** * __device_suspend_late - Execute a "late suspend" callback for given device. * @dev: Device to handle. 
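(Aside: device_must_resume() and the wakeup_path propagation above key off the device's user-visible wakeup settings, which drivers typically honour as sketched below; a hedged example with hypothetical foo_* names, assuming device_init_wakeup(dev, true) was called at probe time and priv->irq is a wake-capable interrupt.)

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/pm_wakeup.h>

struct foo_priv {
        int irq;
};

static int __maybe_unused foo_suspend(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        /*
         * If user space enabled wakeup (the "wakeup" sysfs attribute handled
         * in sysfs.c below), keep the interrupt armed so it can wake the
         * system; the core then treats the device as part of a wakeup path.
         */
        if (device_may_wakeup(dev))
                enable_irq_wake(priv->irq);

        return 0;
}

static int __maybe_unused foo_resume(struct device *dev)
{
        struct foo_priv *priv = dev_get_drvdata(dev);

        if (device_may_wakeup(dev))
                disable_irq_wake(priv->irq);

        return 0;
}

static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);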
@@ -1259,8 +1501,8 @@ int dpm_suspend_noirq(pm_message_t state) */ static int __device_suspend_late(struct device *dev, pm_message_t state, bool async) { - pm_callback_t callback = NULL; - const char *info = NULL; + pm_callback_t callback; + const char *info; int error = 0; TRACE_DEVICE(dev); @@ -1281,30 +1523,29 @@ static int __device_suspend_late(struct device *dev, pm_message_t state, bool as if (dev->power.syscore || dev->power.direct_complete) goto Complete; - if (dev->pm_domain) { - info = "late power domain "; - callback = pm_late_early_op(&dev->pm_domain->ops, state); - } else if (dev->type && dev->type->pm) { - info = "late type "; - callback = pm_late_early_op(dev->type->pm, state); - } else if (dev->class && dev->class->pm) { - info = "late class "; - callback = pm_late_early_op(dev->class->pm, state); - } else if (dev->bus && dev->bus->pm) { - info = "late bus "; - callback = pm_late_early_op(dev->bus->pm, state); - } + callback = dpm_subsys_suspend_late_cb(dev, state, &info); + if (callback) + goto Run; - if (!callback && dev->driver && dev->driver->pm) { + if (dev_pm_smart_suspend_and_suspended(dev) && + !dpm_subsys_suspend_noirq_cb(dev, state, NULL)) + goto Skip; + + if (dev->driver && dev->driver->pm) { info = "late driver "; callback = pm_late_early_op(dev->driver->pm, state); } +Run: error = dpm_run_callback(callback, dev, state, info); - if (!error) - dev->power.is_late_suspended = true; - else + if (error) { async_error = error; + goto Complete; + } + dpm_propagate_wakeup_to_parent(dev); + +Skip: + dev->power.is_late_suspended = true; Complete: TRACE_SUSPEND(error); @@ -1435,11 +1676,17 @@ static int legacy_suspend(struct device *dev, pm_message_t state, return error; } -static void dpm_clear_suppliers_direct_complete(struct device *dev) +static void dpm_clear_superiors_direct_complete(struct device *dev) { struct device_link *link; int idx; + if (dev->parent) { + spin_lock_irq(&dev->parent->power.lock); + dev->parent->power.direct_complete = false; + spin_unlock_irq(&dev->parent->power.lock); + } + idx = device_links_read_lock(); list_for_each_entry_rcu(link, &dev->links.suppliers, c_node) { @@ -1500,6 +1747,9 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) dev->power.direct_complete = false; } + dev->power.may_skip_resume = false; + dev->power.must_resume = false; + dpm_watchdog_set(&wd, dev); device_lock(dev); @@ -1543,20 +1793,12 @@ static int __device_suspend(struct device *dev, pm_message_t state, bool async) End: if (!error) { - struct device *parent = dev->parent; - dev->power.is_suspended = true; - if (parent) { - spin_lock_irq(&parent->power.lock); - - dev->parent->power.direct_complete = false; - if (dev->power.wakeup_path - && !dev->parent->power.ignore_children) - dev->parent->power.wakeup_path = true; + if (device_may_wakeup(dev)) + dev->power.wakeup_path = true; - spin_unlock_irq(&parent->power.lock); - } - dpm_clear_suppliers_direct_complete(dev); + dpm_propagate_wakeup_to_parent(dev); + dpm_clear_superiors_direct_complete(dev); } device_unlock(dev); @@ -1665,8 +1907,9 @@ static int device_prepare(struct device *dev, pm_message_t state) if (dev->power.syscore) return 0; - WARN_ON(dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND) && - !pm_runtime_enabled(dev)); + WARN_ON(!pm_runtime_enabled(dev) && + dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND | + DPM_FLAG_LEAVE_SUSPENDED)); /* * If a device's parent goes into runtime suspend at the wrong time, @@ -1678,7 +1921,7 @@ static int device_prepare(struct device 
*dev, pm_message_t state) device_lock(dev); - dev->power.wakeup_path = device_may_wakeup(dev); + dev->power.wakeup_path = false; if (dev->power.no_pm_callbacks) { ret = 1; /* Let device go direct_complete */ diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 7beee75399d4..21244c53e377 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -41,20 +41,15 @@ extern void dev_pm_disable_wake_irq_check(struct device *dev); #ifdef CONFIG_PM_SLEEP -extern int device_wakeup_attach_irq(struct device *dev, - struct wake_irq *wakeirq); +extern void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq); extern void device_wakeup_detach_irq(struct device *dev); extern void device_wakeup_arm_wake_irqs(void); extern void device_wakeup_disarm_wake_irqs(void); #else -static inline int -device_wakeup_attach_irq(struct device *dev, - struct wake_irq *wakeirq) -{ - return 0; -} +static inline void device_wakeup_attach_irq(struct device *dev, + struct wake_irq *wakeirq) {} static inline void device_wakeup_detach_irq(struct device *dev) { diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 6e89b51ea3d9..8bef3cb2424d 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -1613,22 +1613,34 @@ void pm_runtime_drop_link(struct device *dev) spin_unlock_irq(&dev->power.lock); } +static bool pm_runtime_need_not_resume(struct device *dev) +{ + return atomic_read(&dev->power.usage_count) <= 1 && + (atomic_read(&dev->power.child_count) == 0 || + dev->power.ignore_children); +} + /** * pm_runtime_force_suspend - Force a device into suspend state if needed. * @dev: Device to suspend. * * Disable runtime PM so we safely can check the device's runtime PM status and - * if it is active, invoke it's .runtime_suspend callback to bring it into - * suspend state. Keep runtime PM disabled to preserve the state unless we - * encounter errors. + * if it is active, invoke its ->runtime_suspend callback to suspend it and + * change its runtime PM status field to RPM_SUSPENDED. Also, if the device's + * usage and children counters don't indicate that the device was in use before + * the system-wide transition under way, decrement its parent's children counter + * (if there is a parent). Keep runtime PM disabled to preserve the state + * unless we encounter errors. * * Typically this function may be invoked from a system suspend callback to make - * sure the device is put into low power state. + * sure the device is put into low power state and it should only be used during + * system-wide PM transitions to sleep states. It assumes that the analogous + * pm_runtime_force_resume() will be used to resume the device. */ int pm_runtime_force_suspend(struct device *dev) { int (*callback)(struct device *); - int ret = 0; + int ret; pm_runtime_disable(dev); if (pm_runtime_status_suspended(dev)) @@ -1636,27 +1648,23 @@ int pm_runtime_force_suspend(struct device *dev) callback = RPM_GET_CALLBACK(dev, runtime_suspend); - if (!callback) { - ret = -ENOSYS; - goto err; - } - - ret = callback(dev); + ret = callback ? callback(dev) : 0; if (ret) goto err; /* - * Increase the runtime PM usage count for the device's parent, in case - * when we find the device being used when system suspend was invoked. - * This informs pm_runtime_force_resume() to resume the parent - * immediately, which is needed to be able to resume its children, - * when not deferring the resume to be managed via runtime PM. 
+ * If the device can stay in suspend after the system-wide transition + * to the working state that will follow, drop the children counter of + * its parent, but set its status to RPM_SUSPENDED anyway in case this + * function will be called again for it in the meantime. */ - if (dev->parent && atomic_read(&dev->power.usage_count) > 1) - pm_runtime_get_noresume(dev->parent); + if (pm_runtime_need_not_resume(dev)) + pm_runtime_set_suspended(dev); + else + __update_runtime_status(dev, RPM_SUSPENDED); - pm_runtime_set_suspended(dev); return 0; + err: pm_runtime_enable(dev); return ret; @@ -1669,13 +1677,9 @@ EXPORT_SYMBOL_GPL(pm_runtime_force_suspend); * * Prior invoking this function we expect the user to have brought the device * into low power state by a call to pm_runtime_force_suspend(). Here we reverse - * those actions and brings the device into full power, if it is expected to be - * used on system resume. To distinguish that, we check whether the runtime PM - * usage count is greater than 1 (the PM core increases the usage count in the - * system PM prepare phase), as that indicates a real user (such as a subsystem, - * driver, userspace, etc.) is using it. If that is the case, the device is - * expected to be used on system resume as well, so then we resume it. In the - * other case, we defer the resume to be managed via runtime PM. + * those actions and bring the device into full power, if it is expected to be + * used on system resume. In the other case, we defer the resume to be managed + * via runtime PM. * * Typically this function may be invoked from a system resume callback. */ @@ -1684,32 +1688,18 @@ int pm_runtime_force_resume(struct device *dev) int (*callback)(struct device *); int ret = 0; - callback = RPM_GET_CALLBACK(dev, runtime_resume); - - if (!callback) { - ret = -ENOSYS; - goto out; - } - - if (!pm_runtime_status_suspended(dev)) + if (!pm_runtime_status_suspended(dev) || pm_runtime_need_not_resume(dev)) goto out; /* - * Decrease the parent's runtime PM usage count, if we increased it - * during system suspend in pm_runtime_force_suspend(). - */ - if (atomic_read(&dev->power.usage_count) > 1) { - if (dev->parent) - pm_runtime_put_noidle(dev->parent); - } else { - goto out; - } + * The value of the parent's children counter is correct already, so + * just update the status of the device. + */ + __update_runtime_status(dev, RPM_ACTIVE); - ret = pm_runtime_set_active(dev); - if (ret) - goto out; + callback = RPM_GET_CALLBACK(dev, runtime_resume); - ret = callback(dev); + ret = callback ? 
callback(dev) : 0; if (ret) { pm_runtime_set_suspended(dev); goto out; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index e153e28b1857..0f651efc58a1 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -108,16 +108,10 @@ static ssize_t control_show(struct device *dev, struct device_attribute *attr, static ssize_t control_store(struct device * dev, struct device_attribute *attr, const char * buf, size_t n) { - char *cp; - int len = n; - - cp = memchr(buf, '\n', n); - if (cp) - len = cp - buf; device_lock(dev); - if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0) + if (sysfs_streq(buf, ctrl_auto)) pm_runtime_allow(dev); - else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0) + else if (sysfs_streq(buf, ctrl_on)) pm_runtime_forbid(dev); else n = -EINVAL; @@ -125,9 +119,9 @@ static ssize_t control_store(struct device * dev, struct device_attribute *attr, return n; } -static DEVICE_ATTR(control, 0644, control_show, control_store); +static DEVICE_ATTR_RW(control); -static ssize_t rtpm_active_time_show(struct device *dev, +static ssize_t runtime_active_time_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; @@ -138,9 +132,9 @@ static ssize_t rtpm_active_time_show(struct device *dev, return ret; } -static DEVICE_ATTR(runtime_active_time, 0444, rtpm_active_time_show, NULL); +static DEVICE_ATTR_RO(runtime_active_time); -static ssize_t rtpm_suspended_time_show(struct device *dev, +static ssize_t runtime_suspended_time_show(struct device *dev, struct device_attribute *attr, char *buf) { int ret; @@ -152,9 +146,9 @@ static ssize_t rtpm_suspended_time_show(struct device *dev, return ret; } -static DEVICE_ATTR(runtime_suspended_time, 0444, rtpm_suspended_time_show, NULL); +static DEVICE_ATTR_RO(runtime_suspended_time); -static ssize_t rtpm_status_show(struct device *dev, +static ssize_t runtime_status_show(struct device *dev, struct device_attribute *attr, char *buf) { const char *p; @@ -184,7 +178,7 @@ static ssize_t rtpm_status_show(struct device *dev, return sprintf(buf, p); } -static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL); +static DEVICE_ATTR_RO(runtime_status); static ssize_t autosuspend_delay_ms_show(struct device *dev, struct device_attribute *attr, char *buf) @@ -211,26 +205,25 @@ static ssize_t autosuspend_delay_ms_store(struct device *dev, return n; } -static DEVICE_ATTR(autosuspend_delay_ms, 0644, autosuspend_delay_ms_show, - autosuspend_delay_ms_store); +static DEVICE_ATTR_RW(autosuspend_delay_ms); -static ssize_t pm_qos_resume_latency_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t pm_qos_resume_latency_us_show(struct device *dev, + struct device_attribute *attr, + char *buf) { s32 value = dev_pm_qos_requested_resume_latency(dev); if (value == 0) return sprintf(buf, "n/a\n"); - else if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) + if (value == PM_QOS_RESUME_LATENCY_NO_CONSTRAINT) value = 0; return sprintf(buf, "%d\n", value); } -static ssize_t pm_qos_resume_latency_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t n) +static ssize_t pm_qos_resume_latency_us_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) { s32 value; int ret; @@ -245,7 +238,7 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev, if (value == 0) value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; - } else if (!strcmp(buf, "n/a") || !strcmp(buf, "n/a\n")) { + } else if 
(sysfs_streq(buf, "n/a")) { value = 0; } else { return -EINVAL; @@ -256,26 +249,25 @@ static ssize_t pm_qos_resume_latency_store(struct device *dev, return ret < 0 ? ret : n; } -static DEVICE_ATTR(pm_qos_resume_latency_us, 0644, - pm_qos_resume_latency_show, pm_qos_resume_latency_store); +static DEVICE_ATTR_RW(pm_qos_resume_latency_us); -static ssize_t pm_qos_latency_tolerance_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t pm_qos_latency_tolerance_us_show(struct device *dev, + struct device_attribute *attr, + char *buf) { s32 value = dev_pm_qos_get_user_latency_tolerance(dev); if (value < 0) return sprintf(buf, "auto\n"); - else if (value == PM_QOS_LATENCY_ANY) + if (value == PM_QOS_LATENCY_ANY) return sprintf(buf, "any\n"); return sprintf(buf, "%d\n", value); } -static ssize_t pm_qos_latency_tolerance_store(struct device *dev, - struct device_attribute *attr, - const char *buf, size_t n) +static ssize_t pm_qos_latency_tolerance_us_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t n) { s32 value; int ret; @@ -285,9 +277,9 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev, if (value < 0) return -EINVAL; } else { - if (!strcmp(buf, "auto") || !strcmp(buf, "auto\n")) + if (sysfs_streq(buf, "auto")) value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; - else if (!strcmp(buf, "any") || !strcmp(buf, "any\n")) + else if (sysfs_streq(buf, "any")) value = PM_QOS_LATENCY_ANY; else return -EINVAL; @@ -296,8 +288,7 @@ static ssize_t pm_qos_latency_tolerance_store(struct device *dev, return ret < 0 ? ret : n; } -static DEVICE_ATTR(pm_qos_latency_tolerance_us, 0644, - pm_qos_latency_tolerance_show, pm_qos_latency_tolerance_store); +static DEVICE_ATTR_RW(pm_qos_latency_tolerance_us); static ssize_t pm_qos_no_power_off_show(struct device *dev, struct device_attribute *attr, @@ -323,49 +314,39 @@ static ssize_t pm_qos_no_power_off_store(struct device *dev, return ret < 0 ? ret : n; } -static DEVICE_ATTR(pm_qos_no_power_off, 0644, - pm_qos_no_power_off_show, pm_qos_no_power_off_store); +static DEVICE_ATTR_RW(pm_qos_no_power_off); #ifdef CONFIG_PM_SLEEP static const char _enabled[] = "enabled"; static const char _disabled[] = "disabled"; -static ssize_t -wake_show(struct device * dev, struct device_attribute *attr, char * buf) +static ssize_t wakeup_show(struct device *dev, struct device_attribute *attr, + char *buf) { return sprintf(buf, "%s\n", device_can_wakeup(dev) ? (device_may_wakeup(dev) ? 
_enabled : _disabled) : ""); } -static ssize_t -wake_store(struct device * dev, struct device_attribute *attr, - const char * buf, size_t n) +static ssize_t wakeup_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) { - char *cp; - int len = n; - if (!device_can_wakeup(dev)) return -EINVAL; - cp = memchr(buf, '\n', n); - if (cp) - len = cp - buf; - if (len == sizeof _enabled - 1 - && strncmp(buf, _enabled, sizeof _enabled - 1) == 0) + if (sysfs_streq(buf, _enabled)) device_set_wakeup_enable(dev, 1); - else if (len == sizeof _disabled - 1 - && strncmp(buf, _disabled, sizeof _disabled - 1) == 0) + else if (sysfs_streq(buf, _disabled)) device_set_wakeup_enable(dev, 0); else return -EINVAL; return n; } -static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); +static DEVICE_ATTR_RW(wakeup); static ssize_t wakeup_count_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { unsigned long count = 0; bool enabled = false; @@ -379,10 +360,11 @@ static ssize_t wakeup_count_show(struct device *dev, return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); } -static DEVICE_ATTR(wakeup_count, 0444, wakeup_count_show, NULL); +static DEVICE_ATTR_RO(wakeup_count); static ssize_t wakeup_active_count_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, + char *buf) { unsigned long count = 0; bool enabled = false; @@ -396,11 +378,11 @@ static ssize_t wakeup_active_count_show(struct device *dev, return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); } -static DEVICE_ATTR(wakeup_active_count, 0444, wakeup_active_count_show, NULL); +static DEVICE_ATTR_RO(wakeup_active_count); static ssize_t wakeup_abort_count_show(struct device *dev, - struct device_attribute *attr, - char *buf) + struct device_attribute *attr, + char *buf) { unsigned long count = 0; bool enabled = false; @@ -414,7 +396,7 @@ static ssize_t wakeup_abort_count_show(struct device *dev, return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); } -static DEVICE_ATTR(wakeup_abort_count, 0444, wakeup_abort_count_show, NULL); +static DEVICE_ATTR_RO(wakeup_abort_count); static ssize_t wakeup_expire_count_show(struct device *dev, struct device_attribute *attr, @@ -432,10 +414,10 @@ static ssize_t wakeup_expire_count_show(struct device *dev, return enabled ? sprintf(buf, "%lu\n", count) : sprintf(buf, "\n"); } -static DEVICE_ATTR(wakeup_expire_count, 0444, wakeup_expire_count_show, NULL); +static DEVICE_ATTR_RO(wakeup_expire_count); static ssize_t wakeup_active_show(struct device *dev, - struct device_attribute *attr, char *buf) + struct device_attribute *attr, char *buf) { unsigned int active = 0; bool enabled = false; @@ -449,10 +431,11 @@ static ssize_t wakeup_active_show(struct device *dev, return enabled ? sprintf(buf, "%u\n", active) : sprintf(buf, "\n"); } -static DEVICE_ATTR(wakeup_active, 0444, wakeup_active_show, NULL); +static DEVICE_ATTR_RO(wakeup_active); -static ssize_t wakeup_total_time_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t wakeup_total_time_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) { s64 msec = 0; bool enabled = false; @@ -466,10 +449,10 @@ static ssize_t wakeup_total_time_show(struct device *dev, return enabled ? 
sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); } -static DEVICE_ATTR(wakeup_total_time_ms, 0444, wakeup_total_time_show, NULL); +static DEVICE_ATTR_RO(wakeup_total_time_ms); -static ssize_t wakeup_max_time_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t wakeup_max_time_ms_show(struct device *dev, + struct device_attribute *attr, char *buf) { s64 msec = 0; bool enabled = false; @@ -483,10 +466,11 @@ static ssize_t wakeup_max_time_show(struct device *dev, return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); } -static DEVICE_ATTR(wakeup_max_time_ms, 0444, wakeup_max_time_show, NULL); +static DEVICE_ATTR_RO(wakeup_max_time_ms); -static ssize_t wakeup_last_time_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t wakeup_last_time_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) { s64 msec = 0; bool enabled = false; @@ -500,12 +484,12 @@ static ssize_t wakeup_last_time_show(struct device *dev, return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); } -static DEVICE_ATTR(wakeup_last_time_ms, 0444, wakeup_last_time_show, NULL); +static DEVICE_ATTR_RO(wakeup_last_time_ms); #ifdef CONFIG_PM_AUTOSLEEP -static ssize_t wakeup_prevent_sleep_time_show(struct device *dev, - struct device_attribute *attr, - char *buf) +static ssize_t wakeup_prevent_sleep_time_ms_show(struct device *dev, + struct device_attribute *attr, + char *buf) { s64 msec = 0; bool enabled = false; @@ -519,40 +503,39 @@ static ssize_t wakeup_prevent_sleep_time_show(struct device *dev, return enabled ? sprintf(buf, "%lld\n", msec) : sprintf(buf, "\n"); } -static DEVICE_ATTR(wakeup_prevent_sleep_time_ms, 0444, - wakeup_prevent_sleep_time_show, NULL); +static DEVICE_ATTR_RO(wakeup_prevent_sleep_time_ms); #endif /* CONFIG_PM_AUTOSLEEP */ #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_ADVANCED_DEBUG -static ssize_t rtpm_usagecount_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t runtime_usage_show(struct device *dev, + struct device_attribute *attr, char *buf) { return sprintf(buf, "%d\n", atomic_read(&dev->power.usage_count)); } +static DEVICE_ATTR_RO(runtime_usage); -static ssize_t rtpm_children_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t runtime_active_kids_show(struct device *dev, + struct device_attribute *attr, + char *buf) { return sprintf(buf, "%d\n", dev->power.ignore_children ? 
0 : atomic_read(&dev->power.child_count)); } +static DEVICE_ATTR_RO(runtime_active_kids); -static ssize_t rtpm_enabled_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t runtime_enabled_show(struct device *dev, + struct device_attribute *attr, char *buf) { - if ((dev->power.disable_depth) && (dev->power.runtime_auto == false)) + if (dev->power.disable_depth && (dev->power.runtime_auto == false)) return sprintf(buf, "disabled & forbidden\n"); - else if (dev->power.disable_depth) + if (dev->power.disable_depth) return sprintf(buf, "disabled\n"); - else if (dev->power.runtime_auto == false) + if (dev->power.runtime_auto == false) return sprintf(buf, "forbidden\n"); return sprintf(buf, "enabled\n"); } - -static DEVICE_ATTR(runtime_usage, 0444, rtpm_usagecount_show, NULL); -static DEVICE_ATTR(runtime_active_kids, 0444, rtpm_children_show, NULL); -static DEVICE_ATTR(runtime_enabled, 0444, rtpm_enabled_show, NULL); +static DEVICE_ATTR_RO(runtime_enabled); #ifdef CONFIG_PM_SLEEP static ssize_t async_show(struct device *dev, struct device_attribute *attr, @@ -566,23 +549,16 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr, static ssize_t async_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t n) { - char *cp; - int len = n; - - cp = memchr(buf, '\n', n); - if (cp) - len = cp - buf; - if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0) + if (sysfs_streq(buf, _enabled)) device_enable_async_suspend(dev); - else if (len == sizeof _disabled - 1 && - strncmp(buf, _disabled, len) == 0) + else if (sysfs_streq(buf, _disabled)) device_disable_async_suspend(dev); else return -EINVAL; return n; } -static DEVICE_ATTR(async, 0644, async_show, async_store); +static DEVICE_ATTR_RW(async); #endif /* CONFIG_PM_SLEEP */ #endif /* CONFIG_PM_ADVANCED_DEBUG */ diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index ae0429827f31..a8ac86e4d79e 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -33,7 +33,6 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq, struct wake_irq *wirq) { unsigned long flags; - int err; if (!dev || !wirq) return -EINVAL; @@ -45,12 +44,11 @@ static int dev_pm_attach_wake_irq(struct device *dev, int irq, return -EEXIST; } - err = device_wakeup_attach_irq(dev, wirq); - if (!err) - dev->power.wakeirq = wirq; + dev->power.wakeirq = wirq; + device_wakeup_attach_irq(dev, wirq); spin_unlock_irqrestore(&dev->power.lock, flags); - return err; + return 0; } /** diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 38559f04db2c..ea01621ed769 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -19,6 +19,11 @@ #include "power.h" +#ifndef CONFIG_SUSPEND +suspend_state_t pm_suspend_target_state; +#define pm_suspend_target_state (PM_SUSPEND_ON) +#endif + /* * If set, the suspend/hibernate code will abort transitions to a sleep state * if wakeup events are registered during or immediately before the transition. @@ -268,6 +273,9 @@ int device_wakeup_enable(struct device *dev) if (!dev || !dev->power.can_wakeup) return -EINVAL; + if (pm_suspend_target_state != PM_SUSPEND_ON) + dev_dbg(dev, "Suspicious %s() during system transition!\n", __func__); + ws = wakeup_source_register(dev_name(dev)); if (!ws) return -ENOMEM; @@ -291,22 +299,19 @@ EXPORT_SYMBOL_GPL(device_wakeup_enable); * * Call under the device's power.lock lock. 
*/ -int device_wakeup_attach_irq(struct device *dev, +void device_wakeup_attach_irq(struct device *dev, struct wake_irq *wakeirq) { struct wakeup_source *ws; ws = dev->power.wakeup; - if (!ws) { - dev_err(dev, "forgot to call call device_init_wakeup?\n"); - return -EINVAL; - } + if (!ws) + return; if (ws->wakeirq) - return -EEXIST; + dev_err(dev, "Leftover wakeup IRQ found, overriding\n"); ws->wakeirq = wakeirq; - return 0; } /** @@ -448,9 +453,7 @@ int device_init_wakeup(struct device *dev, bool enable) device_set_wakeup_capable(dev, true); ret = device_wakeup_enable(dev); } else { - if (dev->power.can_wakeup) - device_wakeup_disable(dev); - + device_wakeup_disable(dev); device_set_wakeup_capable(dev, false); } @@ -464,9 +467,6 @@ EXPORT_SYMBOL_GPL(device_init_wakeup); */ int device_set_wakeup_enable(struct device *dev, bool enable) { - if (!dev || !dev->power.can_wakeup) - return -EINVAL; - return enable ? device_wakeup_enable(dev) : device_wakeup_disable(dev); } EXPORT_SYMBOL_GPL(device_set_wakeup_enable); diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index dc7b3c7b7d42..57e011d36a79 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -120,7 +120,7 @@ config QCOM_EBI2 SRAM, ethernet adapters, FPGAs and LCD displays. config SIMPLE_PM_BUS - bool "Simple Power-Managed Bus Driver" + tristate "Simple Power-Managed Bus Driver" depends on OF && PM help Driver for transparent busses that don't need a real driver, but diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm index bdce4488ded1..3a88e33b0cfe 100644 --- a/drivers/cpufreq/Kconfig.arm +++ b/drivers/cpufreq/Kconfig.arm @@ -2,6 +2,29 @@ # ARM CPU Frequency scaling drivers # +config ACPI_CPPC_CPUFREQ + tristate "CPUFreq driver based on the ACPI CPPC spec" + depends on ACPI_PROCESSOR + select ACPI_CPPC_LIB + help + This adds a CPUFreq driver which uses CPPC methods + as described in the ACPIv5.1 spec. CPPC stands for + Collaborative Processor Performance Controls. It + is based on an abstract continuous scale of CPU + performance values which allows the remote power + processor to flexibly optimize for power and + performance. CPPC relies on power management firmware + support for its operation. + + If in doubt, say N. + +config ARM_ARMADA_37XX_CPUFREQ + tristate "Armada 37xx CPUFreq support" + depends on ARCH_MVEBU + help + This adds the CPUFreq driver support for Marvell Armada 37xx SoCs. + The Armada 37xx PMU supports 4 frequency and VDD levels. + # big LITTLE core layer and glue drivers config ARM_BIG_LITTLE_CPUFREQ tristate "Generic ARM big LITTLE CPUfreq driver" @@ -12,6 +35,30 @@ config ARM_BIG_LITTLE_CPUFREQ help This enables the Generic CPUfreq driver for ARM big.LITTLE platforms. +config ARM_DT_BL_CPUFREQ + tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver" + depends on ARM_BIG_LITTLE_CPUFREQ && OF + help + This enables probing via DT for Generic CPUfreq driver for ARM + big.LITTLE platform. This gets frequency tables from DT. + +config ARM_SCPI_CPUFREQ + tristate "SCPI based CPUfreq driver" + depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI + help + This adds the CPUfreq driver support for ARM big.LITTLE platforms + using SCPI protocol for CPU power management. + + This driver uses SCPI Message Protocol driver to interact with the + firmware providing the CPU DVFS functionality. 
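(Aside: the pm_runtime_force_suspend()/pm_runtime_force_resume() rework in runtime.c above targets drivers that reuse their runtime PM callbacks for system sleep. The driver-side idiom is unchanged and looks roughly like this; foo_* names are hypothetical and the runtime callbacks are assumed to fully power the device down and up.)

#include <linux/pm.h>
#include <linux/pm_runtime.h>

static int foo_runtime_suspend(struct device *dev)
{
        /* Gate clocks, drop regulators, save context as needed. */
        return 0;
}

static int foo_runtime_resume(struct device *dev)
{
        /* Undo foo_runtime_suspend(). */
        return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
        /*
         * With the changes above, pm_runtime_force_resume() only powers the
         * device back up if its usage or children counters show it was in
         * use before the transition; otherwise it is left suspended and
         * handled by runtime PM afterwards.
         */
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};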
+ +config ARM_VEXPRESS_SPC_CPUFREQ + tristate "Versatile Express SPC based CPUfreq driver" + depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC + help + This add the CPUfreq driver support for Versatile Express + big.LITTLE platforms using SPC for power management. + config ARM_BRCMSTB_AVS_CPUFREQ tristate "Broadcom STB AVS CPUfreq driver" depends on ARCH_BRCMSTB || COMPILE_TEST @@ -33,20 +80,6 @@ config ARM_BRCMSTB_AVS_CPUFREQ_DEBUG If in doubt, say N. -config ARM_DT_BL_CPUFREQ - tristate "Generic probing via DT for ARM big LITTLE CPUfreq driver" - depends on ARM_BIG_LITTLE_CPUFREQ && OF - help - This enables probing via DT for Generic CPUfreq driver for ARM - big.LITTLE platform. This gets frequency tables from DT. - -config ARM_VEXPRESS_SPC_CPUFREQ - tristate "Versatile Express SPC based CPUfreq driver" - depends on ARM_BIG_LITTLE_CPUFREQ && ARCH_VEXPRESS_SPC - help - This add the CPUfreq driver support for Versatile Express - big.LITTLE platforms using SPC for power management. - config ARM_EXYNOS5440_CPUFREQ tristate "SAMSUNG EXYNOS5440" depends on SOC_EXYNOS5440 @@ -205,16 +238,6 @@ config ARM_SA1100_CPUFREQ config ARM_SA1110_CPUFREQ bool -config ARM_SCPI_CPUFREQ - tristate "SCPI based CPUfreq driver" - depends on ARM_BIG_LITTLE_CPUFREQ && ARM_SCPI_PROTOCOL && COMMON_CLK_SCPI - help - This adds the CPUfreq driver support for ARM big.LITTLE platforms - using SCPI protocol for CPU power management. - - This driver uses SCPI Message Protocol driver to interact with the - firmware providing the CPU DVFS functionality. - config ARM_SPEAR_CPUFREQ bool "SPEAr CPUFreq support" depends on PLAT_SPEAR @@ -275,20 +298,3 @@ config ARM_PXA2xx_CPUFREQ This add the CPUFreq driver support for Intel PXA2xx SOCs. If in doubt, say N. - -config ACPI_CPPC_CPUFREQ - tristate "CPUFreq driver based on the ACPI CPPC spec" - depends on ACPI_PROCESSOR - select ACPI_CPPC_LIB - default n - help - This adds a CPUFreq driver which uses CPPC methods - as described in the ACPIv5.1 spec. CPPC stands for - Collaborative Processor Performance Controls. It - is based on an abstract continuous scale of CPU - performance values which allows the remote power - processor to flexibly optimize for power and - performance. CPPC relies on power management firmware - support for its operation. - - If in doubt, say N. diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile index 812f9e0d01a3..e07715ce8844 100644 --- a/drivers/cpufreq/Makefile +++ b/drivers/cpufreq/Makefile @@ -52,23 +52,26 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o # LITTLE drivers, so that it is probed last. 
obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o +obj-$(CONFIG_ARM_ARMADA_37XX_CPUFREQ) += armada-37xx-cpufreq.o obj-$(CONFIG_ARM_BRCMSTB_AVS_CPUFREQ) += brcmstb-avs-cpufreq.o +obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o obj-$(CONFIG_ARM_EXYNOS5440_CPUFREQ) += exynos5440-cpufreq.o obj-$(CONFIG_ARM_HIGHBANK_CPUFREQ) += highbank-cpufreq.o obj-$(CONFIG_ARM_IMX6Q_CPUFREQ) += imx6q-cpufreq.o obj-$(CONFIG_ARM_KIRKWOOD_CPUFREQ) += kirkwood-cpufreq.o obj-$(CONFIG_ARM_MEDIATEK_CPUFREQ) += mediatek-cpufreq.o +obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o obj-$(CONFIG_ARM_OMAP2PLUS_CPUFREQ) += omap-cpufreq.o obj-$(CONFIG_ARM_PXA2xx_CPUFREQ) += pxa2xx-cpufreq.o obj-$(CONFIG_PXA3xx) += pxa3xx-cpufreq.o -obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o -obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o obj-$(CONFIG_ARM_S3C2410_CPUFREQ) += s3c2410-cpufreq.o obj-$(CONFIG_ARM_S3C2412_CPUFREQ) += s3c2412-cpufreq.o obj-$(CONFIG_ARM_S3C2416_CPUFREQ) += s3c2416-cpufreq.o obj-$(CONFIG_ARM_S3C2440_CPUFREQ) += s3c2440-cpufreq.o obj-$(CONFIG_ARM_S3C64XX_CPUFREQ) += s3c64xx-cpufreq.o +obj-$(CONFIG_ARM_S3C24XX_CPUFREQ) += s3c24xx-cpufreq.o +obj-$(CONFIG_ARM_S3C24XX_CPUFREQ_DEBUGFS) += s3c24xx-cpufreq-debugfs.o obj-$(CONFIG_ARM_S5PV210_CPUFREQ) += s5pv210-cpufreq.o obj-$(CONFIG_ARM_SA1100_CPUFREQ) += sa1100-cpufreq.o obj-$(CONFIG_ARM_SA1110_CPUFREQ) += sa1110-cpufreq.o @@ -81,8 +84,6 @@ obj-$(CONFIG_ARM_TEGRA124_CPUFREQ) += tegra124-cpufreq.o obj-$(CONFIG_ARM_TEGRA186_CPUFREQ) += tegra186-cpufreq.o obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-cpufreq.o obj-$(CONFIG_ARM_VEXPRESS_SPC_CPUFREQ) += vexpress-spc-cpufreq.o -obj-$(CONFIG_ACPI_CPPC_CPUFREQ) += cppc_cpufreq.o -obj-$(CONFIG_MACH_MVEBU_V7) += mvebu-cpufreq.o ################################################################################## diff --git a/drivers/cpufreq/arm_big_little.c b/drivers/cpufreq/arm_big_little.c index 65ec5f01aa8d..c56b57dcfda5 100644 --- a/drivers/cpufreq/arm_big_little.c +++ b/drivers/cpufreq/arm_big_little.c @@ -526,34 +526,13 @@ static int bL_cpufreq_exit(struct cpufreq_policy *policy) static void bL_cpufreq_ready(struct cpufreq_policy *policy) { - struct device *cpu_dev = get_cpu_device(policy->cpu); int cur_cluster = cpu_to_cluster(policy->cpu); - struct device_node *np; /* Do not register a cpu_cooling device if we are in IKS mode */ if (cur_cluster >= MAX_CLUSTERS) return; - np = of_node_get(cpu_dev->of_node); - if (WARN_ON(!np)) - return; - - if (of_find_property(np, "#cooling-cells", NULL)) { - u32 power_coefficient = 0; - - of_property_read_u32(np, "dynamic-power-coefficient", - &power_coefficient); - - cdev[cur_cluster] = of_cpufreq_power_cooling_register(np, - policy, power_coefficient, NULL); - if (IS_ERR(cdev[cur_cluster])) { - dev_err(cpu_dev, - "running cpufreq without cooling device: %ld\n", - PTR_ERR(cdev[cur_cluster])); - cdev[cur_cluster] = NULL; - } - } - of_node_put(np); + cdev[cur_cluster] = of_cpufreq_cooling_register(policy); } static struct cpufreq_driver bL_cpufreq_driver = { diff --git a/drivers/cpufreq/armada-37xx-cpufreq.c b/drivers/cpufreq/armada-37xx-cpufreq.c new file mode 100644 index 000000000000..c6ebc88a7d8d --- /dev/null +++ b/drivers/cpufreq/armada-37xx-cpufreq.c @@ -0,0 +1,241 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * CPU frequency scaling support for Armada 37xx platform. 
+ * + * Copyright (C) 2017 Marvell + * + * Gregory CLEMENT <gregory.clement@free-electrons.com> + */ + +#include <linux/clk.h> +#include <linux/cpu.h> +#include <linux/cpufreq.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +/* Power management in North Bridge register set */ +#define ARMADA_37XX_NB_L0L1 0x18 +#define ARMADA_37XX_NB_L2L3 0x1C +#define ARMADA_37XX_NB_TBG_DIV_OFF 13 +#define ARMADA_37XX_NB_TBG_DIV_MASK 0x7 +#define ARMADA_37XX_NB_CLK_SEL_OFF 11 +#define ARMADA_37XX_NB_CLK_SEL_MASK 0x1 +#define ARMADA_37XX_NB_CLK_SEL_TBG 0x1 +#define ARMADA_37XX_NB_TBG_SEL_OFF 9 +#define ARMADA_37XX_NB_TBG_SEL_MASK 0x3 +#define ARMADA_37XX_NB_VDD_SEL_OFF 6 +#define ARMADA_37XX_NB_VDD_SEL_MASK 0x3 +#define ARMADA_37XX_NB_CONFIG_SHIFT 16 +#define ARMADA_37XX_NB_DYN_MOD 0x24 +#define ARMADA_37XX_NB_CLK_SEL_EN BIT(26) +#define ARMADA_37XX_NB_TBG_EN BIT(28) +#define ARMADA_37XX_NB_DIV_EN BIT(29) +#define ARMADA_37XX_NB_VDD_EN BIT(30) +#define ARMADA_37XX_NB_DFS_EN BIT(31) +#define ARMADA_37XX_NB_CPU_LOAD 0x30 +#define ARMADA_37XX_NB_CPU_LOAD_MASK 0x3 +#define ARMADA_37XX_DVFS_LOAD_0 0 +#define ARMADA_37XX_DVFS_LOAD_1 1 +#define ARMADA_37XX_DVFS_LOAD_2 2 +#define ARMADA_37XX_DVFS_LOAD_3 3 + +/* + * On Armada 37xx the Power management manages 4 level of CPU load, + * each level can be associated with a CPU clock source, a CPU + * divider, a VDD level, etc... + */ +#define LOAD_LEVEL_NR 4 + +struct armada_37xx_dvfs { + u32 cpu_freq_max; + u8 divider[LOAD_LEVEL_NR]; +}; + +static struct armada_37xx_dvfs armada_37xx_dvfs[] = { + {.cpu_freq_max = 1200*1000*1000, .divider = {1, 2, 4, 6} }, + {.cpu_freq_max = 1000*1000*1000, .divider = {1, 2, 4, 5} }, + {.cpu_freq_max = 800*1000*1000, .divider = {1, 2, 3, 4} }, + {.cpu_freq_max = 600*1000*1000, .divider = {2, 4, 5, 6} }, +}; + +static struct armada_37xx_dvfs *armada_37xx_cpu_freq_info_get(u32 freq) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(armada_37xx_dvfs); i++) { + if (freq == armada_37xx_dvfs[i].cpu_freq_max) + return &armada_37xx_dvfs[i]; + } + + pr_err("Unsupported CPU frequency %d MHz\n", freq/1000000); + return NULL; +} + +/* + * Setup the four level managed by the hardware. Once the four level + * will be configured then the DVFS will be enabled. + */ +static void __init armada37xx_cpufreq_dvfs_setup(struct regmap *base, + struct clk *clk, u8 *divider) +{ + int load_lvl; + struct clk *parent; + + for (load_lvl = 0; load_lvl < LOAD_LEVEL_NR; load_lvl++) { + unsigned int reg, mask, val, offset = 0; + + if (load_lvl <= ARMADA_37XX_DVFS_LOAD_1) + reg = ARMADA_37XX_NB_L0L1; + else + reg = ARMADA_37XX_NB_L2L3; + + if (load_lvl == ARMADA_37XX_DVFS_LOAD_0 || + load_lvl == ARMADA_37XX_DVFS_LOAD_2) + offset += ARMADA_37XX_NB_CONFIG_SHIFT; + + /* Set cpu clock source, for all the level we use TBG */ + val = ARMADA_37XX_NB_CLK_SEL_TBG << ARMADA_37XX_NB_CLK_SEL_OFF; + mask = (ARMADA_37XX_NB_CLK_SEL_MASK + << ARMADA_37XX_NB_CLK_SEL_OFF); + + /* + * Set cpu divider based on the pre-computed array in + * order to have balanced step. + */ + val |= divider[load_lvl] << ARMADA_37XX_NB_TBG_DIV_OFF; + mask |= (ARMADA_37XX_NB_TBG_DIV_MASK + << ARMADA_37XX_NB_TBG_DIV_OFF); + + /* Set VDD divider which is actually the load level. 
*/ + val |= load_lvl << ARMADA_37XX_NB_VDD_SEL_OFF; + mask |= (ARMADA_37XX_NB_VDD_SEL_MASK + << ARMADA_37XX_NB_VDD_SEL_OFF); + + val <<= offset; + mask <<= offset; + + regmap_update_bits(base, reg, mask, val); + } + + /* + * Set cpu clock source; for all the levels we keep the same + * clock source as the one already configured. For this one + * we need to use the clock framework. + */ + parent = clk_get_parent(clk); + clk_set_parent(clk, parent); +} + +static void __init armada37xx_cpufreq_disable_dvfs(struct regmap *base) +{ + unsigned int reg = ARMADA_37XX_NB_DYN_MOD, + mask = ARMADA_37XX_NB_DFS_EN; + + regmap_update_bits(base, reg, mask, 0); +} + +static void __init armada37xx_cpufreq_enable_dvfs(struct regmap *base) +{ + unsigned int val, reg = ARMADA_37XX_NB_CPU_LOAD, + mask = ARMADA_37XX_NB_CPU_LOAD_MASK; + + /* Start with the highest load (0) */ + val = ARMADA_37XX_DVFS_LOAD_0; + regmap_update_bits(base, reg, mask, val); + + /* Now enable DVFS for the CPUs */ + reg = ARMADA_37XX_NB_DYN_MOD; + mask = ARMADA_37XX_NB_CLK_SEL_EN | ARMADA_37XX_NB_TBG_EN | + ARMADA_37XX_NB_DIV_EN | ARMADA_37XX_NB_VDD_EN | + ARMADA_37XX_NB_DFS_EN; + + regmap_update_bits(base, reg, mask, mask); +} + +static int __init armada37xx_cpufreq_driver_init(void) +{ + struct armada_37xx_dvfs *dvfs; + struct platform_device *pdev; + unsigned int cur_frequency; + struct regmap *nb_pm_base; + struct device *cpu_dev; + int load_lvl, ret; + struct clk *clk; + + nb_pm_base = + syscon_regmap_lookup_by_compatible("marvell,armada-3700-nb-pm"); + + if (IS_ERR(nb_pm_base)) + return -ENODEV; + + /* Before doing any configuration of the DVFS, first disable it */ + armada37xx_cpufreq_disable_dvfs(nb_pm_base); + + /* + * On CPU 0 register the operating points supported (which are + * the nominal CPU frequency and full integer divisions of + * it).
+ */ + cpu_dev = get_cpu_device(0); + if (!cpu_dev) { + dev_err(cpu_dev, "Cannot get CPU\n"); + return -ENODEV; + } + + clk = clk_get(cpu_dev, 0); + if (IS_ERR(clk)) { + dev_err(cpu_dev, "Cannot get clock for CPU0\n"); + return PTR_ERR(clk); + } + + /* Get nominal (current) CPU frequency */ + cur_frequency = clk_get_rate(clk); + if (!cur_frequency) { + dev_err(cpu_dev, "Failed to get clock rate for CPU\n"); + return -EINVAL; + } + + dvfs = armada_37xx_cpu_freq_info_get(cur_frequency); + if (!dvfs) + return -EINVAL; + + armada37xx_cpufreq_dvfs_setup(nb_pm_base, clk, dvfs->divider); + + for (load_lvl = ARMADA_37XX_DVFS_LOAD_0; load_lvl < LOAD_LEVEL_NR; + load_lvl++) { + unsigned long freq = cur_frequency / dvfs->divider[load_lvl]; + + ret = dev_pm_opp_add(cpu_dev, freq, 0); + if (ret) { + /* clean-up the already added opp before leaving */ + while (load_lvl-- > ARMADA_37XX_DVFS_LOAD_0) { + freq = cur_frequency / dvfs->divider[load_lvl]; + dev_pm_opp_remove(cpu_dev, freq); + } + return ret; + } + } + + /* Now that everything is setup, enable the DVFS at hardware level */ + armada37xx_cpufreq_enable_dvfs(nb_pm_base); + + pdev = platform_device_register_simple("cpufreq-dt", -1, NULL, 0); + + return PTR_ERR_OR_ZERO(pdev); +} +/* late_initcall, to guarantee the driver is loaded after A37xx clock driver */ +late_initcall(armada37xx_cpufreq_driver_init); + +MODULE_AUTHOR("Gregory CLEMENT <gregory.clement@free-electrons.com>"); +MODULE_DESCRIPTION("Armada 37xx cpufreq driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/cpufreq/cpufreq-dt-platdev.c b/drivers/cpufreq/cpufreq-dt-platdev.c index ecc56e26f8f6..3b585e4bfac5 100644 --- a/drivers/cpufreq/cpufreq-dt-platdev.c +++ b/drivers/cpufreq/cpufreq-dt-platdev.c @@ -108,6 +108,14 @@ static const struct of_device_id blacklist[] __initconst = { { .compatible = "marvell,armadaxp", }, + { .compatible = "mediatek,mt2701", }, + { .compatible = "mediatek,mt2712", }, + { .compatible = "mediatek,mt7622", }, + { .compatible = "mediatek,mt7623", }, + { .compatible = "mediatek,mt817x", }, + { .compatible = "mediatek,mt8173", }, + { .compatible = "mediatek,mt8176", }, + { .compatible = "nvidia,tegra124", }, { .compatible = "st,stih407", }, diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index 545946ad0752..de3d104c25d7 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -319,33 +319,8 @@ static int cpufreq_exit(struct cpufreq_policy *policy) static void cpufreq_ready(struct cpufreq_policy *policy) { struct private_data *priv = policy->driver_data; - struct device_node *np = of_node_get(priv->cpu_dev->of_node); - if (WARN_ON(!np)) - return; - - /* - * For now, just loading the cooling device; - * thermal DT code takes care of matching them. 
- */ - if (of_find_property(np, "#cooling-cells", NULL)) { - u32 power_coefficient = 0; - - of_property_read_u32(np, "dynamic-power-coefficient", - &power_coefficient); - - priv->cdev = of_cpufreq_power_cooling_register(np, - policy, power_coefficient, NULL); - if (IS_ERR(priv->cdev)) { - dev_err(priv->cpu_dev, - "running cpufreq without cooling device: %ld\n", - PTR_ERR(priv->cdev)); - - priv->cdev = NULL; - } - } - - of_node_put(np); + priv->cdev = of_cpufreq_cooling_register(policy); } static struct cpufreq_driver dt_cpufreq_driver = { diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index 41d148af7748..421f318c0e66 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -601,19 +601,18 @@ static struct cpufreq_governor *find_governor(const char *str_governor) /** * cpufreq_parse_governor - parse a governor string */ -static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, - struct cpufreq_governor **governor) +static int cpufreq_parse_governor(char *str_governor, + struct cpufreq_policy *policy) { - int err = -EINVAL; - if (cpufreq_driver->setpolicy) { if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN)) { - *policy = CPUFREQ_POLICY_PERFORMANCE; - err = 0; - } else if (!strncasecmp(str_governor, "powersave", - CPUFREQ_NAME_LEN)) { - *policy = CPUFREQ_POLICY_POWERSAVE; - err = 0; + policy->policy = CPUFREQ_POLICY_PERFORMANCE; + return 0; + } + + if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN)) { + policy->policy = CPUFREQ_POLICY_POWERSAVE; + return 0; } } else { struct cpufreq_governor *t; @@ -621,26 +620,31 @@ static int cpufreq_parse_governor(char *str_governor, unsigned int *policy, mutex_lock(&cpufreq_governor_mutex); t = find_governor(str_governor); - - if (t == NULL) { + if (!t) { int ret; mutex_unlock(&cpufreq_governor_mutex); + ret = request_module("cpufreq_%s", str_governor); - mutex_lock(&cpufreq_governor_mutex); + if (ret) + return -EINVAL; - if (ret == 0) - t = find_governor(str_governor); - } + mutex_lock(&cpufreq_governor_mutex); - if (t != NULL) { - *governor = t; - err = 0; + t = find_governor(str_governor); } + if (t && !try_module_get(t->owner)) + t = NULL; mutex_unlock(&cpufreq_governor_mutex); + + if (t) { + policy->governor = t; + return 0; + } } - return err; + + return -EINVAL; } /** @@ -760,11 +764,14 @@ static ssize_t store_scaling_governor(struct cpufreq_policy *policy, if (ret != 1) return -EINVAL; - if (cpufreq_parse_governor(str_governor, &new_policy.policy, - &new_policy.governor)) + if (cpufreq_parse_governor(str_governor, &new_policy)) return -EINVAL; ret = cpufreq_set_policy(policy, &new_policy); + + if (new_policy.governor) + module_put(new_policy.governor->owner); + return ret ? 
ret : count; } @@ -1044,8 +1051,7 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy) if (policy->last_policy) new_policy.policy = policy->last_policy; else - cpufreq_parse_governor(gov->name, &new_policy.policy, - NULL); + cpufreq_parse_governor(gov->name, &new_policy); } /* set default policy */ return cpufreq_set_policy(policy, &new_policy); @@ -2160,7 +2166,6 @@ void cpufreq_unregister_governor(struct cpufreq_governor *governor) mutex_lock(&cpufreq_governor_mutex); list_del(&governor->governor_list); mutex_unlock(&cpufreq_governor_mutex); - return; } EXPORT_SYMBOL_GPL(cpufreq_unregister_governor); diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c index 1e55b5790853..1572129844a5 100644 --- a/drivers/cpufreq/cpufreq_stats.c +++ b/drivers/cpufreq/cpufreq_stats.c @@ -27,7 +27,7 @@ struct cpufreq_stats { unsigned int *trans_table; }; -static int cpufreq_stats_update(struct cpufreq_stats *stats) +static void cpufreq_stats_update(struct cpufreq_stats *stats) { unsigned long long cur_time = get_jiffies_64(); @@ -35,7 +35,6 @@ static int cpufreq_stats_update(struct cpufreq_stats *stats) stats->time_in_state[stats->last_index] += cur_time - stats->last_time; stats->last_time = cur_time; spin_unlock(&cpufreq_stats_lock); - return 0; } static void cpufreq_stats_clear_table(struct cpufreq_stats *stats) diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index d9b2c2de49c4..741f22e5cee3 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -25,15 +25,29 @@ static struct regulator *arm_reg; static struct regulator *pu_reg; static struct regulator *soc_reg; -static struct clk *arm_clk; -static struct clk *pll1_sys_clk; -static struct clk *pll1_sw_clk; -static struct clk *step_clk; -static struct clk *pll2_pfd2_396m_clk; - -/* clk used by i.MX6UL */ -static struct clk *pll2_bus_clk; -static struct clk *secondary_sel_clk; +enum IMX6_CPUFREQ_CLKS { + ARM, + PLL1_SYS, + STEP, + PLL1_SW, + PLL2_PFD2_396M, + /* MX6UL requires two more clks */ + PLL2_BUS, + SECONDARY_SEL, +}; +#define IMX6Q_CPUFREQ_CLK_NUM 5 +#define IMX6UL_CPUFREQ_CLK_NUM 7 + +static int num_clks; +static struct clk_bulk_data clks[] = { + { .id = "arm" }, + { .id = "pll1_sys" }, + { .id = "step" }, + { .id = "pll1_sw" }, + { .id = "pll2_pfd2_396m" }, + { .id = "pll2_bus" }, + { .id = "secondary_sel" }, +}; static struct device *cpu_dev; static bool free_opp; @@ -53,7 +67,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) new_freq = freq_table[index].frequency; freq_hz = new_freq * 1000; - old_freq = clk_get_rate(arm_clk) / 1000; + old_freq = clk_get_rate(clks[ARM].clk) / 1000; opp = dev_pm_opp_find_freq_ceil(cpu_dev, &freq_hz); if (IS_ERR(opp)) { @@ -112,29 +126,35 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) * voltage of 528MHz, so lower the CPU frequency to one * half before changing CPU frequency. 
*/ - clk_set_rate(arm_clk, (old_freq >> 1) * 1000); - clk_set_parent(pll1_sw_clk, pll1_sys_clk); - if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) - clk_set_parent(secondary_sel_clk, pll2_bus_clk); + clk_set_rate(clks[ARM].clk, (old_freq >> 1) * 1000); + clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk); + if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk)) + clk_set_parent(clks[SECONDARY_SEL].clk, + clks[PLL2_BUS].clk); else - clk_set_parent(secondary_sel_clk, pll2_pfd2_396m_clk); - clk_set_parent(step_clk, secondary_sel_clk); - clk_set_parent(pll1_sw_clk, step_clk); + clk_set_parent(clks[SECONDARY_SEL].clk, + clks[PLL2_PFD2_396M].clk); + clk_set_parent(clks[STEP].clk, clks[SECONDARY_SEL].clk); + clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk); + if (freq_hz > clk_get_rate(clks[PLL2_BUS].clk)) { + clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000); + clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk); + } } else { - clk_set_parent(step_clk, pll2_pfd2_396m_clk); - clk_set_parent(pll1_sw_clk, step_clk); - if (freq_hz > clk_get_rate(pll2_pfd2_396m_clk)) { - clk_set_rate(pll1_sys_clk, new_freq * 1000); - clk_set_parent(pll1_sw_clk, pll1_sys_clk); + clk_set_parent(clks[STEP].clk, clks[PLL2_PFD2_396M].clk); + clk_set_parent(clks[PLL1_SW].clk, clks[STEP].clk); + if (freq_hz > clk_get_rate(clks[PLL2_PFD2_396M].clk)) { + clk_set_rate(clks[PLL1_SYS].clk, new_freq * 1000); + clk_set_parent(clks[PLL1_SW].clk, clks[PLL1_SYS].clk); } else { /* pll1_sys needs to be enabled for divider rate change to work. */ pll1_sys_temp_enabled = true; - clk_prepare_enable(pll1_sys_clk); + clk_prepare_enable(clks[PLL1_SYS].clk); } } /* Ensure the arm clock divider is what we expect */ - ret = clk_set_rate(arm_clk, new_freq * 1000); + ret = clk_set_rate(clks[ARM].clk, new_freq * 1000); if (ret) { dev_err(cpu_dev, "failed to set clock rate: %d\n", ret); regulator_set_voltage_tol(arm_reg, volt_old, 0); @@ -143,7 +163,7 @@ static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) /* PLL1 is only needed until after ARM-PODF is set. */ if (pll1_sys_temp_enabled) - clk_disable_unprepare(pll1_sys_clk); + clk_disable_unprepare(clks[PLL1_SYS].clk); /* scaling down? scale voltage after frequency */ if (new_freq < old_freq) { @@ -174,7 +194,7 @@ static int imx6q_cpufreq_init(struct cpufreq_policy *policy) { int ret; - policy->clk = arm_clk; + policy->clk = clks[ARM].clk; ret = cpufreq_generic_init(policy, freq_table, transition_latency); policy->suspend_freq = policy->max; @@ -244,6 +264,43 @@ put_node: of_node_put(np); } +#define OCOTP_CFG3_6UL_SPEED_696MHZ 0x2 + +static void imx6ul_opp_check_speed_grading(struct device *dev) +{ + struct device_node *np; + void __iomem *base; + u32 val; + + np = of_find_compatible_node(NULL, NULL, "fsl,imx6ul-ocotp"); + if (!np) + return; + + base = of_iomap(np, 0); + if (!base) { + dev_err(dev, "failed to map ocotp\n"); + goto put_node; + } + + /* + * Speed GRADING[1:0] defines the max speed of ARM: + * 2b'00: Reserved; + * 2b'01: 528000000Hz; + * 2b'10: 696000000Hz; + * 2b'11: Reserved; + * We need to set the max speed of ARM according to fuse map. 
+ */ + val = readl_relaxed(base + OCOTP_CFG3); + val >>= OCOTP_CFG3_SPEED_SHIFT; + val &= 0x3; + if (val != OCOTP_CFG3_6UL_SPEED_696MHZ) + if (dev_pm_opp_disable(dev, 696000000)) + dev_warn(dev, "failed to disable 696MHz OPP\n"); + iounmap(base); +put_node: + of_node_put(np); +} + static int imx6q_cpufreq_probe(struct platform_device *pdev) { struct device_node *np; @@ -266,28 +323,15 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) return -ENOENT; } - arm_clk = clk_get(cpu_dev, "arm"); - pll1_sys_clk = clk_get(cpu_dev, "pll1_sys"); - pll1_sw_clk = clk_get(cpu_dev, "pll1_sw"); - step_clk = clk_get(cpu_dev, "step"); - pll2_pfd2_396m_clk = clk_get(cpu_dev, "pll2_pfd2_396m"); - if (IS_ERR(arm_clk) || IS_ERR(pll1_sys_clk) || IS_ERR(pll1_sw_clk) || - IS_ERR(step_clk) || IS_ERR(pll2_pfd2_396m_clk)) { - dev_err(cpu_dev, "failed to get clocks\n"); - ret = -ENOENT; - goto put_clk; - } - if (of_machine_is_compatible("fsl,imx6ul") || - of_machine_is_compatible("fsl,imx6ull")) { - pll2_bus_clk = clk_get(cpu_dev, "pll2_bus"); - secondary_sel_clk = clk_get(cpu_dev, "secondary_sel"); - if (IS_ERR(pll2_bus_clk) || IS_ERR(secondary_sel_clk)) { - dev_err(cpu_dev, "failed to get clocks specific to imx6ul\n"); - ret = -ENOENT; - goto put_clk; - } - } + of_machine_is_compatible("fsl,imx6ull")) + num_clks = IMX6UL_CPUFREQ_CLK_NUM; + else + num_clks = IMX6Q_CPUFREQ_CLK_NUM; + + ret = clk_bulk_get(cpu_dev, num_clks, clks); + if (ret) + goto put_node; arm_reg = regulator_get(cpu_dev, "arm"); pu_reg = regulator_get_optional(cpu_dev, "pu"); @@ -311,7 +355,10 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) goto put_reg; } - imx6q_opp_check_speed_grading(cpu_dev); + if (of_machine_is_compatible("fsl,imx6ul")) + imx6ul_opp_check_speed_grading(cpu_dev); + else + imx6q_opp_check_speed_grading(cpu_dev); /* Because we have added the OPPs here, we must free them */ free_opp = true; @@ -424,22 +471,11 @@ put_reg: regulator_put(pu_reg); if (!IS_ERR(soc_reg)) regulator_put(soc_reg); -put_clk: - if (!IS_ERR(arm_clk)) - clk_put(arm_clk); - if (!IS_ERR(pll1_sys_clk)) - clk_put(pll1_sys_clk); - if (!IS_ERR(pll1_sw_clk)) - clk_put(pll1_sw_clk); - if (!IS_ERR(step_clk)) - clk_put(step_clk); - if (!IS_ERR(pll2_pfd2_396m_clk)) - clk_put(pll2_pfd2_396m_clk); - if (!IS_ERR(pll2_bus_clk)) - clk_put(pll2_bus_clk); - if (!IS_ERR(secondary_sel_clk)) - clk_put(secondary_sel_clk); + + clk_bulk_put(num_clks, clks); +put_node: of_node_put(np); + return ret; } @@ -453,13 +489,8 @@ static int imx6q_cpufreq_remove(struct platform_device *pdev) if (!IS_ERR(pu_reg)) regulator_put(pu_reg); regulator_put(soc_reg); - clk_put(arm_clk); - clk_put(pll1_sys_clk); - clk_put(pll1_sw_clk); - clk_put(step_clk); - clk_put(pll2_pfd2_396m_clk); - clk_put(pll2_bus_clk); - clk_put(secondary_sel_clk); + + clk_bulk_put(num_clks, clks); return 0; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 93a0e88bef76..7edf7a0e5a96 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -1595,15 +1595,6 @@ static const struct pstate_funcs knl_funcs = { .get_val = core_get_val, }; -static const struct pstate_funcs bxt_funcs = { - .get_max = core_get_max_pstate, - .get_max_physical = core_get_max_pstate_physical, - .get_min = core_get_min_pstate, - .get_turbo = core_get_turbo_pstate, - .get_scaling = core_get_scaling, - .get_val = core_get_val, -}; - #define ICPU(model, policy) \ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_APERFMPERF,\ (unsigned long)&policy } @@ -1627,8 +1618,9 @@ static 
const struct x86_cpu_id intel_pstate_cpu_ids[] = { ICPU(INTEL_FAM6_BROADWELL_XEON_D, core_funcs), ICPU(INTEL_FAM6_XEON_PHI_KNL, knl_funcs), ICPU(INTEL_FAM6_XEON_PHI_KNM, knl_funcs), - ICPU(INTEL_FAM6_ATOM_GOLDMONT, bxt_funcs), - ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, bxt_funcs), + ICPU(INTEL_FAM6_ATOM_GOLDMONT, core_funcs), + ICPU(INTEL_FAM6_ATOM_GEMINI_LAKE, core_funcs), + ICPU(INTEL_FAM6_SKYLAKE_X, core_funcs), {} }; MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids); diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index c46a12df40dd..5faa37c5b091 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -894,7 +894,7 @@ static int longhaul_cpu_init(struct cpufreq_policy *policy) if ((longhaul_version != TYPE_LONGHAUL_V1) && (scale_voltage != 0)) longhaul_setup_voltagescaling(); - policy->cpuinfo.transition_latency = 200000; /* nsec */ + policy->transition_delay_us = 200000; /* usec */ return cpufreq_table_validate_and_show(policy, longhaul_table); } diff --git a/drivers/cpufreq/mediatek-cpufreq.c b/drivers/cpufreq/mediatek-cpufreq.c index e0d5090b303d..8c04dddd3c28 100644 --- a/drivers/cpufreq/mediatek-cpufreq.c +++ b/drivers/cpufreq/mediatek-cpufreq.c @@ -310,28 +310,8 @@ static int mtk_cpufreq_set_target(struct cpufreq_policy *policy, static void mtk_cpufreq_ready(struct cpufreq_policy *policy) { struct mtk_cpu_dvfs_info *info = policy->driver_data; - struct device_node *np = of_node_get(info->cpu_dev->of_node); - u32 capacitance = 0; - if (WARN_ON(!np)) - return; - - if (of_find_property(np, "#cooling-cells", NULL)) { - of_property_read_u32(np, DYNAMIC_POWER, &capacitance); - - info->cdev = of_cpufreq_power_cooling_register(np, - policy, capacitance, NULL); - - if (IS_ERR(info->cdev)) { - dev_err(info->cpu_dev, - "running cpufreq without cooling device: %ld\n", - PTR_ERR(info->cdev)); - - info->cdev = NULL; - } - } - - of_node_put(np); + info->cdev = of_cpufreq_cooling_register(policy); } static int mtk_cpu_dvfs_info_init(struct mtk_cpu_dvfs_info *info, int cpu) @@ -574,6 +554,7 @@ static struct platform_driver mtk_cpufreq_platdrv = { /* List of machines supported by this driver */ static const struct of_device_id mtk_cpufreq_machines[] __initconst = { { .compatible = "mediatek,mt2701", }, + { .compatible = "mediatek,mt2712", }, { .compatible = "mediatek,mt7622", }, { .compatible = "mediatek,mt7623", }, { .compatible = "mediatek,mt817x", }, diff --git a/drivers/cpufreq/mvebu-cpufreq.c b/drivers/cpufreq/mvebu-cpufreq.c index ed915ee85dd9..31513bd42705 100644 --- a/drivers/cpufreq/mvebu-cpufreq.c +++ b/drivers/cpufreq/mvebu-cpufreq.c @@ -76,12 +76,6 @@ static int __init armada_xp_pmsu_cpufreq_init(void) return PTR_ERR(clk); } - /* - * In case of a failure of dev_pm_opp_add(), we don't - * bother with cleaning up the registered OPP (there's - * no function to do so), and simply cancel the - * registration of the cpufreq device. 
- */ ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk), 0); if (ret) { clk_put(clk); @@ -91,7 +85,8 @@ static int __init armada_xp_pmsu_cpufreq_init(void) ret = dev_pm_opp_add(cpu_dev, clk_get_rate(clk) / 2, 0); if (ret) { clk_put(clk); - return ret; + dev_err(cpu_dev, "Failed to register OPPs\n"); + goto opp_register_failed; } ret = dev_pm_opp_set_sharing_cpus(cpu_dev, @@ -99,9 +94,16 @@ static int __init armada_xp_pmsu_cpufreq_init(void) if (ret) dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", __func__, ret); + clk_put(clk); } platform_device_register_simple("cpufreq-dt", -1, NULL, 0); return 0; + +opp_register_failed: + /* As registering has failed remove all the opp for all cpus */ + dev_pm_opp_cpumask_remove_table(cpu_possible_mask); + + return ret; } device_initcall(armada_xp_pmsu_cpufreq_init); diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c index b6d7c4c98d0a..29cdec198657 100644 --- a/drivers/cpufreq/powernv-cpufreq.c +++ b/drivers/cpufreq/powernv-cpufreq.c @@ -29,6 +29,7 @@ #include <linux/reboot.h> #include <linux/slab.h> #include <linux/cpu.h> +#include <linux/hashtable.h> #include <trace/events/power.h> #include <asm/cputhreads.h> @@ -38,14 +39,13 @@ #include <asm/opal.h> #include <linux/timer.h> -#define POWERNV_MAX_PSTATES 256 +#define POWERNV_MAX_PSTATES_ORDER 8 +#define POWERNV_MAX_PSTATES (1UL << (POWERNV_MAX_PSTATES_ORDER)) #define PMSR_PSAFE_ENABLE (1UL << 30) #define PMSR_SPR_EM_DISABLE (1UL << 31) -#define PMSR_MAX(x) ((x >> 32) & 0xFF) +#define MAX_PSTATE_SHIFT 32 #define LPSTATE_SHIFT 48 #define GPSTATE_SHIFT 56 -#define GET_LPSTATE(x) (((x) >> LPSTATE_SHIFT) & 0xFF) -#define GET_GPSTATE(x) (((x) >> GPSTATE_SHIFT) & 0xFF) #define MAX_RAMP_DOWN_TIME 5120 /* @@ -94,6 +94,27 @@ struct global_pstate_info { }; static struct cpufreq_frequency_table powernv_freqs[POWERNV_MAX_PSTATES+1]; + +DEFINE_HASHTABLE(pstate_revmap, POWERNV_MAX_PSTATES_ORDER); +/** + * struct pstate_idx_revmap_data: Entry in the hashmap pstate_revmap + * indexed by a function of pstate id. + * + * @pstate_id: pstate id for this entry. + * + * @cpufreq_table_idx: Index into the powernv_freqs + * cpufreq_frequency_table for frequency + * corresponding to pstate_id. + * + * @hentry: hlist_node that hooks this entry into the pstate_revmap + * hashtable + */ +struct pstate_idx_revmap_data { + u8 pstate_id; + unsigned int cpufreq_table_idx; + struct hlist_node hentry; +}; + static bool rebooting, throttled, occ_reset; static const char * const throttle_reason[] = { @@ -148,39 +169,56 @@ static struct powernv_pstate_info { bool wof_enabled; } powernv_pstate_info; -/* Use following macros for conversions between pstate_id and index */ -static inline int idx_to_pstate(unsigned int i) +static inline u8 extract_pstate(u64 pmsr_val, unsigned int shift) +{ + return ((pmsr_val >> shift) & 0xFF); +} + +#define extract_local_pstate(x) extract_pstate(x, LPSTATE_SHIFT) +#define extract_global_pstate(x) extract_pstate(x, GPSTATE_SHIFT) +#define extract_max_pstate(x) extract_pstate(x, MAX_PSTATE_SHIFT) + +/* Use following functions for conversions between pstate_id and index */ + +/** + * idx_to_pstate : Returns the pstate id corresponding to the + * frequency in the cpufreq frequency table + * powernv_freqs indexed by @i. + * + * If @i is out of bound, this will return the pstate + * corresponding to the nominal frequency. 
+ */ +static inline u8 idx_to_pstate(unsigned int i) { if (unlikely(i >= powernv_pstate_info.nr_pstates)) { - pr_warn_once("index %u is out of bound\n", i); + pr_warn_once("idx_to_pstate: index %u is out of bound\n", i); return powernv_freqs[powernv_pstate_info.nominal].driver_data; } return powernv_freqs[i].driver_data; } -static inline unsigned int pstate_to_idx(int pstate) +/** + * pstate_to_idx : Returns the index in the cpufreq frequencytable + * powernv_freqs for the frequency whose corresponding + * pstate id is @pstate. + * + * If no frequency corresponding to @pstate is found, + * this will return the index of the nominal + * frequency. + */ +static unsigned int pstate_to_idx(u8 pstate) { - int min = powernv_freqs[powernv_pstate_info.min].driver_data; - int max = powernv_freqs[powernv_pstate_info.max].driver_data; + unsigned int key = pstate % POWERNV_MAX_PSTATES; + struct pstate_idx_revmap_data *revmap_data; - if (min > 0) { - if (unlikely((pstate < max) || (pstate > min))) { - pr_warn_once("pstate %d is out of bound\n", pstate); - return powernv_pstate_info.nominal; - } - } else { - if (unlikely((pstate > max) || (pstate < min))) { - pr_warn_once("pstate %d is out of bound\n", pstate); - return powernv_pstate_info.nominal; - } + hash_for_each_possible(pstate_revmap, revmap_data, hentry, key) { + if (revmap_data->pstate_id == pstate) + return revmap_data->cpufreq_table_idx; } - /* - * abs() is deliberately used so that is works with - * both monotonically increasing and decreasing - * pstate values - */ - return abs(pstate - idx_to_pstate(powernv_pstate_info.max)); + + pr_warn_once("pstate_to_idx: pstate 0x%x not found\n", pstate); + return powernv_pstate_info.nominal; } static inline void reset_gpstates(struct cpufreq_policy *policy) @@ -247,7 +285,7 @@ static int init_powernv_pstates(void) powernv_pstate_info.wof_enabled = true; next: - pr_info("cpufreq pstate min %d nominal %d max %d\n", pstate_min, + pr_info("cpufreq pstate min 0x%x nominal 0x%x max 0x%x\n", pstate_min, pstate_nominal, pstate_max); pr_info("Workload Optimized Frequency is %s in the platform\n", (powernv_pstate_info.wof_enabled) ? "enabled" : "disabled"); @@ -278,19 +316,30 @@ next: powernv_pstate_info.nr_pstates = nr_pstates; pr_debug("NR PStates %d\n", nr_pstates); + for (i = 0; i < nr_pstates; i++) { u32 id = be32_to_cpu(pstate_ids[i]); u32 freq = be32_to_cpu(pstate_freqs[i]); + struct pstate_idx_revmap_data *revmap_data; + unsigned int key; pr_debug("PState id %d freq %d MHz\n", id, freq); powernv_freqs[i].frequency = freq * 1000; /* kHz */ - powernv_freqs[i].driver_data = id; + powernv_freqs[i].driver_data = id & 0xFF; + + revmap_data = (struct pstate_idx_revmap_data *) + kmalloc(sizeof(*revmap_data), GFP_KERNEL); + + revmap_data->pstate_id = id & 0xFF; + revmap_data->cpufreq_table_idx = i; + key = (revmap_data->pstate_id) % POWERNV_MAX_PSTATES; + hash_add(pstate_revmap, &revmap_data->hentry, key); if (id == pstate_max) powernv_pstate_info.max = i; - else if (id == pstate_nominal) + if (id == pstate_nominal) powernv_pstate_info.nominal = i; - else if (id == pstate_min) + if (id == pstate_min) powernv_pstate_info.min = i; if (powernv_pstate_info.wof_enabled && id == pstate_turbo) { @@ -307,14 +356,13 @@ next: } /* Returns the CPU frequency corresponding to the pstate_id. 
*/ -static unsigned int pstate_id_to_freq(int pstate_id) +static unsigned int pstate_id_to_freq(u8 pstate_id) { int i; i = pstate_to_idx(pstate_id); if (i >= powernv_pstate_info.nr_pstates || i < 0) { - pr_warn("PState id %d outside of PState table, " - "reporting nominal id %d instead\n", + pr_warn("PState id 0x%x outside of PState table, reporting nominal id 0x%x instead\n", pstate_id, idx_to_pstate(powernv_pstate_info.nominal)); i = powernv_pstate_info.nominal; } @@ -420,8 +468,8 @@ static inline void set_pmspr(unsigned long sprn, unsigned long val) */ struct powernv_smp_call_data { unsigned int freq; - int pstate_id; - int gpstate_id; + u8 pstate_id; + u8 gpstate_id; }; /* @@ -438,22 +486,15 @@ struct powernv_smp_call_data { static void powernv_read_cpu_freq(void *arg) { unsigned long pmspr_val; - s8 local_pstate_id; struct powernv_smp_call_data *freq_data = arg; pmspr_val = get_pmspr(SPRN_PMSR); - - /* - * The local pstate id corresponds bits 48..55 in the PMSR. - * Note: Watch out for the sign! - */ - local_pstate_id = (pmspr_val >> 48) & 0xFF; - freq_data->pstate_id = local_pstate_id; + freq_data->pstate_id = extract_local_pstate(pmspr_val); freq_data->freq = pstate_id_to_freq(freq_data->pstate_id); - pr_debug("cpu %d pmsr %016lX pstate_id %d frequency %d kHz\n", - raw_smp_processor_id(), pmspr_val, freq_data->pstate_id, - freq_data->freq); + pr_debug("cpu %d pmsr %016lX pstate_id 0x%x frequency %d kHz\n", + raw_smp_processor_id(), pmspr_val, freq_data->pstate_id, + freq_data->freq); } /* @@ -515,21 +556,21 @@ static void powernv_cpufreq_throttle_check(void *data) struct chip *chip; unsigned int cpu = smp_processor_id(); unsigned long pmsr; - int pmsr_pmax; + u8 pmsr_pmax; unsigned int pmsr_pmax_idx; pmsr = get_pmspr(SPRN_PMSR); chip = this_cpu_read(chip_info); /* Check for Pmax Capping */ - pmsr_pmax = (s8)PMSR_MAX(pmsr); + pmsr_pmax = extract_max_pstate(pmsr); pmsr_pmax_idx = pstate_to_idx(pmsr_pmax); if (pmsr_pmax_idx != powernv_pstate_info.max) { if (chip->throttled) goto next; chip->throttled = true; if (pmsr_pmax_idx > powernv_pstate_info.nominal) { - pr_warn_once("CPU %d on Chip %u has Pmax(%d) reduced below nominal frequency(%d)\n", + pr_warn_once("CPU %d on Chip %u has Pmax(0x%x) reduced below that of nominal frequency(0x%x)\n", cpu, chip->id, pmsr_pmax, idx_to_pstate(powernv_pstate_info.nominal)); chip->throttle_sub_turbo++; @@ -645,8 +686,8 @@ void gpstate_timer_handler(struct timer_list *t) * value. Hence, read from PMCR to get correct data. 
*/ val = get_pmspr(SPRN_PMCR); - freq_data.gpstate_id = (s8)GET_GPSTATE(val); - freq_data.pstate_id = (s8)GET_LPSTATE(val); + freq_data.gpstate_id = extract_global_pstate(val); + freq_data.pstate_id = extract_local_pstate(val); if (freq_data.gpstate_id == freq_data.pstate_id) { reset_gpstates(policy); spin_unlock(&gpstates->gpstate_lock); diff --git a/drivers/cpufreq/qoriq-cpufreq.c b/drivers/cpufreq/qoriq-cpufreq.c index 4ada55b8856e..0562761a3dec 100644 --- a/drivers/cpufreq/qoriq-cpufreq.c +++ b/drivers/cpufreq/qoriq-cpufreq.c @@ -275,20 +275,8 @@ static int qoriq_cpufreq_target(struct cpufreq_policy *policy, static void qoriq_cpufreq_ready(struct cpufreq_policy *policy) { struct cpu_data *cpud = policy->driver_data; - struct device_node *np = of_get_cpu_node(policy->cpu, NULL); - if (of_find_property(np, "#cooling-cells", NULL)) { - cpud->cdev = of_cpufreq_cooling_register(np, policy); - - if (IS_ERR(cpud->cdev) && PTR_ERR(cpud->cdev) != -ENOSYS) { - pr_err("cpu%d is not running as cooling device: %ld\n", - policy->cpu, PTR_ERR(cpud->cdev)); - - cpud->cdev = NULL; - } - } - - of_node_put(np); + cpud->cdev = of_cpufreq_cooling_register(policy); } static struct cpufreq_driver qoriq_cpufreq_driver = { diff --git a/drivers/cpufreq/scpi-cpufreq.c b/drivers/cpufreq/scpi-cpufreq.c index 05d299052c5c..247fcbfa4cb5 100644 --- a/drivers/cpufreq/scpi-cpufreq.c +++ b/drivers/cpufreq/scpi-cpufreq.c @@ -18,27 +18,89 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt +#include <linux/clk.h> #include <linux/cpu.h> #include <linux/cpufreq.h> +#include <linux/cpumask.h> +#include <linux/cpu_cooling.h> +#include <linux/export.h> #include <linux/module.h> -#include <linux/platform_device.h> +#include <linux/of_platform.h> #include <linux/pm_opp.h> #include <linux/scpi_protocol.h> +#include <linux/slab.h> #include <linux/types.h> -#include "arm_big_little.h" +struct scpi_data { + struct clk *clk; + struct device *cpu_dev; + struct thermal_cooling_device *cdev; +}; static struct scpi_ops *scpi_ops; -static int scpi_get_transition_latency(struct device *cpu_dev) +static unsigned int scpi_cpufreq_get_rate(unsigned int cpu) +{ + struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu); + struct scpi_data *priv = policy->driver_data; + unsigned long rate = clk_get_rate(priv->clk); + + return rate / 1000; +} + +static int +scpi_cpufreq_set_target(struct cpufreq_policy *policy, unsigned int index) +{ + struct scpi_data *priv = policy->driver_data; + u64 rate = policy->freq_table[index].frequency * 1000; + int ret; + + ret = clk_set_rate(priv->clk, rate); + if (!ret && (clk_get_rate(priv->clk) != rate)) + ret = -EIO; + + return ret; +} + +static int +scpi_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpumask) { - return scpi_ops->get_transition_latency(cpu_dev); + int cpu, domain, tdomain; + struct device *tcpu_dev; + + domain = scpi_ops->device_domain_id(cpu_dev); + if (domain < 0) + return domain; + + for_each_possible_cpu(cpu) { + if (cpu == cpu_dev->id) + continue; + + tcpu_dev = get_cpu_device(cpu); + if (!tcpu_dev) + continue; + + tdomain = scpi_ops->device_domain_id(tcpu_dev); + if (tdomain == domain) + cpumask_set_cpu(cpu, cpumask); + } + + return 0; } -static int scpi_init_opp_table(const struct cpumask *cpumask) +static int scpi_cpufreq_init(struct cpufreq_policy *policy) { int ret; - struct device *cpu_dev = get_cpu_device(cpumask_first(cpumask)); + unsigned int latency; + struct device *cpu_dev; + struct scpi_data *priv; + struct cpufreq_frequency_table *freq_table; + + cpu_dev = 
get_cpu_device(policy->cpu); + if (!cpu_dev) { + pr_err("failed to get cpu%d device\n", policy->cpu); + return -ENODEV; + } ret = scpi_ops->add_opps_to_device(cpu_dev); if (ret) { @@ -46,32 +108,133 @@ static int scpi_init_opp_table(const struct cpumask *cpumask) return ret; } - ret = dev_pm_opp_set_sharing_cpus(cpu_dev, cpumask); - if (ret) + ret = scpi_get_sharing_cpus(cpu_dev, policy->cpus); + if (ret) { + dev_warn(cpu_dev, "failed to get sharing cpumask\n"); + return ret; + } + + ret = dev_pm_opp_set_sharing_cpus(cpu_dev, policy->cpus); + if (ret) { dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n", __func__, ret); + return ret; + } + + ret = dev_pm_opp_get_opp_count(cpu_dev); + if (ret <= 0) { + dev_dbg(cpu_dev, "OPP table is not ready, deferring probe\n"); + ret = -EPROBE_DEFER; + goto out_free_opp; + } + + priv = kzalloc(sizeof(*priv), GFP_KERNEL); + if (!priv) { + ret = -ENOMEM; + goto out_free_opp; + } + + ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); + if (ret) { + dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret); + goto out_free_priv; + } + + priv->cpu_dev = cpu_dev; + priv->clk = clk_get(cpu_dev, NULL); + if (IS_ERR(priv->clk)) { + dev_err(cpu_dev, "%s: Failed to get clk for cpu: %d\n", + __func__, cpu_dev->id); + goto out_free_cpufreq_table; + } + + policy->driver_data = priv; + + ret = cpufreq_table_validate_and_show(policy, freq_table); + if (ret) { + dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__, + ret); + goto out_put_clk; + } + + /* scpi allows DVFS request for any domain from any CPU */ + policy->dvfs_possible_from_any_cpu = true; + + latency = scpi_ops->get_transition_latency(cpu_dev); + if (!latency) + latency = CPUFREQ_ETERNAL; + + policy->cpuinfo.transition_latency = latency; + + policy->fast_switch_possible = false; + return 0; + +out_put_clk: + clk_put(priv->clk); +out_free_cpufreq_table: + dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); +out_free_priv: + kfree(priv); +out_free_opp: + dev_pm_opp_cpumask_remove_table(policy->cpus); + return ret; } -static const struct cpufreq_arm_bL_ops scpi_cpufreq_ops = { - .name = "scpi", - .get_transition_latency = scpi_get_transition_latency, - .init_opp_table = scpi_init_opp_table, - .free_opp_table = dev_pm_opp_cpumask_remove_table, +static int scpi_cpufreq_exit(struct cpufreq_policy *policy) +{ + struct scpi_data *priv = policy->driver_data; + + cpufreq_cooling_unregister(priv->cdev); + clk_put(priv->clk); + dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); + kfree(priv); + dev_pm_opp_cpumask_remove_table(policy->related_cpus); + + return 0; +} + +static void scpi_cpufreq_ready(struct cpufreq_policy *policy) +{ + struct scpi_data *priv = policy->driver_data; + struct thermal_cooling_device *cdev; + + cdev = of_cpufreq_cooling_register(policy); + if (!IS_ERR(cdev)) + priv->cdev = cdev; +} + +static struct cpufreq_driver scpi_cpufreq_driver = { + .name = "scpi-cpufreq", + .flags = CPUFREQ_STICKY | CPUFREQ_HAVE_GOVERNOR_PER_POLICY | + CPUFREQ_NEED_INITIAL_FREQ_CHECK, + .verify = cpufreq_generic_frequency_table_verify, + .attr = cpufreq_generic_attr, + .get = scpi_cpufreq_get_rate, + .init = scpi_cpufreq_init, + .exit = scpi_cpufreq_exit, + .ready = scpi_cpufreq_ready, + .target_index = scpi_cpufreq_set_target, }; static int scpi_cpufreq_probe(struct platform_device *pdev) { + int ret; + scpi_ops = get_scpi_ops(); if (!scpi_ops) return -EIO; - return bL_cpufreq_register(&scpi_cpufreq_ops); + ret = cpufreq_register_driver(&scpi_cpufreq_driver); + if (ret) + 
dev_err(&pdev->dev, "%s: registering cpufreq failed, err: %d\n", + __func__, ret); + return ret; } static int scpi_cpufreq_remove(struct platform_device *pdev) { - bL_cpufreq_unregister(&scpi_cpufreq_ops); + cpufreq_unregister_driver(&scpi_cpufreq_driver); scpi_ops = NULL; return 0; } diff --git a/drivers/cpufreq/ti-cpufreq.c b/drivers/cpufreq/ti-cpufreq.c index 923317f03b4b..a099b7bf74cd 100644 --- a/drivers/cpufreq/ti-cpufreq.c +++ b/drivers/cpufreq/ti-cpufreq.c @@ -17,6 +17,7 @@ #include <linux/cpu.h> #include <linux/io.h> #include <linux/mfd/syscon.h> +#include <linux/module.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_platform.h> @@ -50,6 +51,7 @@ struct ti_cpufreq_soc_data { unsigned long efuse_mask; unsigned long efuse_shift; unsigned long rev_offset; + bool multi_regulator; }; struct ti_cpufreq_data { @@ -57,6 +59,7 @@ struct ti_cpufreq_data { struct device_node *opp_node; struct regmap *syscon; const struct ti_cpufreq_soc_data *soc_data; + struct opp_table *opp_table; }; static unsigned long amx3_efuse_xlate(struct ti_cpufreq_data *opp_data, @@ -95,6 +98,7 @@ static struct ti_cpufreq_soc_data am3x_soc_data = { .efuse_offset = 0x07fc, .efuse_mask = 0x1fff, .rev_offset = 0x600, + .multi_regulator = false, }; static struct ti_cpufreq_soc_data am4x_soc_data = { @@ -103,6 +107,7 @@ static struct ti_cpufreq_soc_data am4x_soc_data = { .efuse_offset = 0x0610, .efuse_mask = 0x3f, .rev_offset = 0x600, + .multi_regulator = false, }; static struct ti_cpufreq_soc_data dra7_soc_data = { @@ -111,6 +116,7 @@ static struct ti_cpufreq_soc_data dra7_soc_data = { .efuse_mask = 0xf80000, .efuse_shift = 19, .rev_offset = 0x204, + .multi_regulator = true, }; /** @@ -195,12 +201,14 @@ static const struct of_device_id ti_cpufreq_of_match[] = { {}, }; -static int ti_cpufreq_init(void) +static int ti_cpufreq_probe(struct platform_device *pdev) { u32 version[VERSION_COUNT]; struct device_node *np; const struct of_device_id *match; + struct opp_table *ti_opp_table; struct ti_cpufreq_data *opp_data; + const char * const reg_names[] = {"vdd", "vbb"}; int ret; np = of_find_node_by_path("/"); @@ -247,16 +255,29 @@ static int ti_cpufreq_init(void) if (ret) goto fail_put_node; - ret = PTR_ERR_OR_ZERO(dev_pm_opp_set_supported_hw(opp_data->cpu_dev, - version, VERSION_COUNT)); - if (ret) { + ti_opp_table = dev_pm_opp_set_supported_hw(opp_data->cpu_dev, + version, VERSION_COUNT); + if (IS_ERR(ti_opp_table)) { dev_err(opp_data->cpu_dev, "Failed to set supported hardware\n"); + ret = PTR_ERR(ti_opp_table); goto fail_put_node; } - of_node_put(opp_data->opp_node); + opp_data->opp_table = ti_opp_table; + + if (opp_data->soc_data->multi_regulator) { + ti_opp_table = dev_pm_opp_set_regulators(opp_data->cpu_dev, + reg_names, + ARRAY_SIZE(reg_names)); + if (IS_ERR(ti_opp_table)) { + dev_pm_opp_put_supported_hw(opp_data->opp_table); + ret = PTR_ERR(ti_opp_table); + goto fail_put_node; + } + } + of_node_put(opp_data->opp_node); register_cpufreq_dt: platform_device_register_simple("cpufreq-dt", -1, NULL, 0); @@ -269,4 +290,22 @@ free_opp_data: return ret; } -device_initcall(ti_cpufreq_init); + +static int ti_cpufreq_init(void) +{ + platform_device_register_simple("ti-cpufreq", -1, NULL, 0); + return 0; +} +module_init(ti_cpufreq_init); + +static struct platform_driver ti_cpufreq_driver = { + .probe = ti_cpufreq_probe, + .driver = { + .name = "ti-cpufreq", + }, +}; +module_platform_driver(ti_cpufreq_driver); + +MODULE_DESCRIPTION("TI CPUFreq/OPP hw-supported driver"); +MODULE_AUTHOR("Dave Gerlach 
<d-gerlach@ti.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c index 4e78263e34a4..5d359aff3cc5 100644 --- a/drivers/cpuidle/governor.c +++ b/drivers/cpuidle/governor.c @@ -36,14 +36,15 @@ static struct cpuidle_governor * __cpuidle_find_governor(const char *str) /** * cpuidle_switch_governor - changes the governor * @gov: the new target governor - * - * NOTE: "gov" can be NULL to specify disabled * Must be called with cpuidle_lock acquired. */ int cpuidle_switch_governor(struct cpuidle_governor *gov) { struct cpuidle_device *dev; + if (!gov) + return -EINVAL; + if (gov == cpuidle_curr_governor) return 0; diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 78fb496ecb4e..fe2af6aa88fc 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -737,7 +737,7 @@ struct devfreq *devm_devfreq_add_device(struct device *dev, devfreq = devfreq_add_device(dev, profile, governor_name, data); if (IS_ERR(devfreq)) { devres_free(ptr); - return ERR_PTR(-ENOMEM); + return devfreq; } *ptr = devfreq; @@ -996,7 +996,8 @@ static ssize_t governor_store(struct device *dev, struct device_attribute *attr, if (df->governor == governor) { ret = 0; goto out; - } else if (df->governor->immutable || governor->immutable) { + } else if ((df->governor && df->governor->immutable) || + governor->immutable) { ret = -EINVAL; goto out; } diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c index 2b2c7db3e480..35c3936edc45 100644 --- a/drivers/dma/sh/rcar-dmac.c +++ b/drivers/dma/sh/rcar-dmac.c @@ -1615,22 +1615,6 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec, * Power management */ -#ifdef CONFIG_PM_SLEEP -static int rcar_dmac_sleep_suspend(struct device *dev) -{ - /* - * TODO: Wait for the current transfer to complete and stop the device. - */ - return 0; -} - -static int rcar_dmac_sleep_resume(struct device *dev) -{ - /* TODO: Resume transfers, if any. */ - return 0; -} -#endif - #ifdef CONFIG_PM static int rcar_dmac_runtime_suspend(struct device *dev) { @@ -1646,7 +1630,13 @@ static int rcar_dmac_runtime_resume(struct device *dev) #endif static const struct dev_pm_ops rcar_dmac_pm = { - SET_SYSTEM_SLEEP_PM_OPS(rcar_dmac_sleep_suspend, rcar_dmac_sleep_resume) + /* + * TODO for system sleep/resume: + * - Wait for the current transfer to complete and stop the device, + * - Resume transfers, if any. 
+ */ + SET_LATE_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) SET_RUNTIME_PM_OPS(rcar_dmac_runtime_suspend, rcar_dmac_runtime_resume, NULL) }; diff --git a/drivers/firmware/psci_checker.c b/drivers/firmware/psci_checker.c index f3f4f810e5df..bb1c068bff19 100644 --- a/drivers/firmware/psci_checker.c +++ b/drivers/firmware/psci_checker.c @@ -77,8 +77,8 @@ static int psci_ops_check(void) return 0; } -static int find_clusters(const struct cpumask *cpus, - const struct cpumask **clusters) +static int find_cpu_groups(const struct cpumask *cpus, + const struct cpumask **cpu_groups) { unsigned int nb = 0; cpumask_var_t tmp; @@ -88,11 +88,11 @@ static int find_clusters(const struct cpumask *cpus, cpumask_copy(tmp, cpus); while (!cpumask_empty(tmp)) { - const struct cpumask *cluster = + const struct cpumask *cpu_group = topology_core_cpumask(cpumask_any(tmp)); - clusters[nb++] = cluster; - cpumask_andnot(tmp, tmp, cluster); + cpu_groups[nb++] = cpu_group; + cpumask_andnot(tmp, tmp, cpu_group); } free_cpumask_var(tmp); @@ -170,24 +170,24 @@ static int hotplug_tests(void) { int err; cpumask_var_t offlined_cpus; - int i, nb_cluster; - const struct cpumask **clusters; + int i, nb_cpu_group; + const struct cpumask **cpu_groups; char *page_buf; err = -ENOMEM; if (!alloc_cpumask_var(&offlined_cpus, GFP_KERNEL)) return err; - /* We may have up to nb_available_cpus clusters. */ - clusters = kmalloc_array(nb_available_cpus, sizeof(*clusters), - GFP_KERNEL); - if (!clusters) + /* We may have up to nb_available_cpus cpu_groups. */ + cpu_groups = kmalloc_array(nb_available_cpus, sizeof(*cpu_groups), + GFP_KERNEL); + if (!cpu_groups) goto out_free_cpus; page_buf = (char *)__get_free_page(GFP_KERNEL); if (!page_buf) - goto out_free_clusters; + goto out_free_cpu_groups; err = 0; - nb_cluster = find_clusters(cpu_online_mask, clusters); + nb_cpu_group = find_cpu_groups(cpu_online_mask, cpu_groups); /* * Of course the last CPU cannot be powered down and cpu_down() should @@ -197,24 +197,22 @@ static int hotplug_tests(void) err += down_and_up_cpus(cpu_online_mask, offlined_cpus); /* - * Take down CPUs by cluster this time. When the last CPU is turned - * off, the cluster itself should shut down. + * Take down CPUs by cpu group this time. When the last CPU is turned + * off, the cpu group itself should shut down. */ - for (i = 0; i < nb_cluster; ++i) { - int cluster_id = - topology_physical_package_id(cpumask_any(clusters[i])); + for (i = 0; i < nb_cpu_group; ++i) { ssize_t len = cpumap_print_to_pagebuf(true, page_buf, - clusters[i]); + cpu_groups[i]); /* Remove trailing newline. 
*/ page_buf[len - 1] = '\0'; - pr_info("Trying to turn off and on again cluster %d " - "(CPUs %s)\n", cluster_id, page_buf); - err += down_and_up_cpus(clusters[i], offlined_cpus); + pr_info("Trying to turn off and on again group %d (CPUs %s)\n", + i, page_buf); + err += down_and_up_cpus(cpu_groups[i], offlined_cpus); } free_page((unsigned long)page_buf); -out_free_clusters: - kfree(clusters); +out_free_cpu_groups: + kfree(cpu_groups); out_free_cpus: free_cpumask_var(offlined_cpus); return err; diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h index 21bf619a86c5..9fee4c054d3d 100644 --- a/drivers/i2c/busses/i2c-designware-core.h +++ b/drivers/i2c/busses/i2c-designware-core.h @@ -280,8 +280,6 @@ struct dw_i2c_dev { int (*acquire_lock)(struct dw_i2c_dev *dev); void (*release_lock)(struct dw_i2c_dev *dev); bool pm_disabled; - bool suspended; - bool skip_resume; void (*disable)(struct dw_i2c_dev *dev); void (*disable_int)(struct dw_i2c_dev *dev); int (*init)(struct dw_i2c_dev *dev); diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c index 58add69a441c..153b947702c5 100644 --- a/drivers/i2c/busses/i2c-designware-platdrv.c +++ b/drivers/i2c/busses/i2c-designware-platdrv.c @@ -42,6 +42,7 @@ #include <linux/reset.h> #include <linux/sched.h> #include <linux/slab.h> +#include <linux/suspend.h> #include "i2c-designware-core.h" @@ -372,6 +373,11 @@ static int dw_i2c_plat_probe(struct platform_device *pdev) ACPI_COMPANION_SET(&adap->dev, ACPI_COMPANION(&pdev->dev)); adap->dev.of_node = pdev->dev.of_node; + dev_pm_set_driver_flags(&pdev->dev, + DPM_FLAG_SMART_PREPARE | + DPM_FLAG_SMART_SUSPEND | + DPM_FLAG_LEAVE_SUSPENDED); + /* The code below assumes runtime PM to be disabled. */ WARN_ON(pm_runtime_enabled(&pdev->dev)); @@ -435,12 +441,24 @@ MODULE_DEVICE_TABLE(of, dw_i2c_of_match); #ifdef CONFIG_PM_SLEEP static int dw_i2c_plat_prepare(struct device *dev) { - return pm_runtime_suspended(dev); + /* + * If the ACPI companion device object is present for this device, it + * may be accessed during suspend and resume of other devices via I2C + * operation regions, so tell the PM core and middle layers to avoid + * skipping system suspend/resume callbacks for it in that case. + */ + return !has_acpi_companion(dev); } static void dw_i2c_plat_complete(struct device *dev) { - if (dev->power.direct_complete) + /* + * The device can only be in runtime suspend at this point if it has not + * been resumed throughout the ending system suspend/resume cycle, so if + * the platform firmware might mess up with it, request the runtime PM + * framework to resume it. 
+ */ + if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) pm_request_resume(dev); } #else @@ -453,16 +471,9 @@ static int dw_i2c_plat_suspend(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - if (i_dev->suspended) { - i_dev->skip_resume = true; - return 0; - } - i_dev->disable(i_dev); i2c_dw_plat_prepare_clk(i_dev, false); - i_dev->suspended = true; - return 0; } @@ -470,19 +481,9 @@ static int dw_i2c_plat_resume(struct device *dev) { struct dw_i2c_dev *i_dev = dev_get_drvdata(dev); - if (!i_dev->suspended) - return 0; - - if (i_dev->skip_resume) { - i_dev->skip_resume = false; - return 0; - } - i2c_dw_plat_prepare_clk(i_dev, true); i_dev->init(i_dev); - i_dev->suspended = false; - return 0; } diff --git a/drivers/mfd/intel-lpss.c b/drivers/mfd/intel-lpss.c index 0e0ab9bb1530..9e545eb6e8b4 100644 --- a/drivers/mfd/intel-lpss.c +++ b/drivers/mfd/intel-lpss.c @@ -450,6 +450,8 @@ int intel_lpss_probe(struct device *dev, if (ret) goto err_remove_ltr; + dev_pm_set_driver_flags(dev, DPM_FLAG_SMART_SUSPEND); + return 0; err_remove_ltr: @@ -478,7 +480,9 @@ EXPORT_SYMBOL_GPL(intel_lpss_remove); static int resume_lpss_device(struct device *dev, void *data) { - pm_runtime_resume(dev); + if (!dev_pm_test_driver_flags(dev, DPM_FLAG_SMART_SUSPEND)) + pm_runtime_resume(dev); + return 0; } diff --git a/drivers/opp/Makefile b/drivers/opp/Makefile index e70ceb406fe9..6ce6aefacc81 100644 --- a/drivers/opp/Makefile +++ b/drivers/opp/Makefile @@ -2,3 +2,4 @@ ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG obj-y += core.o cpu.o obj-$(CONFIG_OF) += of.o obj-$(CONFIG_DEBUG_FS) += debugfs.o +obj-$(CONFIG_ARM_TI_CPUFREQ) += ti-opp-supply.o diff --git a/drivers/opp/ti-opp-supply.c b/drivers/opp/ti-opp-supply.c new file mode 100644 index 000000000000..370eff3acd8a --- /dev/null +++ b/drivers/opp/ti-opp-supply.c @@ -0,0 +1,425 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2016-2017 Texas Instruments Incorporated - http://www.ti.com/ + * Nishanth Menon <nm@ti.com> + * Dave Gerlach <d-gerlach@ti.com> + * + * TI OPP supply driver that provides override into the regulator control + * for generic opp core to handle devices with ABB regulator and/or + * SmartReflex Class0. + */ +#include <linux/clk.h> +#include <linux/cpufreq.h> +#include <linux/device.h> +#include <linux/io.h> +#include <linux/module.h> +#include <linux/notifier.h> +#include <linux/of_device.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/pm_opp.h> +#include <linux/regulator/consumer.h> +#include <linux/slab.h> + +/** + * struct ti_opp_supply_optimum_voltage_table - optimized voltage table + * @reference_uv: reference voltage (usually Nominal voltage) + * @optimized_uv: Optimized voltage from efuse + */ +struct ti_opp_supply_optimum_voltage_table { + unsigned int reference_uv; + unsigned int optimized_uv; +}; + +/** + * struct ti_opp_supply_data - OMAP specific opp supply data + * @vdd_table: Optimized voltage mapping table + * @num_vdd_table: number of entries in vdd_table + * @vdd_absolute_max_voltage_uv: absolute maximum voltage in UV for the supply + */ +struct ti_opp_supply_data { + struct ti_opp_supply_optimum_voltage_table *vdd_table; + u32 num_vdd_table; + u32 vdd_absolute_max_voltage_uv; +}; + +static struct ti_opp_supply_data opp_data; + +/** + * struct ti_opp_supply_of_data - device tree match data + * @flags: specific type of opp supply + * @efuse_voltage_mask: mask required for efuse register representing voltage + * @efuse_voltage_uv: Are the efuse entries in micro-volts? 
if not, assume + * milli-volts. + */ +struct ti_opp_supply_of_data { +#define OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE BIT(1) +#define OPPDM_HAS_NO_ABB BIT(2) + const u8 flags; + const u32 efuse_voltage_mask; + const bool efuse_voltage_uv; +}; + +/** + * _store_optimized_voltages() - store optimized voltages + * @dev: ti opp supply device for which we need to store info + * @data: data specific to the device + * + * Picks up efuse based optimized voltages for VDD unique per device and + * stores it in internal data structure for use during transition requests. + * + * Return: If successful, 0, else appropriate error value. + */ +static int _store_optimized_voltages(struct device *dev, + struct ti_opp_supply_data *data) +{ + void __iomem *base; + struct property *prop; + struct resource *res; + const __be32 *val; + int proplen, i; + int ret = 0; + struct ti_opp_supply_optimum_voltage_table *table; + const struct ti_opp_supply_of_data *of_data = dev_get_drvdata(dev); + + /* pick up Efuse based voltages */ + res = platform_get_resource(to_platform_device(dev), IORESOURCE_MEM, 0); + if (!res) { + dev_err(dev, "Unable to get IO resource\n"); + ret = -ENODEV; + goto out_map; + } + + base = ioremap_nocache(res->start, resource_size(res)); + if (!base) { + dev_err(dev, "Unable to map Efuse registers\n"); + ret = -ENOMEM; + goto out_map; + } + + /* Fetch efuse-settings. */ + prop = of_find_property(dev->of_node, "ti,efuse-settings", NULL); + if (!prop) { + dev_err(dev, "No 'ti,efuse-settings' property found\n"); + ret = -EINVAL; + goto out; + } + + proplen = prop->length / sizeof(int); + data->num_vdd_table = proplen / 2; + /* Verify for corrupted OPP entries in dt */ + if (data->num_vdd_table * 2 * sizeof(int) != prop->length) { + dev_err(dev, "Invalid 'ti,efuse-settings'\n"); + ret = -EINVAL; + goto out; + } + + ret = of_property_read_u32(dev->of_node, "ti,absolute-max-voltage-uv", + &data->vdd_absolute_max_voltage_uv); + if (ret) { + dev_err(dev, "ti,absolute-max-voltage-uv is missing\n"); + ret = -EINVAL; + goto out; + } + + table = kzalloc(sizeof(*data->vdd_table) * + data->num_vdd_table, GFP_KERNEL); + if (!table) { + ret = -ENOMEM; + goto out; + } + data->vdd_table = table; + + val = prop->value; + for (i = 0; i < data->num_vdd_table; i++, table++) { + u32 efuse_offset; + u32 tmp; + + table->reference_uv = be32_to_cpup(val++); + efuse_offset = be32_to_cpup(val++); + + tmp = readl(base + efuse_offset); + tmp &= of_data->efuse_voltage_mask; + tmp >>= __ffs(of_data->efuse_voltage_mask); + + table->optimized_uv = of_data->efuse_voltage_uv ? tmp : + tmp * 1000; + + dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d vset=%d\n", + i, efuse_offset, table->reference_uv, + table->optimized_uv); + + /* + * Some older samples might not have optimized efuse + * Use reference voltage for those - just add debug message + * for them. 
+ */ + if (!table->optimized_uv) { + dev_dbg(dev, "[%d] efuse=0x%08x volt_table=%d:vset0\n", + i, efuse_offset, table->reference_uv); + table->optimized_uv = table->reference_uv; + } + } +out: + iounmap(base); +out_map: + return ret; +} + +/** + * _free_optimized_voltages() - free resources for optimized voltages + * @dev: device for which we need to free info + * @data: data specific to the device + */ +static void _free_optimized_voltages(struct device *dev, + struct ti_opp_supply_data *data) +{ + kfree(data->vdd_table); + data->vdd_table = NULL; + data->num_vdd_table = 0; +} + +/** + * _get_optimal_vdd_voltage() - Finds optimal voltage for the supply + * @dev: device for which we need to find info + * @data: data specific to the device + * @reference_uv: reference voltage (OPP voltage) for which we need the value + * + * Return: if a match is found, return the optimized voltage, else return + * reference_uv; also return reference_uv if no optimization is needed. + */ +static int _get_optimal_vdd_voltage(struct device *dev, + struct ti_opp_supply_data *data, + int reference_uv) +{ + int i; + struct ti_opp_supply_optimum_voltage_table *table; + + if (!data->num_vdd_table) + return reference_uv; + + table = data->vdd_table; + if (!table) + return -EINVAL; + + /* Find an exact match - this list is usually very small */ + for (i = 0; i < data->num_vdd_table; i++, table++) + if (table->reference_uv == reference_uv) + return table->optimized_uv; + + /* If things are screwed up, we'd make a mess on the console - so ratelimit */ + dev_err_ratelimited(dev, "%s: Failed optimized voltage match for %d\n", + __func__, reference_uv); + return reference_uv; +} + +static int _opp_set_voltage(struct device *dev, + struct dev_pm_opp_supply *supply, + int new_target_uv, struct regulator *reg, + char *reg_name) +{ + int ret; + unsigned long vdd_uv, uv_max; + + if (new_target_uv) + vdd_uv = new_target_uv; + else + vdd_uv = supply->u_volt; + + /* + * If we do have an absolute max voltage specified, then we should + * use that voltage instead to allow for cases where the voltage rails + * are ganged (for example, if we set the max for an opp as 1.12v, and + * the absolute max is 1.5v, for another rail to get 1.25v, it cannot + * be achieved if the regulator is constrained to a max of 1.12v, even + * if it can function at 1.25v). + */ + if (opp_data.vdd_absolute_max_voltage_uv) + uv_max = opp_data.vdd_absolute_max_voltage_uv; + else + uv_max = supply->u_volt_max; + + if (vdd_uv > uv_max || + vdd_uv < supply->u_volt_min || + supply->u_volt_min > uv_max) { + dev_warn(dev, + "Invalid range voltages [Min:%lu target:%lu Max:%lu]\n", + supply->u_volt_min, vdd_uv, uv_max); + return -EINVAL; + } + + dev_dbg(dev, "%s scaling to %luuV[min %luuV max %luuV]\n", reg_name, + vdd_uv, supply->u_volt_min, + uv_max); + + ret = regulator_set_voltage_triplet(reg, + supply->u_volt_min, + vdd_uv, + uv_max); + if (ret) { + dev_err(dev, "%s failed for %luuV[min %luuV max %luuV]\n", + reg_name, vdd_uv, supply->u_volt_min, + uv_max); + return ret; + } + + return 0; +} + +/** + * ti_opp_supply_set_opp() - do the opp supply transition + * @data: information on regulators and new and old opps provided by + * opp core to use in transition + * + * Return: If successful, 0, else appropriate error value.
+ */ +static int ti_opp_supply_set_opp(struct dev_pm_set_opp_data *data) +{ + struct dev_pm_opp_supply *old_supply_vdd = &data->old_opp.supplies[0]; + struct dev_pm_opp_supply *old_supply_vbb = &data->old_opp.supplies[1]; + struct dev_pm_opp_supply *new_supply_vdd = &data->new_opp.supplies[0]; + struct dev_pm_opp_supply *new_supply_vbb = &data->new_opp.supplies[1]; + struct device *dev = data->dev; + unsigned long old_freq = data->old_opp.rate, freq = data->new_opp.rate; + struct clk *clk = data->clk; + struct regulator *vdd_reg = data->regulators[0]; + struct regulator *vbb_reg = data->regulators[1]; + int vdd_uv; + int ret; + + vdd_uv = _get_optimal_vdd_voltage(dev, &opp_data, + new_supply_vbb->u_volt); + + /* Scaling up? Scale voltage before frequency */ + if (freq > old_freq) { + ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg, + "vdd"); + if (ret) + goto restore_voltage; + + ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb"); + if (ret) + goto restore_voltage; + } + + /* Change frequency */ + dev_dbg(dev, "%s: switching OPP: %lu Hz --> %lu Hz\n", + __func__, old_freq, freq); + + ret = clk_set_rate(clk, freq); + if (ret) { + dev_err(dev, "%s: failed to set clock rate: %d\n", __func__, + ret); + goto restore_voltage; + } + + /* Scaling down? Scale voltage after frequency */ + if (freq < old_freq) { + ret = _opp_set_voltage(dev, new_supply_vbb, 0, vbb_reg, "vbb"); + if (ret) + goto restore_freq; + + ret = _opp_set_voltage(dev, new_supply_vdd, vdd_uv, vdd_reg, + "vdd"); + if (ret) + goto restore_freq; + } + + return 0; + +restore_freq: + ret = clk_set_rate(clk, old_freq); + if (ret) + dev_err(dev, "%s: failed to restore old-freq (%lu Hz)\n", + __func__, old_freq); +restore_voltage: + /* This shouldn't harm even if the voltages weren't updated earlier */ + if (old_supply_vdd->u_volt) { + ret = _opp_set_voltage(dev, old_supply_vbb, 0, vbb_reg, "vbb"); + if (ret) + return ret; + + ret = _opp_set_voltage(dev, old_supply_vdd, 0, vdd_reg, + "vdd"); + if (ret) + return ret; + } + + return ret; +} + +static const struct ti_opp_supply_of_data omap_generic_of_data = { +}; + +static const struct ti_opp_supply_of_data omap_omap5_of_data = { + .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE, + .efuse_voltage_mask = 0xFFF, + .efuse_voltage_uv = false, +}; + +static const struct ti_opp_supply_of_data omap_omap5core_of_data = { + .flags = OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE | OPPDM_HAS_NO_ABB, + .efuse_voltage_mask = 0xFFF, + .efuse_voltage_uv = false, +}; + +static const struct of_device_id ti_opp_supply_of_match[] = { + {.compatible = "ti,omap-opp-supply", .data = &omap_generic_of_data}, + {.compatible = "ti,omap5-opp-supply", .data = &omap_omap5_of_data}, + {.compatible = "ti,omap5-core-opp-supply", + .data = &omap_omap5core_of_data}, + {}, +}; +MODULE_DEVICE_TABLE(of, ti_opp_supply_of_match); + +static int ti_opp_supply_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device *cpu_dev = get_cpu_device(0); + const struct of_device_id *match; + const struct ti_opp_supply_of_data *of_data; + int ret = 0; + + match = of_match_device(ti_opp_supply_of_match, dev); + if (!match) { + /* We do not expect this to happen */ + dev_err(dev, "%s: Unable to match device\n", __func__); + return -ENODEV; + } + if (!match->data) { + /* Again, unlikely.. 
but mistakes do happen */ + dev_err(dev, "%s: Bad data in match\n", __func__); + return -EINVAL; + } + of_data = match->data; + + dev_set_drvdata(dev, (void *)of_data); + + /* If we need optimized voltage */ + if (of_data->flags & OPPDM_EFUSE_CLASS0_OPTIMIZED_VOLTAGE) { + ret = _store_optimized_voltages(dev, &opp_data); + if (ret) + return ret; + } + + ret = PTR_ERR_OR_ZERO(dev_pm_opp_register_set_opp_helper(cpu_dev, + ti_opp_supply_set_opp)); + if (ret) + _free_optimized_voltages(dev, &opp_data); + + return ret; +} + +static struct platform_driver ti_opp_supply_driver = { + .probe = ti_opp_supply_probe, + .driver = { + .name = "ti_opp_supply", + .owner = THIS_MODULE, + .of_match_table = of_match_ptr(ti_opp_supply_of_match), + }, +}; +module_platform_driver(ti_opp_supply_driver); + +MODULE_DESCRIPTION("Texas Instruments OMAP OPP Supply driver"); +MODULE_AUTHOR("Texas Instruments Inc."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c index 14fd865a5120..5958c8dda4e3 100644 --- a/drivers/pci/pci-driver.c +++ b/drivers/pci/pci-driver.c @@ -699,7 +699,7 @@ static void pci_pm_complete(struct device *dev) pm_generic_complete(dev); /* Resume device if platform firmware has put it in reset-power-on */ - if (dev->power.direct_complete && pm_resume_via_firmware()) { + if (pm_runtime_suspended(dev) && pm_resume_via_firmware()) { pci_power_t pre_sleep_state = pci_dev->current_state; pci_update_current_state(pci_dev, pci_dev->current_state); @@ -783,8 +783,10 @@ static int pci_pm_suspend_noirq(struct device *dev) struct pci_dev *pci_dev = to_pci_dev(dev); const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL; - if (dev_pm_smart_suspend_and_suspended(dev)) + if (dev_pm_smart_suspend_and_suspended(dev)) { + dev->power.may_skip_resume = true; return 0; + } if (pci_has_legacy_pm_support(pci_dev)) return pci_legacy_suspend_late(dev, PMSG_SUSPEND); @@ -838,6 +840,16 @@ static int pci_pm_suspend_noirq(struct device *dev) Fixup: pci_fixup_device(pci_fixup_suspend_late, pci_dev); + /* + * If the target system sleep state is suspend-to-idle, it is sufficient + * to check whether or not the device's wakeup settings are good for + * runtime PM. Otherwise, the pm_resume_via_firmware() check will cause + * pci_pm_complete() to take care of fixing up the device's state + * anyway, if need be. 
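The pcie port driver hunk a little further down opts into exactly this handling from its probe path. As a hedged sketch of what that opt-in looks like for a hypothetical PCI driver (foo_probe and the surrounding code are illustrative; only dev_pm_set_driver_flags() and the two flags come from this series):

#include <linux/pci.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;

	ret = pcim_enable_device(pdev);
	if (ret)
		return ret;

	/*
	 * DPM_FLAG_SMART_SUSPEND lets the core skip suspend callbacks for a
	 * device that is already runtime-suspended; DPM_FLAG_LEAVE_SUSPENDED
	 * tells it the driver can cope with being left suspended across
	 * system resume, subject to the may_skip_resume checks above.
	 */
	dev_pm_set_driver_flags(&pdev->dev,
				DPM_FLAG_SMART_SUSPEND | DPM_FLAG_LEAVE_SUSPENDED);

	return 0;
}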
+ */ + dev->power.may_skip_resume = device_may_wakeup(dev) || + !device_can_wakeup(dev); + return 0; } @@ -847,6 +859,9 @@ static int pci_pm_resume_noirq(struct device *dev) struct device_driver *drv = dev->driver; int error = 0; + if (dev_pm_may_skip_resume(dev)) + return 0; + /* * Devices with DPM_FLAG_SMART_SUSPEND may be left in runtime suspend * during system suspend, so update their runtime PM status to "active" @@ -953,7 +968,7 @@ static int pci_pm_freeze_late(struct device *dev) if (dev_pm_smart_suspend_and_suspended(dev)) return 0; - return pm_generic_freeze_late(dev);; + return pm_generic_freeze_late(dev); } static int pci_pm_freeze_noirq(struct device *dev) diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c index ffbf4e723527..fb1c1bb87316 100644 --- a/drivers/pci/pcie/portdrv_pci.c +++ b/drivers/pci/pcie/portdrv_pci.c @@ -150,6 +150,9 @@ static int pcie_portdrv_probe(struct pci_dev *dev, pci_save_state(dev); + dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_SMART_SUSPEND | + DPM_FLAG_LEAVE_SUSPENDED); + if (pci_bridge_d3_possible(dev)) { /* * Keep the port resumed 100ms to make sure things like diff --git a/drivers/platform/x86/surfacepro3_button.c b/drivers/platform/x86/surfacepro3_button.c index 6505c97705e1..1b491690ce07 100644 --- a/drivers/platform/x86/surfacepro3_button.c +++ b/drivers/platform/x86/surfacepro3_button.c @@ -119,7 +119,7 @@ static void surface_button_notify(struct acpi_device *device, u32 event) if (key_code == KEY_RESERVED) return; if (pressed) - pm_wakeup_event(&device->dev, 0); + pm_wakeup_dev_event(&device->dev, 0, button->suspended); if (button->suspended) return; input_report_key(input, key_code, pressed?1:0); @@ -185,6 +185,8 @@ static int surface_button_add(struct acpi_device *device) error = input_register_device(input); if (error) goto err_free_input; + + device_init_wakeup(&device->dev, true); dev_info(&device->dev, "%s [%s]\n", name, acpi_device_bid(device)); return 0; diff --git a/drivers/power/avs/rockchip-io-domain.c b/drivers/power/avs/rockchip-io-domain.c index 75f63e38a8d1..ed2b109ae8fc 100644 --- a/drivers/power/avs/rockchip-io-domain.c +++ b/drivers/power/avs/rockchip-io-domain.c @@ -76,7 +76,7 @@ struct rockchip_iodomain_supply { struct rockchip_iodomain { struct device *dev; struct regmap *grf; - struct rockchip_iodomain_soc_data *soc_data; + const struct rockchip_iodomain_soc_data *soc_data; struct rockchip_iodomain_supply supplies[MAX_SUPPLIES]; }; @@ -382,43 +382,43 @@ static const struct rockchip_iodomain_soc_data soc_data_rv1108_pmu = { static const struct of_device_id rockchip_iodomain_match[] = { { .compatible = "rockchip,rk3188-io-voltage-domain", - .data = (void *)&soc_data_rk3188 + .data = &soc_data_rk3188 }, { .compatible = "rockchip,rk3228-io-voltage-domain", - .data = (void *)&soc_data_rk3228 + .data = &soc_data_rk3228 }, { .compatible = "rockchip,rk3288-io-voltage-domain", - .data = (void *)&soc_data_rk3288 + .data = &soc_data_rk3288 }, { .compatible = "rockchip,rk3328-io-voltage-domain", - .data = (void *)&soc_data_rk3328 + .data = &soc_data_rk3328 }, { .compatible = "rockchip,rk3368-io-voltage-domain", - .data = (void *)&soc_data_rk3368 + .data = &soc_data_rk3368 }, { .compatible = "rockchip,rk3368-pmu-io-voltage-domain", - .data = (void *)&soc_data_rk3368_pmu + .data = &soc_data_rk3368_pmu }, { .compatible = "rockchip,rk3399-io-voltage-domain", - .data = (void *)&soc_data_rk3399 + .data = &soc_data_rk3399 }, { .compatible = "rockchip,rk3399-pmu-io-voltage-domain", - .data = (void 
*)&soc_data_rk3399_pmu + .data = &soc_data_rk3399_pmu }, { .compatible = "rockchip,rv1108-io-voltage-domain", - .data = (void *)&soc_data_rv1108 + .data = &soc_data_rv1108 }, { .compatible = "rockchip,rv1108-pmu-io-voltage-domain", - .data = (void *)&soc_data_rv1108_pmu + .data = &soc_data_rv1108_pmu }, { /* sentinel */ }, }; @@ -443,7 +443,7 @@ static int rockchip_iodomain_probe(struct platform_device *pdev) platform_set_drvdata(pdev, iod); match = of_match_node(rockchip_iodomain_match, np); - iod->soc_data = (struct rockchip_iodomain_soc_data *)match->data; + iod->soc_data = match->data; parent = pdev->dev.parent; if (parent && parent->of_node) { diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c index d1694f1def72..35636e1d8a3d 100644 --- a/drivers/powercap/intel_rapl.c +++ b/drivers/powercap/intel_rapl.c @@ -29,6 +29,7 @@ #include <linux/sysfs.h> #include <linux/cpu.h> #include <linux/powercap.h> +#include <linux/suspend.h> #include <asm/iosf_mbi.h> #include <asm/processor.h> @@ -155,6 +156,7 @@ struct rapl_power_limit { int prim_id; /* primitive ID used to enable */ struct rapl_domain *domain; const char *name; + u64 last_power_limit; }; static const char pl1_name[] = "long_term"; @@ -1209,7 +1211,7 @@ static int rapl_package_register_powercap(struct rapl_package *rp) struct rapl_domain *rd; char dev_name[17]; /* max domain name = 7 + 1 + 8 for int + 1 for null*/ struct powercap_zone *power_zone = NULL; - int nr_pl, ret;; + int nr_pl, ret; /* Update the domain data of the new package */ rapl_update_domain_data(rp); @@ -1533,6 +1535,92 @@ static int rapl_cpu_down_prep(unsigned int cpu) static enum cpuhp_state pcap_rapl_online; +static void power_limit_state_save(void) +{ + struct rapl_package *rp; + struct rapl_domain *rd; + int nr_pl, ret, i; + + get_online_cpus(); + list_for_each_entry(rp, &rapl_packages, plist) { + if (!rp->power_zone) + continue; + rd = power_zone_to_rapl_domain(rp->power_zone); + nr_pl = find_nr_power_limit(rd); + for (i = 0; i < nr_pl; i++) { + switch (rd->rpl[i].prim_id) { + case PL1_ENABLE: + ret = rapl_read_data_raw(rd, + POWER_LIMIT1, + true, + &rd->rpl[i].last_power_limit); + if (ret) + rd->rpl[i].last_power_limit = 0; + break; + case PL2_ENABLE: + ret = rapl_read_data_raw(rd, + POWER_LIMIT2, + true, + &rd->rpl[i].last_power_limit); + if (ret) + rd->rpl[i].last_power_limit = 0; + break; + } + } + } + put_online_cpus(); +} + +static void power_limit_state_restore(void) +{ + struct rapl_package *rp; + struct rapl_domain *rd; + int nr_pl, i; + + get_online_cpus(); + list_for_each_entry(rp, &rapl_packages, plist) { + if (!rp->power_zone) + continue; + rd = power_zone_to_rapl_domain(rp->power_zone); + nr_pl = find_nr_power_limit(rd); + for (i = 0; i < nr_pl; i++) { + switch (rd->rpl[i].prim_id) { + case PL1_ENABLE: + if (rd->rpl[i].last_power_limit) + rapl_write_data_raw(rd, + POWER_LIMIT1, + rd->rpl[i].last_power_limit); + break; + case PL2_ENABLE: + if (rd->rpl[i].last_power_limit) + rapl_write_data_raw(rd, + POWER_LIMIT2, + rd->rpl[i].last_power_limit); + break; + } + } + } + put_online_cpus(); +} + +static int rapl_pm_callback(struct notifier_block *nb, + unsigned long mode, void *_unused) +{ + switch (mode) { + case PM_SUSPEND_PREPARE: + power_limit_state_save(); + break; + case PM_POST_SUSPEND: + power_limit_state_restore(); + break; + } + return NOTIFY_OK; +} + +static struct notifier_block rapl_pm_notifier = { + .notifier_call = rapl_pm_callback, +}; + static int __init rapl_init(void) { const struct x86_cpu_id *id; @@ -1560,8 
+1648,16 @@ static int __init rapl_init(void) /* Don't bail out if PSys is not supported */ rapl_register_psys(); + + ret = register_pm_notifier(&rapl_pm_notifier); + if (ret) + goto err_unreg_all; + return 0; +err_unreg_all: + cpuhp_remove_state(pcap_rapl_online); + err_unreg: rapl_unregister_powercap(); return ret; @@ -1569,6 +1665,7 @@ err_unreg: static void __exit rapl_exit(void) { + unregister_pm_notifier(&rapl_pm_notifier); cpuhp_remove_state(pcap_rapl_online); rapl_unregister_powercap(); } diff --git a/drivers/powercap/powercap_sys.c b/drivers/powercap/powercap_sys.c index 5b10b50f8686..64b2b2501a79 100644 --- a/drivers/powercap/powercap_sys.c +++ b/drivers/powercap/powercap_sys.c @@ -673,15 +673,13 @@ EXPORT_SYMBOL_GPL(powercap_unregister_control_type); static int __init powercap_init(void) { - int result = 0; + int result; result = seed_constraint_attributes(); if (result) return result; - result = class_register(&powercap_class); - - return result; + return class_register(&powercap_class); } device_initcall(powercap_init); diff --git a/drivers/scsi/scsi_transport_spi.c b/drivers/scsi/scsi_transport_spi.c index 10ebb213ddb3..871ea582029e 100644 --- a/drivers/scsi/scsi_transport_spi.c +++ b/drivers/scsi/scsi_transport_spi.c @@ -26,6 +26,7 @@ #include <linux/mutex.h> #include <linux/sysfs.h> #include <linux/slab.h> +#include <linux/suspend.h> #include <scsi/scsi.h> #include "scsi_priv.h" #include <scsi/scsi_device.h> @@ -1009,11 +1010,20 @@ spi_dv_device(struct scsi_device *sdev) u8 *buffer; const int len = SPI_MAX_ECHO_BUFFER_SIZE*2; + /* + * Because this function and the power management code both call + * scsi_device_quiesce(), it is not safe to perform domain validation + * while suspend or resume is in progress. Hence the + * lock/unlock_system_sleep() calls. + */ + lock_system_sleep(); + if (unlikely(spi_dv_in_progress(starget))) - return; + goto unlock; if (unlikely(scsi_device_get(sdev))) - return; + goto unlock; + spi_dv_in_progress(starget) = 1; buffer = kzalloc(len, GFP_KERNEL); @@ -1049,6 +1059,8 @@ spi_dv_device(struct scsi_device *sdev) out_put: spi_dv_in_progress(starget) = 0; scsi_device_put(sdev); +unlock: + unlock_system_sleep(); } EXPORT_SYMBOL(spi_dv_device); diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c index dc63aba092e4..dfd23245f778 100644 --- a/drivers/thermal/cpu_cooling.c +++ b/drivers/thermal/cpu_cooling.c @@ -88,7 +88,6 @@ struct time_in_idle { * @policy: cpufreq policy. * @node: list_head to link all cpufreq_cooling_device together. * @idle_time: idle time stats - * @plat_get_static_power: callback to calculate the static power * * This structure is required for keeping information of each registered * cpufreq_cooling_device. @@ -104,7 +103,6 @@ struct cpufreq_cooling_device { struct cpufreq_policy *policy; struct list_head node; struct time_in_idle *idle_time; - get_static_t plat_get_static_power; }; static DEFINE_IDA(cpufreq_ida); @@ -319,60 +317,6 @@ static u32 get_load(struct cpufreq_cooling_device *cpufreq_cdev, int cpu, } /** - * get_static_power() - calculate the static power consumed by the cpus - * @cpufreq_cdev: struct &cpufreq_cooling_device for this cpu cdev - * @tz: thermal zone device in which we're operating - * @freq: frequency in KHz - * @power: pointer in which to store the calculated static power - * - * Calculate the static power consumed by the cpus described by - * @cpu_actor running at frequency @freq. 
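The intel_rapl change above is an instance of a more general pattern: snapshot hardware state from a PM notifier before the system suspends and re-program it once it resumes. A minimal, self-contained sketch of that pattern (the foo_* names and the saved value are placeholders, not RAPL code):

#include <linux/module.h>
#include <linux/suspend.h>

static u64 foo_saved_limit;

static void foo_save(void)
{
	/* Placeholder for reading state the firmware may clobber in suspend. */
	foo_saved_limit = 42;
}

static void foo_restore(void)
{
	/* Placeholder for writing the saved state back to the hardware. */
	pr_info("foo: restoring limit %llu\n", foo_saved_limit);
}

static int foo_pm_callback(struct notifier_block *nb, unsigned long mode,
			   void *unused)
{
	switch (mode) {
	case PM_SUSPEND_PREPARE:	/* about to enter system suspend */
		foo_save();
		break;
	case PM_POST_SUSPEND:		/* resumed, or the suspend failed */
		foo_restore();
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_pm_notifier = {
	.notifier_call = foo_pm_callback,
};

static int __init foo_init(void)
{
	return register_pm_notifier(&foo_pm_notifier);
}

static void __exit foo_exit(void)
{
	unregister_pm_notifier(&foo_pm_notifier);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL v2");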
This function relies on a - * platform specific function that should have been provided when the - * actor was registered. If it wasn't, the static power is assumed to - * be negligible. The calculated static power is stored in @power. - * - * Return: 0 on success, -E* on failure. - */ -static int get_static_power(struct cpufreq_cooling_device *cpufreq_cdev, - struct thermal_zone_device *tz, unsigned long freq, - u32 *power) -{ - struct dev_pm_opp *opp; - unsigned long voltage; - struct cpufreq_policy *policy = cpufreq_cdev->policy; - struct cpumask *cpumask = policy->related_cpus; - unsigned long freq_hz = freq * 1000; - struct device *dev; - - if (!cpufreq_cdev->plat_get_static_power) { - *power = 0; - return 0; - } - - dev = get_cpu_device(policy->cpu); - WARN_ON(!dev); - - opp = dev_pm_opp_find_freq_exact(dev, freq_hz, true); - if (IS_ERR(opp)) { - dev_warn_ratelimited(dev, "Failed to find OPP for frequency %lu: %ld\n", - freq_hz, PTR_ERR(opp)); - return -EINVAL; - } - - voltage = dev_pm_opp_get_voltage(opp); - dev_pm_opp_put(opp); - - if (voltage == 0) { - dev_err_ratelimited(dev, "Failed to get voltage for frequency %lu\n", - freq_hz); - return -EINVAL; - } - - return cpufreq_cdev->plat_get_static_power(cpumask, tz->passive_delay, - voltage, power); -} - -/** * get_dynamic_power() - calculate the dynamic power * @cpufreq_cdev: &cpufreq_cooling_device for this cdev * @freq: current frequency @@ -491,8 +435,8 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, u32 *power) { unsigned long freq; - int i = 0, cpu, ret; - u32 static_power, dynamic_power, total_load = 0; + int i = 0, cpu; + u32 total_load = 0; struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; struct cpufreq_policy *policy = cpufreq_cdev->policy; u32 *load_cpu = NULL; @@ -522,22 +466,15 @@ static int cpufreq_get_requested_power(struct thermal_cooling_device *cdev, cpufreq_cdev->last_load = total_load; - dynamic_power = get_dynamic_power(cpufreq_cdev, freq); - ret = get_static_power(cpufreq_cdev, tz, freq, &static_power); - if (ret) { - kfree(load_cpu); - return ret; - } + *power = get_dynamic_power(cpufreq_cdev, freq); if (load_cpu) { trace_thermal_power_cpu_get_power(policy->related_cpus, freq, - load_cpu, i, dynamic_power, - static_power); + load_cpu, i, *power); kfree(load_cpu); } - *power = static_power + dynamic_power; return 0; } @@ -561,8 +498,6 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev, unsigned long state, u32 *power) { unsigned int freq, num_cpus; - u32 static_power, dynamic_power; - int ret; struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; /* Request state should be less than max_level */ @@ -572,13 +507,9 @@ static int cpufreq_state2power(struct thermal_cooling_device *cdev, num_cpus = cpumask_weight(cpufreq_cdev->policy->cpus); freq = cpufreq_cdev->freq_table[state].frequency; - dynamic_power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus; - ret = get_static_power(cpufreq_cdev, tz, freq, &static_power); - if (ret) - return ret; + *power = cpu_freq_to_power(cpufreq_cdev, freq) * num_cpus; - *power = static_power + dynamic_power; - return ret; + return 0; } /** @@ -606,21 +537,14 @@ static int cpufreq_power2state(struct thermal_cooling_device *cdev, unsigned long *state) { unsigned int cur_freq, target_freq; - int ret; - s32 dyn_power; - u32 last_load, normalised_power, static_power; + u32 last_load, normalised_power; struct cpufreq_cooling_device *cpufreq_cdev = cdev->devdata; struct cpufreq_policy *policy = cpufreq_cdev->policy; 
cur_freq = cpufreq_quick_get(policy->cpu); - ret = get_static_power(cpufreq_cdev, tz, cur_freq, &static_power); - if (ret) - return ret; - - dyn_power = power - static_power; - dyn_power = dyn_power > 0 ? dyn_power : 0; + power = power > 0 ? power : 0; last_load = cpufreq_cdev->last_load ?: 1; - normalised_power = (dyn_power * 100) / last_load; + normalised_power = (power * 100) / last_load; target_freq = cpu_power_to_freq(cpufreq_cdev, normalised_power); *state = get_level(cpufreq_cdev, target_freq); @@ -671,8 +595,6 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table, * @policy: cpufreq policy * Normally this should be same as cpufreq policy->related_cpus. * @capacitance: dynamic power coefficient for these cpus - * @plat_static_func: function to calculate the static power consumed by these - * cpus (optional) * * This interface function registers the cpufreq cooling device with the name * "thermal-cpufreq-%x". This api can support multiple instances of cpufreq @@ -684,8 +606,7 @@ static unsigned int find_next_max(struct cpufreq_frequency_table *table, */ static struct thermal_cooling_device * __cpufreq_cooling_register(struct device_node *np, - struct cpufreq_policy *policy, u32 capacitance, - get_static_t plat_static_func) + struct cpufreq_policy *policy, u32 capacitance) { struct thermal_cooling_device *cdev; struct cpufreq_cooling_device *cpufreq_cdev; @@ -755,8 +676,6 @@ __cpufreq_cooling_register(struct device_node *np, } if (capacitance) { - cpufreq_cdev->plat_get_static_power = plat_static_func; - ret = update_freq_table(cpufreq_cdev, capacitance); if (ret) { cdev = ERR_PTR(ret); @@ -813,13 +732,12 @@ free_cdev: struct thermal_cooling_device * cpufreq_cooling_register(struct cpufreq_policy *policy) { - return __cpufreq_cooling_register(NULL, policy, 0, NULL); + return __cpufreq_cooling_register(NULL, policy, 0); } EXPORT_SYMBOL_GPL(cpufreq_cooling_register); /** * of_cpufreq_cooling_register - function to create cpufreq cooling device. - * @np: a valid struct device_node to the cooling device device tree node * @policy: cpufreq policy * * This interface function registers the cpufreq cooling device with the name @@ -827,86 +745,45 @@ EXPORT_SYMBOL_GPL(cpufreq_cooling_register); * cooling devices. Using this API, the cpufreq cooling device will be * linked to the device tree node provided. * - * Return: a valid struct thermal_cooling_device pointer on success, - * on failure, it returns a corresponding ERR_PTR(). - */ -struct thermal_cooling_device * -of_cpufreq_cooling_register(struct device_node *np, - struct cpufreq_policy *policy) -{ - if (!np) - return ERR_PTR(-EINVAL); - - return __cpufreq_cooling_register(np, policy, 0, NULL); -} -EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register); - -/** - * cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions - * @policy: cpufreq policy - * @capacitance: dynamic power coefficient for these cpus - * @plat_static_func: function to calculate the static power consumed by these - * cpus (optional) - * - * This interface function registers the cpufreq cooling device with - * the name "thermal-cpufreq-%x". This api can support multiple - * instances of cpufreq cooling devices. Using this function, the - * cooling device will implement the power extensions by using a - * simple cpu power model. The cpus must have registered their OPPs - * using the OPP library. - * - * An optional @plat_static_func may be provided to calculate the - * static power consumed by these cpus. 
If the platform's static - * power consumption is unknown or negligible, make it NULL. - * - * Return: a valid struct thermal_cooling_device pointer on success, - * on failure, it returns a corresponding ERR_PTR(). - */ -struct thermal_cooling_device * -cpufreq_power_cooling_register(struct cpufreq_policy *policy, u32 capacitance, - get_static_t plat_static_func) -{ - return __cpufreq_cooling_register(NULL, policy, capacitance, - plat_static_func); -} -EXPORT_SYMBOL(cpufreq_power_cooling_register); - -/** - * of_cpufreq_power_cooling_register() - create cpufreq cooling device with power extensions - * @np: a valid struct device_node to the cooling device device tree node - * @policy: cpufreq policy - * @capacitance: dynamic power coefficient for these cpus - * @plat_static_func: function to calculate the static power consumed by these - * cpus (optional) - * - * This interface function registers the cpufreq cooling device with - * the name "thermal-cpufreq-%x". This api can support multiple - * instances of cpufreq cooling devices. Using this API, the cpufreq - * cooling device will be linked to the device tree node provided. * Using this function, the cooling device will implement the power * extensions by using a simple cpu power model. The cpus must have * registered their OPPs using the OPP library. * - * An optional @plat_static_func may be provided to calculate the - * static power consumed by these cpus. If the platform's static - * power consumption is unknown or negligible, make it NULL. + * It also takes into account, if property present in policy CPU node, the + * static power consumed by the cpu. * * Return: a valid struct thermal_cooling_device pointer on success, - * on failure, it returns a corresponding ERR_PTR(). + * and NULL on failure. */ struct thermal_cooling_device * -of_cpufreq_power_cooling_register(struct device_node *np, - struct cpufreq_policy *policy, - u32 capacitance, - get_static_t plat_static_func) +of_cpufreq_cooling_register(struct cpufreq_policy *policy) { - if (!np) - return ERR_PTR(-EINVAL); + struct device_node *np = of_get_cpu_node(policy->cpu, NULL); + struct thermal_cooling_device *cdev = NULL; + u32 capacitance = 0; + + if (!np) { + pr_err("cpu_cooling: OF node not available for cpu%d\n", + policy->cpu); + return NULL; + } + + if (of_find_property(np, "#cooling-cells", NULL)) { + of_property_read_u32(np, "dynamic-power-coefficient", + &capacitance); - return __cpufreq_cooling_register(np, policy, capacitance, - plat_static_func); + cdev = __cpufreq_cooling_register(np, policy, capacitance); + if (IS_ERR(cdev)) { + pr_err("cpu_cooling: cpu%d is not running as cooling device: %ld\n", + policy->cpu, PTR_ERR(cdev)); + cdev = NULL; + } + } + + of_node_put(np); + return cdev; } -EXPORT_SYMBOL(of_cpufreq_power_cooling_register); +EXPORT_SYMBOL_GPL(of_cpufreq_cooling_register); /** * cpufreq_cooling_unregister - function to remove cpufreq cooling device. |
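With the static-power callback gone, of_cpufreq_cooling_register() now takes only the policy and looks up "#cooling-cells" and "dynamic-power-coefficient" in the CPU's DT node itself, returning NULL rather than an ERR_PTR when no cooling device is created. A sketch of how a cpufreq driver would hook it up under the new interface (the foo_* driver is hypothetical):

#include <linux/cpufreq.h>
#include <linux/cpu_cooling.h>

static struct thermal_cooling_device *foo_cdev;

static void foo_cpufreq_ready(struct cpufreq_policy *policy)
{
	/* NULL here simply means "no cooling device", not an error. */
	foo_cdev = of_cpufreq_cooling_register(policy);
}

static int foo_cpufreq_exit(struct cpufreq_policy *policy)
{
	cpufreq_cooling_unregister(foo_cdev);
	return 0;
}

static struct cpufreq_driver foo_cpufreq_driver = {
	.name	= "foo-cpufreq",
	/* .init, .verify, .target_index, ... omitted for brevity */
	.ready	= foo_cpufreq_ready,
	.exit	= foo_cpufreq_exit,
};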