Diffstat (limited to 'drivers/base')
-rw-r--r--   drivers/base/bus.c            |    2
-rw-r--r--   drivers/base/class.c          |    2
-rw-r--r--   drivers/base/core.c           |   16
-rw-r--r--   drivers/base/devtmpfs.c       |   22
-rw-r--r--   drivers/base/driver.c         |    4
-rw-r--r--   drivers/base/memory.c         |   21
-rw-r--r--   drivers/base/platform.c       |    1
-rw-r--r--   drivers/base/power/main.c     |  272
-rw-r--r--   drivers/base/power/power.h    |    6
-rw-r--r--   drivers/base/power/runtime.c  |   90
-rw-r--r--   drivers/base/power/sysfs.c    |  100
11 files changed, 481 insertions, 55 deletions
diff --git a/drivers/base/bus.c b/drivers/base/bus.c index 63c143e54a57..c0c5a43d9fb3 100644 --- a/drivers/base/bus.c +++ b/drivers/base/bus.c @@ -703,9 +703,9 @@ int bus_add_driver(struct device_driver *drv) return 0; out_unregister: + kobject_put(&priv->kobj); kfree(drv->p); drv->p = NULL; - kobject_put(&priv->kobj); out_put_bus: bus_put(bus); return error; diff --git a/drivers/base/class.c b/drivers/base/class.c index 161746deab4b..6e2c3b064f53 100644 --- a/drivers/base/class.c +++ b/drivers/base/class.c @@ -59,6 +59,8 @@ static void class_release(struct kobject *kobj) else pr_debug("class '%s' does not have a release() function, " "be careful\n", class->name); + + kfree(cp); } static struct sysfs_ops class_sysfs_ops = { diff --git a/drivers/base/core.c b/drivers/base/core.c index f1290cbd1350..282025770429 100644 --- a/drivers/base/core.c +++ b/drivers/base/core.c @@ -446,7 +446,8 @@ struct kset *devices_kset; * @dev: device. * @attr: device attribute descriptor. */ -int device_create_file(struct device *dev, struct device_attribute *attr) +int device_create_file(struct device *dev, + const struct device_attribute *attr) { int error = 0; if (dev) @@ -459,7 +460,8 @@ int device_create_file(struct device *dev, struct device_attribute *attr) * @dev: device. * @attr: device attribute descriptor. */ -void device_remove_file(struct device *dev, struct device_attribute *attr) +void device_remove_file(struct device *dev, + const struct device_attribute *attr) { if (dev) sysfs_remove_file(&dev->kobj, &attr->attr); @@ -470,7 +472,8 @@ void device_remove_file(struct device *dev, struct device_attribute *attr) * @dev: device. * @attr: device binary attribute descriptor. */ -int device_create_bin_file(struct device *dev, struct bin_attribute *attr) +int device_create_bin_file(struct device *dev, + const struct bin_attribute *attr) { int error = -EINVAL; if (dev) @@ -484,7 +487,8 @@ EXPORT_SYMBOL_GPL(device_create_bin_file); * @dev: device. * @attr: device binary attribute descriptor. 
*/ -void device_remove_bin_file(struct device *dev, struct bin_attribute *attr) +void device_remove_bin_file(struct device *dev, + const struct bin_attribute *attr) { if (dev) sysfs_remove_bin_file(&dev->kobj, attr); @@ -905,8 +909,10 @@ int device_add(struct device *dev) dev->init_name = NULL; } - if (!dev_name(dev)) + if (!dev_name(dev)) { + error = -EINVAL; goto name_error; + } pr_debug("device: '%s': %s\n", dev_name(dev), __func__); diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c index 50375bb8e51d..42ae452b36b0 100644 --- a/drivers/base/devtmpfs.c +++ b/drivers/base/devtmpfs.c @@ -32,7 +32,7 @@ static int dev_mount = 1; static int dev_mount; #endif -static rwlock_t dirlock; +static DEFINE_MUTEX(dirlock); static int __init mount_param(char *str) { @@ -93,7 +93,7 @@ static int create_path(const char *nodepath) { int err; - read_lock(&dirlock); + mutex_lock(&dirlock); err = dev_mkdir(nodepath, 0755); if (err == -ENOENT) { char *path; @@ -101,8 +101,10 @@ static int create_path(const char *nodepath) /* parent directories do not exist, create them */ path = kstrdup(nodepath, GFP_KERNEL); - if (!path) - return -ENOMEM; + if (!path) { + err = -ENOMEM; + goto out; + } s = path; for (;;) { s = strchr(s, '/'); @@ -117,7 +119,8 @@ static int create_path(const char *nodepath) } kfree(path); } - read_unlock(&dirlock); +out: + mutex_unlock(&dirlock); return err; } @@ -229,7 +232,7 @@ static int delete_path(const char *nodepath) if (!path) return -ENOMEM; - write_lock(&dirlock); + mutex_lock(&dirlock); for (;;) { char *base; @@ -241,7 +244,7 @@ static int delete_path(const char *nodepath) if (err) break; } - write_unlock(&dirlock); + mutex_unlock(&dirlock); kfree(path); return err; @@ -351,8 +354,7 @@ int __init devtmpfs_init(void) { int err; struct vfsmount *mnt; - - rwlock_init(&dirlock); + char options[] = "mode=0755"; err = register_filesystem(&dev_fs_type); if (err) { @@ -361,7 +363,7 @@ int __init devtmpfs_init(void) return err; } - mnt = kern_mount_data(&dev_fs_type, "mode=0755"); + mnt = kern_mount_data(&dev_fs_type, options); if (IS_ERR(mnt)) { err = PTR_ERR(mnt); printk(KERN_ERR "devtmpfs: unable to create devtmpfs %i\n", err); diff --git a/drivers/base/driver.c b/drivers/base/driver.c index f367885a7646..90c9fff09ead 100644 --- a/drivers/base/driver.c +++ b/drivers/base/driver.c @@ -98,7 +98,7 @@ EXPORT_SYMBOL_GPL(driver_find_device); * @attr: driver attribute descriptor. */ int driver_create_file(struct device_driver *drv, - struct driver_attribute *attr) + const struct driver_attribute *attr) { int error; if (drv) @@ -115,7 +115,7 @@ EXPORT_SYMBOL_GPL(driver_create_file); * @attr: driver attribute descriptor. 
*/ void driver_remove_file(struct device_driver *drv, - struct driver_attribute *attr) + const struct driver_attribute *attr) { if (drv) sysfs_remove_file(&drv->p->kobj, &attr->attr); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index c4c8f2e1dd15..bd025059711f 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -63,6 +63,20 @@ void unregister_memory_notifier(struct notifier_block *nb) } EXPORT_SYMBOL(unregister_memory_notifier); +static ATOMIC_NOTIFIER_HEAD(memory_isolate_chain); + +int register_memory_isolate_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&memory_isolate_chain, nb); +} +EXPORT_SYMBOL(register_memory_isolate_notifier); + +void unregister_memory_isolate_notifier(struct notifier_block *nb) +{ + atomic_notifier_chain_unregister(&memory_isolate_chain, nb); +} +EXPORT_SYMBOL(unregister_memory_isolate_notifier); + /* * register_memory - Setup a sysfs device for a memory block */ @@ -157,6 +171,11 @@ int memory_notify(unsigned long val, void *v) return blocking_notifier_call_chain(&memory_chain, val, v); } +int memory_isolate_notify(unsigned long val, void *v) +{ + return atomic_notifier_call_chain(&memory_isolate_chain, val, v); +} + /* * MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is * OK to have direct references to sparsemem variables in here. @@ -292,7 +311,7 @@ static SYSDEV_ATTR(removable, 0444, show_mem_removable, NULL); static ssize_t print_block_size(struct class *class, char *buf) { - return sprintf(buf, "%lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); + return sprintf(buf, "%#lx\n", (unsigned long)PAGES_PER_SECTION * PAGE_SIZE); } static CLASS_ATTR(block_size_bytes, 0444, print_block_size, NULL); diff --git a/drivers/base/platform.c b/drivers/base/platform.c index 9d2ee25deaf5..58efaf2f1259 100644 --- a/drivers/base/platform.c +++ b/drivers/base/platform.c @@ -441,6 +441,7 @@ error: platform_device_put(pdev); return ERR_PTR(retval); } +EXPORT_SYMBOL_GPL(platform_device_register_data); static int platform_drv_probe(struct device *_dev) { diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c index 1a216c114a0f..0e26a6f6fd48 100644 --- a/drivers/base/power/main.c +++ b/drivers/base/power/main.c @@ -25,6 +25,7 @@ #include <linux/resume-trace.h> #include <linux/interrupt.h> #include <linux/sched.h> +#include <linux/async.h> #include "../base.h" #include "power.h" @@ -42,6 +43,7 @@ LIST_HEAD(dpm_list); static DEFINE_MUTEX(dpm_list_mtx); +static pm_message_t pm_transition; /* * Set once the preparation of devices for a PM transition has started, reset @@ -56,6 +58,7 @@ static bool transition_started; void device_pm_init(struct device *dev) { dev->power.status = DPM_ON; + init_completion(&dev->power.completion); pm_runtime_init(dev); } @@ -111,6 +114,7 @@ void device_pm_remove(struct device *dev) pr_debug("PM: Removing info for %s:%s\n", dev->bus ? 
dev->bus->name : "No Bus", kobject_name(&dev->kobj)); + complete_all(&dev->power.completion); mutex_lock(&dpm_list_mtx); list_del_init(&dev->power.entry); mutex_unlock(&dpm_list_mtx); @@ -161,6 +165,57 @@ void device_pm_move_last(struct device *dev) list_move_tail(&dev->power.entry, &dpm_list); } +static ktime_t initcall_debug_start(struct device *dev) +{ + ktime_t calltime = ktime_set(0, 0); + + if (initcall_debug) { + pr_info("calling %s+ @ %i\n", + dev_name(dev), task_pid_nr(current)); + calltime = ktime_get(); + } + + return calltime; +} + +static void initcall_debug_report(struct device *dev, ktime_t calltime, + int error) +{ + ktime_t delta, rettime; + + if (initcall_debug) { + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); + pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), + error, (unsigned long long)ktime_to_ns(delta) >> 10); + } +} + +/** + * dpm_wait - Wait for a PM operation to complete. + * @dev: Device to wait for. + * @async: If unset, wait only if the device's power.async_suspend flag is set. + */ +static void dpm_wait(struct device *dev, bool async) +{ + if (!dev) + return; + + if (async || (pm_async_enabled && dev->power.async_suspend)) + wait_for_completion(&dev->power.completion); +} + +static int dpm_wait_fn(struct device *dev, void *async_ptr) +{ + dpm_wait(dev, *((bool *)async_ptr)); + return 0; +} + +static void dpm_wait_for_children(struct device *dev, bool async) +{ + device_for_each_child(dev, &async, dpm_wait_fn); +} + /** * pm_op - Execute the PM operation appropriate for given PM event. * @dev: Device to handle. @@ -172,13 +227,9 @@ static int pm_op(struct device *dev, pm_message_t state) { int error = 0; - ktime_t calltime, delta, rettime; + ktime_t calltime; - if (initcall_debug) { - pr_info("calling %s+ @ %i\n", - dev_name(dev), task_pid_nr(current)); - calltime = ktime_get(); - } + calltime = initcall_debug_start(dev); switch (state.event) { #ifdef CONFIG_SUSPEND @@ -227,12 +278,7 @@ static int pm_op(struct device *dev, error = -EINVAL; } - if (initcall_debug) { - rettime = ktime_get(); - delta = ktime_sub(rettime, calltime); - pr_info("call %s+ returned %d after %Ld usecs\n", dev_name(dev), - error, (unsigned long long)ktime_to_ns(delta) >> 10); - } + initcall_debug_report(dev, calltime, error); return error; } @@ -254,8 +300,9 @@ static int pm_noirq_op(struct device *dev, ktime_t calltime, delta, rettime; if (initcall_debug) { - pr_info("calling %s_i+ @ %i\n", - dev_name(dev), task_pid_nr(current)); + pr_info("calling %s+ @ %i, parent: %s\n", + dev_name(dev), task_pid_nr(current), + dev->parent ? 
dev_name(dev->parent) : "none"); calltime = ktime_get(); } @@ -309,8 +356,9 @@ static int pm_noirq_op(struct device *dev, if (initcall_debug) { rettime = ktime_get(); delta = ktime_sub(rettime, calltime); - printk("initcall %s_i+ returned %d after %Ld usecs\n", dev_name(dev), - error, (unsigned long long)ktime_to_ns(delta) >> 10); + printk("initcall %s_i+ returned %d after %Ld usecs\n", + dev_name(dev), error, + (unsigned long long)ktime_to_ns(delta) >> 10); } return error; @@ -354,6 +402,23 @@ static void pm_dev_err(struct device *dev, pm_message_t state, char *info, kobject_name(&dev->kobj), pm_verb(state.event), info, error); } +static void dpm_show_time(ktime_t starttime, pm_message_t state, char *info) +{ + ktime_t calltime; + s64 usecs64; + int usecs; + + calltime = ktime_get(); + usecs64 = ktime_to_ns(ktime_sub(calltime, starttime)); + do_div(usecs64, NSEC_PER_USEC); + usecs = usecs64; + if (usecs == 0) + usecs = 1; + pr_info("PM: %s%s%s of devices complete after %ld.%03ld msecs\n", + info ?: "", info ? " " : "", pm_verb(state.event), + usecs / USEC_PER_MSEC, usecs % USEC_PER_MSEC); +} + /*------------------------- Resume routines -------------------------*/ /** @@ -390,6 +455,7 @@ static int device_resume_noirq(struct device *dev, pm_message_t state) void dpm_resume_noirq(pm_message_t state) { struct device *dev; + ktime_t starttime = ktime_get(); mutex_lock(&dpm_list_mtx); transition_started = false; @@ -403,31 +469,56 @@ void dpm_resume_noirq(pm_message_t state) pm_dev_err(dev, state, " early", error); } mutex_unlock(&dpm_list_mtx); + dpm_show_time(starttime, state, "early"); resume_device_irqs(); } EXPORT_SYMBOL_GPL(dpm_resume_noirq); /** + * legacy_resume - Execute a legacy (bus or class) resume callback for device. + * @dev: Device to resume. + * @cb: Resume callback to execute. + */ +static int legacy_resume(struct device *dev, int (*cb)(struct device *dev)) +{ + int error; + ktime_t calltime; + + calltime = initcall_debug_start(dev); + + error = cb(dev); + suspend_report_result(cb, error); + + initcall_debug_report(dev, calltime, error); + + return error; +} + +/** * device_resume - Execute "resume" callbacks for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. + * @async: If true, the device is being resumed asynchronously. 
*/ -static int device_resume(struct device *dev, pm_message_t state) +static int device_resume(struct device *dev, pm_message_t state, bool async) { int error = 0; TRACE_DEVICE(dev); TRACE_RESUME(0); + dpm_wait(dev->parent, async); down(&dev->sem); + dev->power.status = DPM_RESUMING; + if (dev->bus) { if (dev->bus->pm) { pm_dev_dbg(dev, state, ""); error = pm_op(dev, dev->bus->pm, state); } else if (dev->bus->resume) { pm_dev_dbg(dev, state, "legacy "); - error = dev->bus->resume(dev); + error = legacy_resume(dev, dev->bus->resume); } if (error) goto End; @@ -448,16 +539,34 @@ static int device_resume(struct device *dev, pm_message_t state) error = pm_op(dev, dev->class->pm, state); } else if (dev->class->resume) { pm_dev_dbg(dev, state, "legacy class "); - error = dev->class->resume(dev); + error = legacy_resume(dev, dev->class->resume); } } End: up(&dev->sem); + complete_all(&dev->power.completion); TRACE_RESUME(error); return error; } +static void async_resume(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = device_resume(dev, pm_transition, true); + if (error) + pm_dev_err(dev, pm_transition, " async", error); + put_device(dev); +} + +static bool is_async(struct device *dev) +{ + return dev->power.async_suspend && pm_async_enabled + && !pm_trace_is_enabled(); +} + /** * dpm_resume - Execute "resume" callbacks for non-sysdev devices. * @state: PM transition of the system being carried out. @@ -468,20 +577,33 @@ static int device_resume(struct device *dev, pm_message_t state) static void dpm_resume(pm_message_t state) { struct list_head list; + struct device *dev; + ktime_t starttime = ktime_get(); INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); - while (!list_empty(&dpm_list)) { - struct device *dev = to_device(dpm_list.next); + pm_transition = state; + + list_for_each_entry(dev, &dpm_list, power.entry) { + if (dev->power.status < DPM_OFF) + continue; + + INIT_COMPLETION(dev->power.completion); + if (is_async(dev)) { + get_device(dev); + async_schedule(async_resume, dev); + } + } + while (!list_empty(&dpm_list)) { + dev = to_device(dpm_list.next); get_device(dev); - if (dev->power.status >= DPM_OFF) { + if (dev->power.status >= DPM_OFF && !is_async(dev)) { int error; - dev->power.status = DPM_RESUMING; mutex_unlock(&dpm_list_mtx); - error = device_resume(dev, state); + error = device_resume(dev, state, false); mutex_lock(&dpm_list_mtx); if (error) @@ -496,6 +618,8 @@ static void dpm_resume(pm_message_t state) } list_splice(&list, &dpm_list); mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); + dpm_show_time(starttime, state, NULL); } /** @@ -548,7 +672,7 @@ static void dpm_complete(pm_message_t state) mutex_unlock(&dpm_list_mtx); device_complete(dev, state); - pm_runtime_put_noidle(dev); + pm_runtime_put_sync(dev); mutex_lock(&dpm_list_mtx); } @@ -628,6 +752,7 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state) int dpm_suspend_noirq(pm_message_t state) { struct device *dev; + ktime_t starttime = ktime_get(); int error = 0; suspend_device_irqs(); @@ -643,29 +768,59 @@ int dpm_suspend_noirq(pm_message_t state) mutex_unlock(&dpm_list_mtx); if (error) dpm_resume_noirq(resume_event(state)); + else + dpm_show_time(starttime, state, "late"); return error; } EXPORT_SYMBOL_GPL(dpm_suspend_noirq); /** + * legacy_suspend - Execute a legacy (bus or class) suspend callback for device. + * @dev: Device to suspend. + * @state: PM transition of the system being carried out. + * @cb: Suspend callback to execute. 
+ */ +static int legacy_suspend(struct device *dev, pm_message_t state, + int (*cb)(struct device *dev, pm_message_t state)) +{ + int error; + ktime_t calltime; + + calltime = initcall_debug_start(dev); + + error = cb(dev, state); + suspend_report_result(cb, error); + + initcall_debug_report(dev, calltime, error); + + return error; +} + +static int async_error; + +/** * device_suspend - Execute "suspend" callbacks for given device. * @dev: Device to handle. * @state: PM transition of the system being carried out. + * @async: If true, the device is being suspended asynchronously. */ -static int device_suspend(struct device *dev, pm_message_t state) +static int __device_suspend(struct device *dev, pm_message_t state, bool async) { int error = 0; + dpm_wait_for_children(dev, async); down(&dev->sem); + if (async_error) + goto End; + if (dev->class) { if (dev->class->pm) { pm_dev_dbg(dev, state, "class "); error = pm_op(dev, dev->class->pm, state); } else if (dev->class->suspend) { pm_dev_dbg(dev, state, "legacy class "); - error = dev->class->suspend(dev, state); - suspend_report_result(dev->class->suspend, error); + error = legacy_suspend(dev, state, dev->class->suspend); } if (error) goto End; @@ -686,16 +841,47 @@ static int device_suspend(struct device *dev, pm_message_t state) error = pm_op(dev, dev->bus->pm, state); } else if (dev->bus->suspend) { pm_dev_dbg(dev, state, "legacy "); - error = dev->bus->suspend(dev, state); - suspend_report_result(dev->bus->suspend, error); + error = legacy_suspend(dev, state, dev->bus->suspend); } } + + if (!error) + dev->power.status = DPM_OFF; + End: up(&dev->sem); + complete_all(&dev->power.completion); return error; } +static void async_suspend(void *data, async_cookie_t cookie) +{ + struct device *dev = (struct device *)data; + int error; + + error = __device_suspend(dev, pm_transition, true); + if (error) { + pm_dev_err(dev, pm_transition, " async", error); + async_error = error; + } + + put_device(dev); +} + +static int device_suspend(struct device *dev) +{ + INIT_COMPLETION(dev->power.completion); + + if (pm_async_enabled && dev->power.async_suspend) { + get_device(dev); + async_schedule(async_suspend, dev); + return 0; + } + + return __device_suspend(dev, pm_transition, false); +} + /** * dpm_suspend - Execute "suspend" callbacks for all non-sysdev devices. * @state: PM transition of the system being carried out. 
@@ -703,17 +889,20 @@ static int device_suspend(struct device *dev, pm_message_t state) static int dpm_suspend(pm_message_t state) { struct list_head list; + ktime_t starttime = ktime_get(); int error = 0; INIT_LIST_HEAD(&list); mutex_lock(&dpm_list_mtx); + pm_transition = state; + async_error = 0; while (!list_empty(&dpm_list)) { struct device *dev = to_device(dpm_list.prev); get_device(dev); mutex_unlock(&dpm_list_mtx); - error = device_suspend(dev, state); + error = device_suspend(dev); mutex_lock(&dpm_list_mtx); if (error) { @@ -721,13 +910,19 @@ static int dpm_suspend(pm_message_t state) put_device(dev); break; } - dev->power.status = DPM_OFF; if (!list_empty(&dev->power.entry)) list_move(&dev->power.entry, &list); put_device(dev); + if (async_error) + break; } list_splice(&list, dpm_list.prev); mutex_unlock(&dpm_list_mtx); + async_synchronize_full(); + if (!error) + error = async_error; + if (!error) + dpm_show_time(starttime, state, NULL); return error; } @@ -796,7 +991,7 @@ static int dpm_prepare(pm_message_t state) pm_runtime_get_noresume(dev); if (pm_runtime_barrier(dev) && device_may_wakeup(dev)) { /* Wake-up requested during system sleep transition. */ - pm_runtime_put_noidle(dev); + pm_runtime_put_sync(dev); error = -EBUSY; } else { error = device_prepare(dev, state); @@ -851,3 +1046,14 @@ void __suspend_report_result(const char *function, void *fn, int ret) printk(KERN_ERR "%s(): %pF returns %d\n", function, fn, ret); } EXPORT_SYMBOL_GPL(__suspend_report_result); + +/** + * device_pm_wait_for_dev - Wait for suspend/resume of a device to complete. + * @dev: Device to wait for. + * @subordinate: Device that needs to wait for @dev. + */ +void device_pm_wait_for_dev(struct device *subordinate, struct device *dev) +{ + dpm_wait(dev, subordinate->power.async_suspend); +} +EXPORT_SYMBOL_GPL(device_pm_wait_for_dev); diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index b8fa1aa5225a..c0bd03c83b9c 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -12,10 +12,10 @@ static inline void pm_runtime_remove(struct device *dev) {} #ifdef CONFIG_PM_SLEEP -/* - * main.c - */ +/* kernel/power/main.c */ +extern int pm_async_enabled; +/* drivers/base/power/main.c */ extern struct list_head dpm_list; /* The active device list */ static inline struct device *to_device(struct list_head *entry) diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index 40d7720a4b21..626dd147b75f 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -85,6 +85,19 @@ static int __pm_runtime_idle(struct device *dev) dev->bus->pm->runtime_idle(dev); spin_lock_irq(&dev->power.lock); + } else if (dev->type && dev->type->pm && dev->type->pm->runtime_idle) { + spin_unlock_irq(&dev->power.lock); + + dev->type->pm->runtime_idle(dev); + + spin_lock_irq(&dev->power.lock); + } else if (dev->class && dev->class->pm + && dev->class->pm->runtime_idle) { + spin_unlock_irq(&dev->power.lock); + + dev->class->pm->runtime_idle(dev); + + spin_lock_irq(&dev->power.lock); } dev->power.idle_notification = false; @@ -194,6 +207,22 @@ int __pm_runtime_suspend(struct device *dev, bool from_wq) spin_lock_irq(&dev->power.lock); dev->power.runtime_error = retval; + } else if (dev->type && dev->type->pm + && dev->type->pm->runtime_suspend) { + spin_unlock_irq(&dev->power.lock); + + retval = dev->type->pm->runtime_suspend(dev); + + spin_lock_irq(&dev->power.lock); + dev->power.runtime_error = retval; + } else if (dev->class && dev->class->pm + && 
dev->class->pm->runtime_suspend) { + spin_unlock_irq(&dev->power.lock); + + retval = dev->class->pm->runtime_suspend(dev); + + spin_lock_irq(&dev->power.lock); + dev->power.runtime_error = retval; } else { retval = -ENOSYS; } @@ -359,6 +388,22 @@ int __pm_runtime_resume(struct device *dev, bool from_wq) spin_lock_irq(&dev->power.lock); dev->power.runtime_error = retval; + } else if (dev->type && dev->type->pm + && dev->type->pm->runtime_resume) { + spin_unlock_irq(&dev->power.lock); + + retval = dev->type->pm->runtime_resume(dev); + + spin_lock_irq(&dev->power.lock); + dev->power.runtime_error = retval; + } else if (dev->class && dev->class->pm + && dev->class->pm->runtime_resume) { + spin_unlock_irq(&dev->power.lock); + + retval = dev->class->pm->runtime_resume(dev); + + spin_lock_irq(&dev->power.lock); + dev->power.runtime_error = retval; } else { retval = -ENOSYS; } @@ -966,6 +1011,50 @@ void pm_runtime_enable(struct device *dev) EXPORT_SYMBOL_GPL(pm_runtime_enable); /** + * pm_runtime_forbid - Block run-time PM of a device. + * @dev: Device to handle. + * + * Increase the device's usage count and clear its power.runtime_auto flag, + * so that it cannot be suspended at run time until pm_runtime_allow() is called + * for it. + */ +void pm_runtime_forbid(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + if (!dev->power.runtime_auto) + goto out; + + dev->power.runtime_auto = false; + atomic_inc(&dev->power.usage_count); + __pm_runtime_resume(dev, false); + + out: + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_forbid); + +/** + * pm_runtime_allow - Unblock run-time PM of a device. + * @dev: Device to handle. + * + * Decrease the device's usage count and set its power.runtime_auto flag. + */ +void pm_runtime_allow(struct device *dev) +{ + spin_lock_irq(&dev->power.lock); + if (dev->power.runtime_auto) + goto out; + + dev->power.runtime_auto = true; + if (atomic_dec_and_test(&dev->power.usage_count)) + __pm_runtime_idle(dev); + + out: + spin_unlock_irq(&dev->power.lock); +} +EXPORT_SYMBOL_GPL(pm_runtime_allow); + +/** * pm_runtime_init - Initialize run-time PM fields in given device object. * @dev: Device object to initialize. */ @@ -983,6 +1072,7 @@ void pm_runtime_init(struct device *dev) atomic_set(&dev->power.child_count, 0); pm_suspend_ignore_children(dev, false); + dev->power.runtime_auto = true; dev->power.request_pending = false; dev->power.request = RPM_REQ_NONE; diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c index 596aeecfdffe..86fd9373447e 100644 --- a/drivers/base/power/sysfs.c +++ b/drivers/base/power/sysfs.c @@ -4,9 +4,25 @@ #include <linux/device.h> #include <linux/string.h> +#include <linux/pm_runtime.h> #include "power.h" /* + * control - Report/change current runtime PM setting of the device + * + * Runtime power management of a device can be blocked with the help of + * this attribute. All devices have one of the following two values for + * the power/control file: + * + * + "auto\n" to allow the device to be power managed at run time; + * + "on\n" to prevent the device from being power managed at run time; + * + * The default for all devices is "auto", which means that devices may be + * subject to automatic power management, depending on their drivers. + * Changing this attribute to "on" prevents the driver from power managing + * the device at run time. Doing that while the device is suspended causes + * it to be woken up. 
+ * * wakeup - Report/change current wakeup option for device * * Some devices support "wakeup" events, which are hardware signals @@ -38,11 +54,61 @@ * wakeup events internally (unless they are disabled), keeping * their hardware in low power modes whenever they're unused. This * saves runtime power, without requiring system-wide sleep states. + * + * async - Report/change current async suspend setting for the device + * + * Asynchronous suspend and resume of the device during system-wide power + * state transitions can be enabled by writing "enabled" to this file. + * Analogously, if "disabled" is written to this file, the device will be + * suspended and resumed synchronously. + * + * All devices have one of the following two values for power/async: + * + * + "enabled\n" to permit the asynchronous suspend/resume of the device; + * + "disabled\n" to forbid it; + * + * NOTE: It generally is unsafe to permit the asynchronous suspend/resume + * of a device unless it is certain that all of the PM dependencies of the + * device are known to the PM core. However, for some devices this + * attribute is set to "enabled" by bus type code or device drivers and in + * that cases it should be safe to leave the default value. */ static const char enabled[] = "enabled"; static const char disabled[] = "disabled"; +#ifdef CONFIG_PM_RUNTIME +static const char ctrl_auto[] = "auto"; +static const char ctrl_on[] = "on"; + +static ssize_t control_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%s\n", + dev->power.runtime_auto ? ctrl_auto : ctrl_on); +} + +static ssize_t control_store(struct device * dev, struct device_attribute *attr, + const char * buf, size_t n) +{ + char *cp; + int len = n; + + cp = memchr(buf, '\n', n); + if (cp) + len = cp - buf; + if (len == sizeof ctrl_auto - 1 && strncmp(buf, ctrl_auto, len) == 0) + pm_runtime_allow(dev); + else if (len == sizeof ctrl_on - 1 && strncmp(buf, ctrl_on, len) == 0) + pm_runtime_forbid(dev); + else + return -EINVAL; + return n; +} + +static DEVICE_ATTR(control, 0644, control_show, control_store); +#endif + static ssize_t wake_show(struct device * dev, struct device_attribute *attr, char * buf) { @@ -77,9 +143,43 @@ wake_store(struct device * dev, struct device_attribute *attr, static DEVICE_ATTR(wakeup, 0644, wake_show, wake_store); +#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG +static ssize_t async_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sprintf(buf, "%s\n", + device_async_suspend_enabled(dev) ? enabled : disabled); +} + +static ssize_t async_store(struct device *dev, struct device_attribute *attr, + const char *buf, size_t n) +{ + char *cp; + int len = n; + + cp = memchr(buf, '\n', n); + if (cp) + len = cp - buf; + if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0) + device_enable_async_suspend(dev); + else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0) + device_disable_async_suspend(dev); + else + return -EINVAL; + return n; +} + +static DEVICE_ATTR(async, 0644, async_show, async_store); +#endif /* CONFIG_PM_SLEEP_ADVANCED_DEBUG */ static struct attribute * power_attrs[] = { +#ifdef CONFIG_PM_RUNTIME + &dev_attr_control.attr, +#endif &dev_attr_wakeup.attr, +#ifdef CONFIG_PM_SLEEP_ADVANCED_DEBUG + &dev_attr_async.attr, +#endif NULL, }; static struct attribute_group pm_attr_group = { |
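The core.c hunks constify the attribute arguments of device_create_file(), device_remove_file() and their binary counterparts. A minimal sketch of what this allows, with a hypothetical "foo" attribute and illustrative names throughout, is a driver keeping its struct device_attribute in read-only data:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>

/* Hypothetical read-only attribute; names are illustrative only. */
static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
			char *buf)
{
	return sprintf(buf, "example\n");
}

/* Can live in rodata now that device_create_file() takes a const pointer. */
static const struct device_attribute dev_attr_foo =
	__ATTR(foo, 0444, foo_show, NULL);

static int example_add_attr(struct device *dev)
{
	return device_create_file(dev, &dev_attr_foo);
}

static void example_remove_attr(struct device *dev)
{
	device_remove_file(dev, &dev_attr_foo);
}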
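memory.c gains an atomic notifier chain for memory isolation events next to the existing hotplug chain, fired through memory_isolate_notify(). A sketch of a client registering on it, assuming the declarations sit in <linux/memory.h> alongside register_memory_notifier(); the callback body and all "example" names are illustrative:

#include <linux/init.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int example_isolate_event(struct notifier_block *nb,
				 unsigned long action, void *arg)
{
	/*
	 * Runs atomically from memory_isolate_notify(); inspect the request
	 * passed in *arg by the caller and return NOTIFY_OK or NOTIFY_BAD.
	 */
	return NOTIFY_OK;
}

static struct notifier_block example_isolate_nb = {
	.notifier_call = example_isolate_event,
};

static int __init example_init(void)
{
	return register_memory_isolate_notifier(&example_isolate_nb);
}

static void __exit example_exit(void)
{
	unregister_memory_isolate_notifier(&example_isolate_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");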
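The platform.c change only exports platform_device_register_data(), making the helper usable from modules. A sketch under assumed names ("example-codec" and its platform data are hypothetical); the core copies the data, so a local constant is sufficient:

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct example_codec_pdata {
	int irq_gpio;
};

static struct platform_device *example_pdev;

static int __init example_init(void)
{
	static const struct example_codec_pdata pdata = { .irq_gpio = 42 };

	example_pdev = platform_device_register_data(NULL, "example-codec",
						     -1, &pdata,
						     sizeof(pdata));
	if (IS_ERR(example_pdev))
		return PTR_ERR(example_pdev);
	return 0;
}

static void __exit example_exit(void)
{
	platform_device_unregister(example_pdev);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");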
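The power/main.c changes add per-device completions, an async_schedule()-based suspend/resume path for devices with power.async_suspend set, and device_pm_wait_for_dev() for ordering constraints the device hierarchy does not express. A sketch of a driver opting in; the companion lookup and all "example" names are hypothetical, while device_enable_async_suspend() is the opt-in helper referenced by the new sysfs code:

#include <linux/device.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

/* Hypothetical lookup of a companion device this one depends on. */
extern struct device *example_get_companion(struct device *dev);

static int example_probe(struct platform_device *pdev)
{
	/*
	 * Only safe because every suspend/resume ordering constraint is
	 * either parent/child or handled explicitly in example_resume().
	 */
	device_enable_async_suspend(&pdev->dev);
	return 0;
}

static int example_resume(struct device *dev)
{
	/* Do not touch the hardware before the companion has resumed. */
	device_pm_wait_for_dev(dev, example_get_companion(dev));

	/* ... reprogram the device ... */
	return 0;
}

static const struct dev_pm_ops example_pm_ops = {
	.resume = example_resume,
};

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
		.pm	= &example_pm_ops,
	},
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}

static void __exit example_exit(void)
{
	platform_driver_unregister(&example_driver);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");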
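runtime.c now falls back to dev->type->pm and then dev->class->pm when the bus type provides no runtime callbacks. A sketch of supplying them through a device_type (names hypothetical, callback bodies reduced to comments):

#include <linux/device.h>
#include <linux/pm_runtime.h>

static int example_runtime_suspend(struct device *dev)
{
	/* Put the hardware into its low-power state. */
	return 0;
}

static int example_runtime_resume(struct device *dev)
{
	/* Bring the hardware back to full power. */
	return 0;
}

static int example_runtime_idle(struct device *dev)
{
	/* Nothing keeps the device busy; ask for a runtime suspend. */
	return pm_runtime_suspend(dev);
}

static const struct dev_pm_ops example_type_pm_ops = {
	.runtime_suspend = example_runtime_suspend,
	.runtime_resume	 = example_runtime_resume,
	.runtime_idle	 = example_runtime_idle,
};

static struct device_type example_type = {
	.name	= "example",
	.pm	= &example_type_pm_ops,
};

/* Set dev->type = &example_type before device_register(dev). */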
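The new power/control attribute in sysfs.c is backed by pm_runtime_forbid() and pm_runtime_allow() from runtime.c. A driver that wants runtime PM kept off until userspace explicitly writes "auto" to the file might do something like the following sketch (the probe/remove bodies are hypothetical):

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int example_probe(struct platform_device *pdev)
{
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	/*
	 * Bump the usage count and clear runtime_auto, so the device stays
	 * active until userspace opts in with
	 *   echo auto > /sys/devices/.../power/control
	 * (writing "on" calls pm_runtime_forbid() again).
	 */
	pm_runtime_forbid(&pdev->dev);
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	pm_runtime_disable(&pdev->dev);
	return 0;
}

/* Hooked up through a struct platform_driver in the usual way. */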