Diffstat (limited to 'drivers/base')
 drivers/base/Kconfig                 |  27
 drivers/base/Makefile                |   1
 drivers/base/attribute_container.c   |  14
 drivers/base/core.c                  |  16
 drivers/base/dd.c                    |   2
 drivers/base/devcoredump.c           | 265
 drivers/base/devres.c                |  15
 drivers/base/dma-coherent.c          | 151
 drivers/base/dma-contiguous.c        |  67
 drivers/base/dma-mapping.c           |  72
 drivers/base/firmware_class.c        |   3
 drivers/base/memory.c                |  42
 drivers/base/node.c                  |   4
 drivers/base/platform.c              |  16
 drivers/base/power/clock_ops.c       |  19
 drivers/base/power/common.c          |  52
 drivers/base/power/domain.c          | 903
 drivers/base/power/domain_governor.c |   7
 drivers/base/power/main.c            |  10
 drivers/base/power/sysfs.c           |  24
 drivers/base/power/wakeup.c          |  16
 drivers/base/syscore.c               |   7
 22 files changed, 1298 insertions(+), 435 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 4e7f0ff83ae7..df04227d00cf 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -165,6 +165,30 @@ config FW_LOADER_USER_HELPER_FALLBACK
If you are unsure about this, say N here.
+config WANT_DEV_COREDUMP
+ bool
+ help
+ Drivers should "select" this option if they desire to use the
+ device coredump mechanism.
+
+config ALLOW_DEV_COREDUMP
+ bool "Allow device coredump" if EXPERT
+ default y
+ help
+ This option controls whether the device coredump mechanism is
+ available at all; if disabled, the code is omitted even when drivers
+ that can use it are enabled.
+ Say 'N' on security-sensitive systems, or on systems that never want
+ to access this information, so that neither the code nor any dump
+ data is kept around.
+
+ If unsure, say Y.
+
+config DEV_COREDUMP
+ bool
+ default y if WANT_DEV_COREDUMP
+ depends on ALLOW_DEV_COREDUMP
+
config DEBUG_DRIVER
bool "Driver Core verbose debug messages"
depends on DEBUG_KERNEL
@@ -231,6 +255,9 @@ config DMA_CMA
to allocate big physically-contiguous blocks of memory for use with
hardware components that do not support I/O map nor scatter-gather.
+ You can disable CMA by specifying "cma=0" on the kernel's command
+ line.
+
For more information see <include/linux/dma-contiguous.h>.
If unsure, say "n".
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 4aab26ec0292..6922cd6850a2 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor.o
obj-$(CONFIG_REGMAP) += regmap/
obj-$(CONFIG_SOC_BUS) += soc.o
obj-$(CONFIG_PINCTRL) += pinctrl.o
+obj-$(CONFIG_DEV_COREDUMP) += devcoredump.o
ccflags-$(CONFIG_DEBUG_DRIVER) := -DDEBUG
diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
index b84ca8f13f9e..3ead3af4be61 100644
--- a/drivers/base/attribute_container.c
+++ b/drivers/base/attribute_container.c
@@ -74,9 +74,9 @@ int
attribute_container_register(struct attribute_container *cont)
{
INIT_LIST_HEAD(&cont->node);
- klist_init(&cont->containers,internal_container_klist_get,
+ klist_init(&cont->containers, internal_container_klist_get,
internal_container_klist_put);
-
+
mutex_lock(&attribute_container_mutex);
list_add_tail(&cont->node, &attribute_container_list);
mutex_unlock(&attribute_container_mutex);
@@ -104,14 +104,14 @@ attribute_container_unregister(struct attribute_container *cont)
spin_unlock(&cont->containers.k_lock);
mutex_unlock(&attribute_container_mutex);
return retval;
-
+
}
EXPORT_SYMBOL_GPL(attribute_container_unregister);
/* private function used as class release */
static void attribute_container_release(struct device *classdev)
{
- struct internal_container *ic
+ struct internal_container *ic
= container_of(classdev, struct internal_container, classdev);
struct device *dev = classdev->parent;
@@ -184,8 +184,8 @@ attribute_container_add_device(struct device *dev,
struct klist_node *n = klist_next(iter); \
n ? container_of(n, typeof(*pos), member) : \
({ klist_iter_exit(iter) ; NULL; }); \
- }) ) != NULL; )
-
+ })) != NULL;)
+
/**
* attribute_container_remove_device - make device eligible for removal.
@@ -247,7 +247,7 @@ attribute_container_remove_device(struct device *dev,
* container, then use attribute_container_trigger() instead.
*/
void
-attribute_container_device_trigger(struct device *dev,
+attribute_container_device_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct device *))
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 20da3ad1696b..842d04707de6 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -724,12 +724,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
return &dir->kobj;
}
+static DEFINE_MUTEX(gdp_mutex);
static struct kobject *get_device_parent(struct device *dev,
struct device *parent)
{
if (dev->class) {
- static DEFINE_MUTEX(gdp_mutex);
struct kobject *kobj = NULL;
struct kobject *parent_kobj;
struct kobject *k;
@@ -793,7 +793,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
glue_dir->kset != &dev->class->p->glue_dirs)
return;
+ mutex_lock(&gdp_mutex);
kobject_put(glue_dir);
+ mutex_unlock(&gdp_mutex);
}
static void cleanup_device_parent(struct device *dev)
@@ -1211,6 +1213,9 @@ void device_del(struct device *dev)
*/
if (platform_notify_remove)
platform_notify_remove(dev);
+ if (dev->bus)
+ blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
+ BUS_NOTIFY_REMOVED_DEVICE, dev);
kobject_uevent(&dev->kobj, KOBJ_REMOVE);
cleanup_device_parent(dev);
kobject_del(&dev->kobj);
@@ -2007,6 +2012,8 @@ create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
return 0;
pos += snprintf(hdr + pos, hdrlen - pos, "SUBSYSTEM=%s", subsys);
+ if (pos >= hdrlen)
+ goto overflow;
/*
* Add device identifier DEVICE=:
@@ -2038,7 +2045,14 @@ create_syslog_header(const struct device *dev, char *hdr, size_t hdrlen)
"DEVICE=+%s:%s", subsys, dev_name(dev));
}
+ if (pos >= hdrlen)
+ goto overflow;
+
return pos;
+
+overflow:
+ dev_WARN(dev, "device/subsystem name too long");
+ return 0;
}
int dev_vprintk_emit(int level, const struct device *dev,
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index e4ffbcf2f519..cdc779cf79a3 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -54,7 +54,7 @@ static LIST_HEAD(deferred_probe_active_list);
static struct workqueue_struct *deferred_wq;
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
-/**
+/*
* deferred_probe_work_func() - Retry probing devices in the active list.
*/
static void deferred_probe_work_func(struct work_struct *work)
diff --git a/drivers/base/devcoredump.c b/drivers/base/devcoredump.c
new file mode 100644
index 000000000000..96614b04544c
--- /dev/null
+++ b/drivers/base/devcoredump.c
@@ -0,0 +1,265 @@
+/*
+ * This file is provided under the GPLv2 license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * Copyright(c) 2014 Intel Mobile Communications GmbH
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * The full GNU General Public License is included in this distribution
+ * in the file called COPYING.
+ *
+ * Contact Information:
+ * Intel Linux Wireless <ilw@linux.intel.com>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ * Author: Johannes Berg <johannes@sipsolutions.net>
+ */
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/devcoredump.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/workqueue.h>
+
+/* if data isn't read by userspace after 5 minutes then delete it */
+#define DEVCD_TIMEOUT (HZ * 60 * 5)
+
+struct devcd_entry {
+ struct device devcd_dev;
+ const void *data;
+ size_t datalen;
+ struct module *owner;
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ const void *data, size_t datalen);
+ void (*free)(const void *data);
+ struct delayed_work del_wk;
+ struct device *failing_dev;
+};
+
+static struct devcd_entry *dev_to_devcd(struct device *dev)
+{
+ return container_of(dev, struct devcd_entry, devcd_dev);
+}
+
+static void devcd_dev_release(struct device *dev)
+{
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ devcd->free(devcd->data);
+ module_put(devcd->owner);
+
+ /*
+ * this seems racy, but I don't see a notifier or such on
+ * a struct device to know when it goes away?
+ */
+ if (devcd->failing_dev->kobj.sd)
+ sysfs_delete_link(&devcd->failing_dev->kobj, &dev->kobj,
+ "devcoredump");
+
+ put_device(devcd->failing_dev);
+ kfree(devcd);
+}
+
+static void devcd_del(struct work_struct *wk)
+{
+ struct devcd_entry *devcd;
+
+ devcd = container_of(wk, struct devcd_entry, del_wk.work);
+
+ device_del(&devcd->devcd_dev);
+ put_device(&devcd->devcd_dev);
+}
+
+static ssize_t devcd_data_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ return devcd->read(buffer, offset, count, devcd->data, devcd->datalen);
+}
+
+static ssize_t devcd_data_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *bin_attr,
+ char *buffer, loff_t offset, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ mod_delayed_work(system_wq, &devcd->del_wk, 0);
+
+ return count;
+}
+
+static struct bin_attribute devcd_attr_data = {
+ .attr = { .name = "data", .mode = S_IRUSR | S_IWUSR, },
+ .size = 0,
+ .read = devcd_data_read,
+ .write = devcd_data_write,
+};
+
+static struct bin_attribute *devcd_dev_bin_attrs[] = {
+ &devcd_attr_data, NULL,
+};
+
+static const struct attribute_group devcd_dev_group = {
+ .bin_attrs = devcd_dev_bin_attrs,
+};
+
+static const struct attribute_group *devcd_dev_groups[] = {
+ &devcd_dev_group, NULL,
+};
+
+static struct class devcd_class = {
+ .name = "devcoredump",
+ .owner = THIS_MODULE,
+ .dev_release = devcd_dev_release,
+ .dev_groups = devcd_dev_groups,
+};
+
+static ssize_t devcd_readv(char *buffer, loff_t offset, size_t count,
+ const void *data, size_t datalen)
+{
+ if (offset > datalen)
+ return -EINVAL;
+
+ if (offset + count > datalen)
+ count = datalen - offset;
+
+ if (count)
+ memcpy(buffer, ((u8 *)data) + offset, count);
+
+ return count;
+}
+
+/**
+ * dev_coredumpv - create device coredump with vmalloc data
+ * @dev: the struct device for the crashed device
+ * @data: vmalloc data containing the device coredump
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ *
+ * This function takes ownership of the vmalloc'ed data and will free
+ * it when it is no longer used. See dev_coredumpm() for more information.
+ */
+void dev_coredumpv(struct device *dev, const void *data, size_t datalen,
+ gfp_t gfp)
+{
+ dev_coredumpm(dev, NULL, data, datalen, gfp, devcd_readv, vfree);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpv);
+
+static int devcd_match_failing(struct device *dev, const void *failing)
+{
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ return devcd->failing_dev == failing;
+}
+
+/**
+ * dev_coredumpm - create device coredump with read/free methods
+ * @dev: the struct device for the crashed device
+ * @owner: the module that contains the read/free functions, use %THIS_MODULE
+ * @data: data cookie for the @read/@free functions
+ * @datalen: length of the data
+ * @gfp: allocation flags
+ * @read: function to read from the given buffer
+ * @free: function to free the given buffer
+ *
+ * Creates a new device coredump for the given device. If a previous one hasn't
+ * been read yet, the new coredump is discarded. The data lifetime is determined
+ * by the device coredump framework and when it is no longer needed the @free
+ * function will be called to free the data.
+ */
+void dev_coredumpm(struct device *dev, struct module *owner,
+ const void *data, size_t datalen, gfp_t gfp,
+ ssize_t (*read)(char *buffer, loff_t offset, size_t count,
+ const void *data, size_t datalen),
+ void (*free)(const void *data))
+{
+ static atomic_t devcd_count = ATOMIC_INIT(0);
+ struct devcd_entry *devcd;
+ struct device *existing;
+
+ existing = class_find_device(&devcd_class, NULL, dev,
+ devcd_match_failing);
+ if (existing) {
+ put_device(existing);
+ goto free;
+ }
+
+ if (!try_module_get(owner))
+ goto free;
+
+ devcd = kzalloc(sizeof(*devcd), gfp);
+ if (!devcd)
+ goto put_module;
+
+ devcd->owner = owner;
+ devcd->data = data;
+ devcd->datalen = datalen;
+ devcd->read = read;
+ devcd->free = free;
+ devcd->failing_dev = get_device(dev);
+
+ device_initialize(&devcd->devcd_dev);
+
+ dev_set_name(&devcd->devcd_dev, "devcd%d",
+ atomic_inc_return(&devcd_count));
+ devcd->devcd_dev.class = &devcd_class;
+
+ if (device_add(&devcd->devcd_dev))
+ goto put_device;
+
+ if (sysfs_create_link(&devcd->devcd_dev.kobj, &dev->kobj,
+ "failing_device"))
+ /* nothing - symlink will be missing */;
+
+ if (sysfs_create_link(&dev->kobj, &devcd->devcd_dev.kobj,
+ "devcoredump"))
+ /* nothing - symlink will be missing */;
+
+ INIT_DELAYED_WORK(&devcd->del_wk, devcd_del);
+ schedule_delayed_work(&devcd->del_wk, DEVCD_TIMEOUT);
+
+ return;
+ put_device:
+ put_device(&devcd->devcd_dev);
+ put_module:
+ module_put(owner);
+ free:
+ free(data);
+}
+EXPORT_SYMBOL_GPL(dev_coredumpm);
+
+static int __init devcoredump_init(void)
+{
+ return class_register(&devcd_class);
+}
+__initcall(devcoredump_init);
+
+static int devcd_free(struct device *dev, void *data)
+{
+ struct devcd_entry *devcd = dev_to_devcd(dev);
+
+ flush_delayed_work(&devcd->del_wk);
+ return 0;
+}
+
+static void __exit devcoredump_exit(void)
+{
+ class_for_each_device(&devcd_class, NULL, NULL, devcd_free);
+ class_unregister(&devcd_class);
+}
+__exitcall(devcoredump_exit);
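
As a usage illustration, a minimal sketch (hypothetical driver, names invented here) of how a driver enabled via WANT_DEV_COREDUMP might hand crash data to the interface added in this file; dev_coredumpv() takes ownership of the vmalloc'ed buffer and vfree()s it once the dump has been read or the timeout expires:

#include <linux/device.h>
#include <linux/devcoredump.h>
#include <linux/gfp.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

static void foo_fw_crash(struct device *dev, const void *state, size_t len)
{
	void *dump = vmalloc(len);

	if (!dump)
		return;
	memcpy(dump, state, len);

	/* Ownership of 'dump' passes to devcoredump; it is vfree()d later. */
	dev_coredumpv(dev, dump, len, GFP_KERNEL);
}

The dump then appears under the devcoredump class as devcd<N>/data, with "failing_device" and "devcoredump" symlinks created as shown above.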
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
index 69d9b0c89a01..c8a53d1e019f 100644
--- a/drivers/base/devres.c
+++ b/drivers/base/devres.c
@@ -817,13 +817,13 @@ char *devm_kstrdup(struct device *dev, const char *s, gfp_t gfp)
EXPORT_SYMBOL_GPL(devm_kstrdup);
/**
- * devm_kvasprintf - Allocate resource managed space
- * for the formatted string.
+ * devm_kvasprintf - Allocate resource managed space and format a string
+ * into that.
* @dev: Device to allocate memory for
* @gfp: the GFP mask used in the devm_kmalloc() call when
* allocating memory
- * @fmt: the formatted string to duplicate
- * @ap: the list of tokens to be placed in the formatted string
+ * @fmt: The printf()-style format string
+ * @ap: Arguments for the format string
* RETURNS:
* Pointer to allocated string on success, NULL on failure.
*/
@@ -849,12 +849,13 @@ char *devm_kvasprintf(struct device *dev, gfp_t gfp, const char *fmt,
EXPORT_SYMBOL(devm_kvasprintf);
/**
- * devm_kasprintf - Allocate resource managed space
- * and copy an existing formatted string into that
+ * devm_kasprintf - Allocate resource managed space and format a string
+ * into that.
* @dev: Device to allocate memory for
* @gfp: the GFP mask used in the devm_kmalloc() call when
* allocating memory
- * @fmt: the string to duplicate
+ * @fmt: The printf()-style format string
+ * @...: Arguments for the format string
* RETURNS:
* Pointer to allocated string on success, NULL on failure.
*/
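
A minimal sketch of the devm_kasprintf() interface documented above (hypothetical probe helper, names invented here); the returned string is devres-managed and freed automatically when the device is detached:

#include <linux/device.h>
#include <linux/gfp.h>

static int foo_name_channel(struct device *dev, int index)
{
	const char *name;

	/* Allocates and formats in one step; no explicit kfree() needed. */
	name = devm_kasprintf(dev, GFP_KERNEL, "foo-chan%d", index);
	if (!name)
		return -ENOMEM;

	dev_info(dev, "using channel name %s\n", name);
	return 0;
}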
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index 7d6e84a51424..55b83983a9c0 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -14,11 +14,14 @@ struct dma_coherent_mem {
int size;
int flags;
unsigned long *bitmap;
+ spinlock_t spinlock;
};
-int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
- dma_addr_t device_addr, size_t size, int flags)
+static int dma_init_coherent_memory(phys_addr_t phys_addr, dma_addr_t device_addr,
+ size_t size, int flags,
+ struct dma_coherent_mem **mem)
{
+ struct dma_coherent_mem *dma_mem = NULL;
void __iomem *mem_base = NULL;
int pages = size >> PAGE_SHIFT;
int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
@@ -27,40 +30,77 @@ int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
goto out;
if (!size)
goto out;
- if (dev->dma_mem)
- goto out;
-
- /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
mem_base = ioremap(phys_addr, size);
if (!mem_base)
goto out;
- dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
- if (!dev->dma_mem)
+ dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+ if (!dma_mem)
goto out;
- dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
- if (!dev->dma_mem->bitmap)
- goto free1_out;
+ dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+ if (!dma_mem->bitmap)
+ goto out;
+
+ dma_mem->virt_base = mem_base;
+ dma_mem->device_base = device_addr;
+ dma_mem->pfn_base = PFN_DOWN(phys_addr);
+ dma_mem->size = pages;
+ dma_mem->flags = flags;
+ spin_lock_init(&dma_mem->spinlock);
- dev->dma_mem->virt_base = mem_base;
- dev->dma_mem->device_base = device_addr;
- dev->dma_mem->pfn_base = PFN_DOWN(phys_addr);
- dev->dma_mem->size = pages;
- dev->dma_mem->flags = flags;
+ *mem = dma_mem;
if (flags & DMA_MEMORY_MAP)
return DMA_MEMORY_MAP;
return DMA_MEMORY_IO;
- free1_out:
- kfree(dev->dma_mem);
- out:
+out:
+ kfree(dma_mem);
if (mem_base)
iounmap(mem_base);
return 0;
}
+
+static void dma_release_coherent_memory(struct dma_coherent_mem *mem)
+{
+ if (!mem)
+ return;
+ iounmap(mem->virt_base);
+ kfree(mem->bitmap);
+ kfree(mem);
+}
+
+static int dma_assign_coherent_memory(struct device *dev,
+ struct dma_coherent_mem *mem)
+{
+ if (dev->dma_mem)
+ return -EBUSY;
+
+ dev->dma_mem = mem;
+ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+ return 0;
+}
+
+int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
+ dma_addr_t device_addr, size_t size, int flags)
+{
+ struct dma_coherent_mem *mem;
+ int ret;
+
+ ret = dma_init_coherent_memory(phys_addr, device_addr, size, flags,
+ &mem);
+ if (ret == 0)
+ return 0;
+
+ if (dma_assign_coherent_memory(dev, mem) == 0)
+ return ret;
+
+ dma_release_coherent_memory(mem);
+ return 0;
+}
EXPORT_SYMBOL(dma_declare_coherent_memory);
void dma_release_declared_memory(struct device *dev)
@@ -69,10 +109,8 @@ void dma_release_declared_memory(struct device *dev)
if (!mem)
return;
+ dma_release_coherent_memory(mem);
dev->dma_mem = NULL;
- iounmap(mem->virt_base);
- kfree(mem->bitmap);
- kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
@@ -80,6 +118,7 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
dma_addr_t device_addr, size_t size)
{
struct dma_coherent_mem *mem = dev->dma_mem;
+ unsigned long flags;
int pos, err;
size += device_addr & ~PAGE_MASK;
@@ -87,8 +126,11 @@ void *dma_mark_declared_memory_occupied(struct device *dev,
if (!mem)
return ERR_PTR(-EINVAL);
+ spin_lock_irqsave(&mem->spinlock, flags);
pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
+ spin_unlock_irqrestore(&mem->spinlock, flags);
+
if (err != 0)
return ERR_PTR(err);
return mem->virt_base + (pos << PAGE_SHIFT);
@@ -115,6 +157,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
{
struct dma_coherent_mem *mem;
int order = get_order(size);
+ unsigned long flags;
int pageno;
if (!dev)
@@ -124,6 +167,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
return 0;
*ret = NULL;
+ spin_lock_irqsave(&mem->spinlock, flags);
if (unlikely(size > (mem->size << PAGE_SHIFT)))
goto err;
@@ -138,10 +182,12 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
*ret = mem->virt_base + (pageno << PAGE_SHIFT);
memset(*ret, 0, size);
+ spin_unlock_irqrestore(&mem->spinlock, flags);
return 1;
err:
+ spin_unlock_irqrestore(&mem->spinlock, flags);
/*
* In the case where the allocation can not be satisfied from the
* per-device area, try to fall back to generic memory if the
@@ -171,8 +217,11 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
if (mem && vaddr >= mem->virt_base && vaddr <
(mem->virt_base + (mem->size << PAGE_SHIFT))) {
int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+ unsigned long flags;
+ spin_lock_irqsave(&mem->spinlock, flags);
bitmap_release_region(mem->bitmap, page, order);
+ spin_unlock_irqrestore(&mem->spinlock, flags);
return 1;
}
return 0;
@@ -218,3 +267,61 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
return 0;
}
EXPORT_SYMBOL(dma_mmap_from_coherent);
+
+/*
+ * Support for reserved memory regions defined in device tree
+ */
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+static int rmem_dma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+ struct dma_coherent_mem *mem = rmem->priv;
+
+ if (!mem &&
+ dma_init_coherent_memory(rmem->base, rmem->base, rmem->size,
+ DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE,
+ &mem) != DMA_MEMORY_MAP) {
+ pr_err("Reserved memory: failed to init DMA memory pool at %pa, size %ld MiB\n",
+ &rmem->base, (unsigned long)rmem->size / SZ_1M);
+ return -ENODEV;
+ }
+ rmem->priv = mem;
+ dma_assign_coherent_memory(dev, mem);
+ return 0;
+}
+
+static void rmem_dma_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ dev->dma_mem = NULL;
+}
+
+static const struct reserved_mem_ops rmem_dma_ops = {
+ .device_init = rmem_dma_device_init,
+ .device_release = rmem_dma_device_release,
+};
+
+static int __init rmem_dma_setup(struct reserved_mem *rmem)
+{
+ unsigned long node = rmem->fdt_node;
+
+ if (of_get_flat_dt_prop(node, "reusable", NULL))
+ return -EINVAL;
+
+#ifdef CONFIG_ARM
+ if (!of_get_flat_dt_prop(node, "no-map", NULL)) {
+ pr_err("Reserved memory: regions without no-map are not yet supported\n");
+ return -EINVAL;
+ }
+#endif
+
+ rmem->ops = &rmem_dma_ops;
+ pr_info("Reserved memory: created DMA memory pool at %pa, size %ld MiB\n",
+ &rmem->base, (unsigned long)rmem->size / SZ_1M);
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(dma, "shared-dma-pool", rmem_dma_setup);
+#endif
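
For illustration, a minimal sketch (the address, size, and names are hypothetical) of declaring a device-private coherent pool with the reworked helpers above; note that dma_declare_coherent_memory() still returns 0 on failure and DMA_MEMORY_MAP or DMA_MEMORY_IO on success:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

static int foo_setup_sram_pool(struct device *dev)
{
	/* Hypothetical 64 KiB of on-chip SRAM at bus/phys address 0x40000000. */
	if (!dma_declare_coherent_memory(dev, 0x40000000, 0x40000000, SZ_64K,
					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
		return -ENOMEM;

	/* dma_alloc_coherent() on this device now allocates from the pool. */
	return 0;
}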
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 6606abdf880c..950fff9ce453 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -211,3 +211,70 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
{
return cma_release(dev_get_cma_area(dev), pages, count);
}
+
+/*
+ * Support for reserved memory regions defined in device tree
+ */
+#ifdef CONFIG_OF_RESERVED_MEM
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_reserved_mem.h>
+
+#undef pr_fmt
+#define pr_fmt(fmt) fmt
+
+static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
+{
+ dev_set_cma_area(dev, rmem->priv);
+ return 0;
+}
+
+static void rmem_cma_device_release(struct reserved_mem *rmem,
+ struct device *dev)
+{
+ dev_set_cma_area(dev, NULL);
+}
+
+static const struct reserved_mem_ops rmem_cma_ops = {
+ .device_init = rmem_cma_device_init,
+ .device_release = rmem_cma_device_release,
+};
+
+static int __init rmem_cma_setup(struct reserved_mem *rmem)
+{
+ phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
+ phys_addr_t mask = align - 1;
+ unsigned long node = rmem->fdt_node;
+ struct cma *cma;
+ int err;
+
+ if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
+ of_get_flat_dt_prop(node, "no-map", NULL))
+ return -EINVAL;
+
+ if ((rmem->base & mask) || (rmem->size & mask)) {
+ pr_err("Reserved memory: incorrect alignment of CMA region\n");
+ return -EINVAL;
+ }
+
+ err = cma_init_reserved_mem(rmem->base, rmem->size, 0, &cma);
+ if (err) {
+ pr_err("Reserved memory: unable to setup CMA region\n");
+ return err;
+ }
+ /* Architecture specific contiguous memory fixup. */
+ dma_contiguous_early_fixup(rmem->base, rmem->size);
+
+ if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
+ dma_contiguous_set_default(cma);
+
+ rmem->ops = &rmem_cma_ops;
+ rmem->priv = cma;
+
+ pr_info("Reserved memory: created CMA memory pool at %pa, size %ld MiB\n",
+ &rmem->base, (unsigned long)rmem->size / SZ_1M);
+
+ return 0;
+}
+RESERVEDMEM_OF_DECLARE(cma, "shared-dma-pool", rmem_cma_setup);
+#endif
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
index 6cd08e145bfa..9e8bbdd470ca 100644
--- a/drivers/base/dma-mapping.c
+++ b/drivers/base/dma-mapping.c
@@ -10,6 +10,8 @@
#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
#include <asm-generic/dma-coherent.h>
/*
@@ -267,3 +269,73 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
return ret;
}
EXPORT_SYMBOL(dma_common_mmap);
+
+#ifdef CONFIG_MMU
+/*
+ * remaps an array of PAGE_SIZE pages into another vm_area
+ * Cannot be used in non-sleeping contexts
+ */
+void *dma_common_pages_remap(struct page **pages, size_t size,
+ unsigned long vm_flags, pgprot_t prot,
+ const void *caller)
+{
+ struct vm_struct *area;
+
+ area = get_vm_area_caller(size, vm_flags, caller);
+ if (!area)
+ return NULL;
+
+ area->pages = pages;
+
+ if (map_vm_area(area, prot, pages)) {
+ vunmap(area->addr);
+ return NULL;
+ }
+
+ return area->addr;
+}
+
+/*
+ * remaps an allocated contiguous region into another vm_area.
+ * Cannot be used in non-sleeping contexts
+ */
+
+void *dma_common_contiguous_remap(struct page *page, size_t size,
+ unsigned long vm_flags,
+ pgprot_t prot, const void *caller)
+{
+ int i;
+ struct page **pages;
+ void *ptr;
+ unsigned long pfn;
+
+ pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);
+ if (!pages)
+ return NULL;
+
+ for (i = 0, pfn = page_to_pfn(page); i < (size >> PAGE_SHIFT); i++)
+ pages[i] = pfn_to_page(pfn + i);
+
+ ptr = dma_common_pages_remap(pages, size, vm_flags, prot, caller);
+
+ kfree(pages);
+
+ return ptr;
+}
+
+/*
+ * unmaps a range previously mapped by dma_common_*_remap
+ */
+void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags)
+{
+ struct vm_struct *area = find_vm_area(cpu_addr);
+
+ if (!area || (area->flags & vm_flags) != vm_flags) {
+ WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
+ return;
+ }
+
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
+}
+#endif
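
A minimal sketch of how architecture DMA code might use the new remapping helpers (the VM_USERMAP flag and write-combine pgprot are illustrative choices, and these helpers sleep, so callers must be in process context):

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

static void *foo_remap_wc(struct page *page, size_t size)
{
	/* Remap 'size' bytes of contiguous memory starting at 'page'. */
	return dma_common_contiguous_remap(page, size, VM_USERMAP,
					   pgprot_writecombine(PAGE_KERNEL),
					   __builtin_return_address(0));
}

static void foo_unmap(void *cpu_addr, size_t size)
{
	/* Must pass the same vm_flags the mapping was created with. */
	dma_common_free_remap(cpu_addr, size, VM_USERMAP);
}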
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index bf424305f3dc..3d785ebb48d3 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1105,6 +1105,9 @@ _request_firmware(const struct firmware **firmware_p, const char *name,
if (!firmware_p)
return -EINVAL;
+ if (!name || name[0] == '\0')
+ return -EINVAL;
+
ret = _request_firmware_prepare(&fw, name, device);
if (ret <= 0) /* error or already assigned */
goto out;
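
A minimal sketch (hypothetical names) of the request_firmware() path hardened above; a NULL or empty firmware name now fails cleanly with -EINVAL before any lookup is attempted:

#include <linux/device.h>
#include <linux/firmware.h>

static int foo_load_fw(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int ret;

	ret = request_firmware(&fw, name, dev); /* -EINVAL if name is NULL or "" */
	if (ret)
		return ret;

	/* ... program fw->data (fw->size bytes) into the device ... */

	release_firmware(fw);
	return 0;
}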
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index a2e13e250bba..7c5d87191b28 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -373,6 +373,45 @@ static ssize_t show_phys_device(struct device *dev,
return sprintf(buf, "%d\n", mem->phys_device);
}
+#ifdef CONFIG_MEMORY_HOTREMOVE
+static ssize_t show_valid_zones(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct memory_block *mem = to_memory_block(dev);
+ unsigned long start_pfn, end_pfn;
+ unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
+ struct page *first_page;
+ struct zone *zone;
+
+ start_pfn = section_nr_to_pfn(mem->start_section_nr);
+ end_pfn = start_pfn + nr_pages;
+ first_page = pfn_to_page(start_pfn);
+
+ /* A block that contains more than one zone cannot be offlined. */
+ if (!test_pages_in_a_zone(start_pfn, end_pfn))
+ return sprintf(buf, "none\n");
+
+ zone = page_zone(first_page);
+
+ if (zone_idx(zone) == ZONE_MOVABLE - 1) {
+ /* The mem block is the last memory block of this zone. */
+ if (end_pfn == zone_end_pfn(zone))
+ return sprintf(buf, "%s %s\n",
+ zone->name, (zone + 1)->name);
+ }
+
+ if (zone_idx(zone) == ZONE_MOVABLE) {
+ /* The mem block is the first memory block of ZONE_MOVABLE. */
+ if (start_pfn == zone->zone_start_pfn)
+ return sprintf(buf, "%s %s\n",
+ zone->name, (zone - 1)->name);
+ }
+
+ return sprintf(buf, "%s\n", zone->name);
+}
+static DEVICE_ATTR(valid_zones, 0444, show_valid_zones, NULL);
+#endif
+
static DEVICE_ATTR(phys_index, 0444, show_mem_start_phys_index, NULL);
static DEVICE_ATTR(state, 0644, show_mem_state, store_mem_state);
static DEVICE_ATTR(phys_device, 0444, show_phys_device, NULL);
@@ -523,6 +562,9 @@ static struct attribute *memory_memblk_attrs[] = {
&dev_attr_state.attr,
&dev_attr_phys_device.attr,
&dev_attr_removable.attr,
+#ifdef CONFIG_MEMORY_HOTREMOVE
+ &dev_attr_valid_zones.attr,
+#endif
NULL
};
diff --git a/drivers/base/node.c b/drivers/base/node.c
index c6d3ae05f1ca..472168cd0c97 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -289,8 +289,6 @@ static int register_node(struct node *node, int num, struct node *parent)
device_create_file(&node->dev, &dev_attr_distance);
device_create_file(&node->dev, &dev_attr_vmstat);
- scan_unevictable_register_node(node);
-
hugetlb_register_node(node);
compaction_register_node(node);
@@ -314,7 +312,6 @@ void unregister_node(struct node *node)
device_remove_file(&node->dev, &dev_attr_distance);
device_remove_file(&node->dev, &dev_attr_vmstat);
- scan_unevictable_unregister_node(node);
hugetlb_unregister_node(node); /* no-op, if memoryless node */
device_unregister(&node->dev);
@@ -603,7 +600,6 @@ void unregister_one_node(int nid)
return;
unregister_node(node_devices[nid]);
- kfree(node_devices[nid]);
node_devices[nid] = NULL;
}
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index ab4f4ce02722..b2afc29403f9 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -21,6 +21,7 @@
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
#include <linux/idr.h>
#include <linux/acpi.h>
#include <linux/clk/clk-conf.h>
@@ -506,11 +507,12 @@ static int platform_drv_probe(struct device *_dev)
if (ret < 0)
return ret;
- acpi_dev_pm_attach(_dev, true);
-
- ret = drv->probe(dev);
- if (ret)
- acpi_dev_pm_detach(_dev, true);
+ ret = dev_pm_domain_attach(_dev, true);
+ if (ret != -EPROBE_DEFER) {
+ ret = drv->probe(dev);
+ if (ret)
+ dev_pm_domain_detach(_dev, true);
+ }
if (drv->prevent_deferred_probe && ret == -EPROBE_DEFER) {
dev_warn(_dev, "probe deferral not supported\n");
@@ -532,7 +534,7 @@ static int platform_drv_remove(struct device *_dev)
int ret;
ret = drv->remove(dev);
- acpi_dev_pm_detach(_dev, true);
+ dev_pm_domain_detach(_dev, true);
return ret;
}
@@ -543,7 +545,7 @@ static void platform_drv_shutdown(struct device *_dev)
struct platform_device *dev = to_platform_device(_dev);
drv->shutdown(dev);
- acpi_dev_pm_detach(_dev, true);
+ dev_pm_domain_detach(_dev, true);
}
/**
diff --git a/drivers/base/power/clock_ops.c b/drivers/base/power/clock_ops.c
index b99e6c06ee67..78369305e069 100644
--- a/drivers/base/power/clock_ops.c
+++ b/drivers/base/power/clock_ops.c
@@ -368,8 +368,13 @@ int pm_clk_suspend(struct device *dev)
spin_lock_irqsave(&psd->lock, flags);
- list_for_each_entry_reverse(ce, &psd->clock_list, node)
- clk_disable(ce->clk);
+ list_for_each_entry_reverse(ce, &psd->clock_list, node) {
+ if (ce->status < PCE_STATUS_ERROR) {
+ if (ce->status == PCE_STATUS_ENABLED)
+ clk_disable(ce->clk);
+ ce->status = PCE_STATUS_ACQUIRED;
+ }
+ }
spin_unlock_irqrestore(&psd->lock, flags);
@@ -385,6 +390,7 @@ int pm_clk_resume(struct device *dev)
struct pm_subsys_data *psd = dev_to_psd(dev);
struct pm_clock_entry *ce;
unsigned long flags;
+ int ret;
dev_dbg(dev, "%s()\n", __func__);
@@ -394,8 +400,13 @@ int pm_clk_resume(struct device *dev)
spin_lock_irqsave(&psd->lock, flags);
- list_for_each_entry(ce, &psd->clock_list, node)
- __pm_clk_enable(dev, ce->clk);
+ list_for_each_entry(ce, &psd->clock_list, node) {
+ if (ce->status < PCE_STATUS_ERROR) {
+ ret = __pm_clk_enable(dev, ce->clk);
+ if (!ret)
+ ce->status = PCE_STATUS_ENABLED;
+ }
+ }
spin_unlock_irqrestore(&psd->lock, flags);
diff --git a/drivers/base/power/common.c b/drivers/base/power/common.c
index df2e5eeaeb05..b0f138806bbc 100644
--- a/drivers/base/power/common.c
+++ b/drivers/base/power/common.c
@@ -11,6 +11,8 @@
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pm_clock.h>
+#include <linux/acpi.h>
+#include <linux/pm_domain.h>
/**
* dev_pm_get_subsys_data - Create or refcount power.subsys_data for device.
@@ -82,3 +84,53 @@ int dev_pm_put_subsys_data(struct device *dev)
return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_put_subsys_data);
+
+/**
+ * dev_pm_domain_attach - Attach a device to its PM domain.
+ * @dev: Device to attach.
+ * @power_on: Used to indicate whether we should power on the device.
+ *
+ * The @dev may only be attached to a single PM domain. By iterating through
+ * the available alternatives we try to find a valid PM domain for the device.
+ * When attachment succeeds, the ->detach() callback in the struct dev_pm_domain
+ * should be assigned by the corresponding attach function.
+ *
+ * This function should typically be invoked from subsystem-level code during
+ * the probe phase, especially by subsystems that hold devices requiring
+ * power management through PM domains.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ *
+ * Returns 0 on successful attach to a PM domain, or a negative error code.
+ */
+int dev_pm_domain_attach(struct device *dev, bool power_on)
+{
+ int ret;
+
+ ret = acpi_dev_pm_attach(dev, power_on);
+ if (ret)
+ ret = genpd_dev_pm_attach(dev);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_attach);
+
+/**
+ * dev_pm_domain_detach - Detach a device from its PM domain.
+ * @dev: Device to detach.
+ * @power_off: Used to indicate whether we should power off the device.
+ *
+ * This function reverses the actions from dev_pm_domain_attach(), and thus
+ * tries to detach @dev from its PM domain. Typically it should be invoked
+ * from subsystem-level code during the remove phase.
+ *
+ * Callers must ensure proper synchronization of this function with power
+ * management callbacks.
+ */
+void dev_pm_domain_detach(struct device *dev, bool power_off)
+{
+ if (dev->pm_domain && dev->pm_domain->detach)
+ dev->pm_domain->detach(dev, power_off);
+}
+EXPORT_SYMBOL_GPL(dev_pm_domain_detach);
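
A minimal sketch (hypothetical bus code) of the new helpers in a subsystem probe path, mirroring the platform bus conversion earlier in this diff: attach before probing, detach again if probe fails, and abort only on -EPROBE_DEFER:

#include <linux/device.h>
#include <linux/pm_domain.h>

static int foo_bus_probe(struct device *dev,
			 int (*probe)(struct device *))
{
	int ret;

	/* Tries ACPI first, then genpd; other errors mean "no domain". */
	ret = dev_pm_domain_attach(dev, true);
	if (ret == -EPROBE_DEFER)
		return ret;

	ret = probe(dev);
	if (ret)
		dev_pm_domain_detach(dev, true);

	return ret;
}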
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index eee55c1e5fde..fb83d4acd400 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/io.h>
+#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/pm_domain.h>
#include <linux/pm_qos.h>
@@ -25,10 +26,6 @@
__routine = genpd->dev_ops.callback; \
if (__routine) { \
__ret = __routine(dev); \
- } else { \
- __routine = dev_gpd_data(dev)->ops.callback; \
- if (__routine) \
- __ret = __routine(dev); \
} \
__ret; \
})
@@ -70,8 +67,6 @@ static struct generic_pm_domain *pm_genpd_lookup_name(const char *domain_name)
return genpd;
}
-#ifdef CONFIG_PM
-
struct generic_pm_domain *dev_to_genpd(struct device *dev)
{
if (IS_ERR_OR_NULL(dev->pm_domain))
@@ -147,13 +142,13 @@ static void genpd_recalc_cpu_exit_latency(struct generic_pm_domain *genpd)
{
s64 usecs64;
- if (!genpd->cpu_data)
+ if (!genpd->cpuidle_data)
return;
usecs64 = genpd->power_on_latency_ns;
do_div(usecs64, NSEC_PER_USEC);
- usecs64 += genpd->cpu_data->saved_exit_latency;
- genpd->cpu_data->idle_state->exit_latency = usecs64;
+ usecs64 += genpd->cpuidle_data->saved_exit_latency;
+ genpd->cpuidle_data->idle_state->exit_latency = usecs64;
}
/**
@@ -193,9 +188,9 @@ static int __pm_genpd_poweron(struct generic_pm_domain *genpd)
return 0;
}
- if (genpd->cpu_data) {
+ if (genpd->cpuidle_data) {
cpuidle_pause_and_lock();
- genpd->cpu_data->idle_state->disabled = true;
+ genpd->cpuidle_data->idle_state->disabled = true;
cpuidle_resume_and_unlock();
goto out;
}
@@ -285,8 +280,6 @@ int pm_genpd_name_poweron(const char *domain_name)
return genpd ? pm_genpd_poweron(genpd) : -EINVAL;
}
-#endif /* CONFIG_PM */
-
#ifdef CONFIG_PM_RUNTIME
static int genpd_start_dev_no_timing(struct generic_pm_domain *genpd,
@@ -368,9 +361,19 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
struct device *dev = pdd->dev;
int ret = 0;
- if (gpd_data->need_restore)
+ if (gpd_data->need_restore > 0)
return 0;
+ /*
+ * If the value of the need_restore flag is still unknown at this point,
+ * we trust that pm_genpd_poweroff() has verified that the device is
+ * already runtime PM suspended.
+ */
+ if (gpd_data->need_restore < 0) {
+ gpd_data->need_restore = 1;
+ return 0;
+ }
+
mutex_unlock(&genpd->lock);
genpd_start_dev(genpd, dev);
@@ -380,7 +383,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
mutex_lock(&genpd->lock);
if (!ret)
- gpd_data->need_restore = true;
+ gpd_data->need_restore = 1;
return ret;
}
@@ -396,12 +399,17 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
{
struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
struct device *dev = pdd->dev;
- bool need_restore = gpd_data->need_restore;
+ int need_restore = gpd_data->need_restore;
- gpd_data->need_restore = false;
+ gpd_data->need_restore = 0;
mutex_unlock(&genpd->lock);
genpd_start_dev(genpd, dev);
+
+ /*
+ * Call genpd_restore_dev() for recently added devices too (need_restore
+ * is negative then).
+ */
if (need_restore)
genpd_restore_dev(genpd, dev);
@@ -430,7 +438,7 @@ static bool genpd_abort_poweroff(struct generic_pm_domain *genpd)
* Queue up the execution of pm_genpd_poweroff() unless it's already been done
* before.
*/
-void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
+static void genpd_queue_power_off_work(struct generic_pm_domain *genpd)
{
queue_work(pm_wq, &genpd->power_off_work);
}
@@ -520,17 +528,17 @@ static int pm_genpd_poweroff(struct generic_pm_domain *genpd)
}
}
- if (genpd->cpu_data) {
+ if (genpd->cpuidle_data) {
/*
- * If cpu_data is set, cpuidle should turn the domain off when
- * the CPU in it is idle. In that case we don't decrement the
- * subdomain counts of the master domains, so that power is not
- * removed from the current domain prematurely as a result of
- * cutting off the masters' power.
+ * If cpuidle_data is set, cpuidle should turn the domain off
+ * when the CPU in it is idle. In that case we don't decrement
+ * the subdomain counts of the master domains, so that power is
+ * not removed from the current domain prematurely as a result
+ * of cutting off the masters' power.
*/
genpd->status = GPD_STATE_POWER_OFF;
cpuidle_pause_and_lock();
- genpd->cpu_data->idle_state->disabled = false;
+ genpd->cpuidle_data->idle_state->disabled = false;
cpuidle_resume_and_unlock();
goto out;
}
@@ -610,6 +618,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
static int pm_genpd_runtime_suspend(struct device *dev)
{
struct generic_pm_domain *genpd;
+ struct generic_pm_domain_data *gpd_data;
bool (*stop_ok)(struct device *__dev);
int ret;
@@ -619,8 +628,6 @@ static int pm_genpd_runtime_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- might_sleep_if(!genpd->dev_irq_safe);
-
stop_ok = genpd->gov ? genpd->gov->stop_ok : NULL;
if (stop_ok && !stop_ok(dev))
return -EBUSY;
@@ -637,6 +644,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
return 0;
mutex_lock(&genpd->lock);
+
+ /*
+ * If we have an unknown state of the need_restore flag, it means none
+ * of the runtime PM callbacks has been invoked yet. Let's update the
+ * flag to reflect that the current state is active.
+ */
+ gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
+ if (gpd_data->need_restore < 0)
+ gpd_data->need_restore = 0;
+
genpd->in_progress++;
pm_genpd_poweroff(genpd);
genpd->in_progress--;
@@ -665,8 +682,6 @@ static int pm_genpd_runtime_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- might_sleep_if(!genpd->dev_irq_safe);
-
/* If power.irq_safe, the PM domain is never powered off. */
if (dev->power.irq_safe)
return genpd_start_dev_no_timing(genpd, dev);
@@ -733,6 +748,13 @@ void pm_genpd_poweroff_unused(void)
mutex_unlock(&gpd_list_lock);
}
+static int __init genpd_poweroff_unused(void)
+{
+ pm_genpd_poweroff_unused();
+ return 0;
+}
+late_initcall(genpd_poweroff_unused);
+
#else
static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
@@ -741,6 +763,9 @@ static inline int genpd_dev_pm_qos_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static inline void
+genpd_queue_power_off_work(struct generic_pm_domain *genpd) {}
+
static inline void genpd_power_off_work_fn(struct work_struct *work) {}
#define pm_genpd_runtime_suspend NULL
@@ -774,46 +799,6 @@ static bool genpd_dev_active_wakeup(struct generic_pm_domain *genpd,
return GENPD_DEV_CALLBACK(genpd, bool, active_wakeup, dev);
}
-static int genpd_suspend_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, suspend, dev);
-}
-
-static int genpd_suspend_late(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, suspend_late, dev);
-}
-
-static int genpd_resume_early(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, resume_early, dev);
-}
-
-static int genpd_resume_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, resume, dev);
-}
-
-static int genpd_freeze_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, freeze, dev);
-}
-
-static int genpd_freeze_late(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, freeze_late, dev);
-}
-
-static int genpd_thaw_early(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, thaw_early, dev);
-}
-
-static int genpd_thaw_dev(struct generic_pm_domain *genpd, struct device *dev)
-{
- return GENPD_DEV_CALLBACK(genpd, int, thaw, dev);
-}
-
/**
* pm_genpd_sync_poweroff - Synchronously power off a PM domain and its masters.
* @genpd: PM domain to power off, if possible.
@@ -995,7 +980,7 @@ static int pm_genpd_suspend(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_suspend_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_suspend(dev);
}
/**
@@ -1016,7 +1001,7 @@ static int pm_genpd_suspend_late(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_suspend_late(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_suspend_late(dev);
}
/**
@@ -1103,7 +1088,7 @@ static int pm_genpd_resume_early(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_resume_early(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_resume_early(dev);
}
/**
@@ -1124,7 +1109,7 @@ static int pm_genpd_resume(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_resume_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_resume(dev);
}
/**
@@ -1145,7 +1130,7 @@ static int pm_genpd_freeze(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_freeze_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_freeze(dev);
}
/**
@@ -1167,7 +1152,7 @@ static int pm_genpd_freeze_late(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_freeze_late(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_freeze_late(dev);
}
/**
@@ -1231,7 +1216,7 @@ static int pm_genpd_thaw_early(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_thaw_early(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_thaw_early(dev);
}
/**
@@ -1252,7 +1237,7 @@ static int pm_genpd_thaw(struct device *dev)
if (IS_ERR(genpd))
return -EINVAL;
- return genpd->suspend_power_off ? 0 : genpd_thaw_dev(genpd, dev);
+ return genpd->suspend_power_off ? 0 : pm_generic_thaw(dev);
}
/**
@@ -1344,13 +1329,13 @@ static void pm_genpd_complete(struct device *dev)
}
/**
- * pm_genpd_syscore_switch - Switch power during system core suspend or resume.
+ * genpd_syscore_switch - Switch power during system core suspend or resume.
* @dev: Device that normally is marked as "always on" to switch power for.
*
* This routine may only be called during the system core (syscore) suspend or
* resume phase for devices whose "always on" flags are set.
*/
-void pm_genpd_syscore_switch(struct device *dev, bool suspend)
+static void genpd_syscore_switch(struct device *dev, bool suspend)
{
struct generic_pm_domain *genpd;
@@ -1366,7 +1351,18 @@ void pm_genpd_syscore_switch(struct device *dev, bool suspend)
genpd->suspended_count--;
}
}
-EXPORT_SYMBOL_GPL(pm_genpd_syscore_switch);
+
+void pm_genpd_syscore_poweroff(struct device *dev)
+{
+ genpd_syscore_switch(dev, true);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweroff);
+
+void pm_genpd_syscore_poweron(struct device *dev)
+{
+ genpd_syscore_switch(dev, false);
+}
+EXPORT_SYMBOL_GPL(pm_genpd_syscore_poweron);
#else
@@ -1466,10 +1462,13 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
spin_unlock_irq(&dev->power.lock);
+ if (genpd->attach_dev)
+ genpd->attach_dev(genpd, dev);
+
mutex_lock(&gpd_data->lock);
gpd_data->base.dev = dev;
list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
- gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF;
+ gpd_data->need_restore = -1;
gpd_data->td.constraint_changed = true;
gpd_data->td.effective_constraint_ns = -1;
mutex_unlock(&gpd_data->lock);
@@ -1484,39 +1483,6 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
}
/**
- * __pm_genpd_of_add_device - Add a device to an I/O PM domain.
- * @genpd_node: Device tree node pointer representing a PM domain to which the
- * the device is added to.
- * @dev: Device to be added.
- * @td: Set of PM QoS timing parameters to attach to the device.
- */
-int __pm_genpd_of_add_device(struct device_node *genpd_node, struct device *dev,
- struct gpd_timing_data *td)
-{
- struct generic_pm_domain *genpd = NULL, *gpd;
-
- dev_dbg(dev, "%s()\n", __func__);
-
- if (IS_ERR_OR_NULL(genpd_node) || IS_ERR_OR_NULL(dev))
- return -EINVAL;
-
- mutex_lock(&gpd_list_lock);
- list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
- if (gpd->of_node == genpd_node) {
- genpd = gpd;
- break;
- }
- }
- mutex_unlock(&gpd_list_lock);
-
- if (!genpd)
- return -EINVAL;
-
- return __pm_genpd_add_device(genpd, dev, td);
-}
-
-
-/**
* __pm_genpd_name_add_device - Find I/O PM domain and add a device to it.
* @domain_name: Name of the PM domain to add the device to.
* @dev: Device to be added.
@@ -1558,6 +1524,9 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
genpd->device_count--;
genpd->max_off_time_changed = true;
+ if (genpd->detach_dev)
+ genpd->detach_dev(genpd, dev);
+
spin_lock_irq(&dev->power.lock);
dev->pm_domain = NULL;
@@ -1603,7 +1572,7 @@ void pm_genpd_dev_need_restore(struct device *dev, bool val)
psd = dev_to_psd(dev);
if (psd && psd->domain_data)
- to_gpd_data(psd->domain_data)->need_restore = val;
+ to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
spin_unlock_irqrestore(&dev->power.lock, flags);
}
@@ -1744,112 +1713,6 @@ int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
}
/**
- * pm_genpd_add_callbacks - Add PM domain callbacks to a given device.
- * @dev: Device to add the callbacks to.
- * @ops: Set of callbacks to add.
- * @td: Timing data to add to the device along with the callbacks (optional).
- *
- * Every call to this routine should be balanced with a call to
- * __pm_genpd_remove_callbacks() and they must not be nested.
- */
-int pm_genpd_add_callbacks(struct device *dev, struct gpd_dev_ops *ops,
- struct gpd_timing_data *td)
-{
- struct generic_pm_domain_data *gpd_data_new, *gpd_data = NULL;
- int ret = 0;
-
- if (!(dev && ops))
- return -EINVAL;
-
- gpd_data_new = __pm_genpd_alloc_dev_data(dev);
- if (!gpd_data_new)
- return -ENOMEM;
-
- pm_runtime_disable(dev);
- device_pm_lock();
-
- ret = dev_pm_get_subsys_data(dev);
- if (ret)
- goto out;
-
- spin_lock_irq(&dev->power.lock);
-
- if (dev->power.subsys_data->domain_data) {
- gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- } else {
- gpd_data = gpd_data_new;
- dev->power.subsys_data->domain_data = &gpd_data->base;
- }
- gpd_data->refcount++;
- gpd_data->ops = *ops;
- if (td)
- gpd_data->td = *td;
-
- spin_unlock_irq(&dev->power.lock);
-
- out:
- device_pm_unlock();
- pm_runtime_enable(dev);
-
- if (gpd_data != gpd_data_new)
- __pm_genpd_free_dev_data(dev, gpd_data_new);
-
- return ret;
-}
-EXPORT_SYMBOL_GPL(pm_genpd_add_callbacks);
-
-/**
- * __pm_genpd_remove_callbacks - Remove PM domain callbacks from a given device.
- * @dev: Device to remove the callbacks from.
- * @clear_td: If set, clear the device's timing data too.
- *
- * This routine can only be called after pm_genpd_add_callbacks().
- */
-int __pm_genpd_remove_callbacks(struct device *dev, bool clear_td)
-{
- struct generic_pm_domain_data *gpd_data = NULL;
- bool remove = false;
- int ret = 0;
-
- if (!(dev && dev->power.subsys_data))
- return -EINVAL;
-
- pm_runtime_disable(dev);
- device_pm_lock();
-
- spin_lock_irq(&dev->power.lock);
-
- if (dev->power.subsys_data->domain_data) {
- gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
- gpd_data->ops = (struct gpd_dev_ops){ NULL };
- if (clear_td)
- gpd_data->td = (struct gpd_timing_data){ 0 };
-
- if (--gpd_data->refcount == 0) {
- dev->power.subsys_data->domain_data = NULL;
- remove = true;
- }
- } else {
- ret = -EINVAL;
- }
-
- spin_unlock_irq(&dev->power.lock);
-
- device_pm_unlock();
- pm_runtime_enable(dev);
-
- if (ret)
- return ret;
-
- dev_pm_put_subsys_data(dev);
- if (remove)
- __pm_genpd_free_dev_data(dev, gpd_data);
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
-
-/**
* pm_genpd_attach_cpuidle - Connect the given PM domain with cpuidle.
* @genpd: PM domain to be connected with cpuidle.
* @state: cpuidle state this domain can disable/enable.
@@ -1861,7 +1724,7 @@ EXPORT_SYMBOL_GPL(__pm_genpd_remove_callbacks);
int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
{
struct cpuidle_driver *cpuidle_drv;
- struct gpd_cpu_data *cpu_data;
+ struct gpd_cpuidle_data *cpuidle_data;
struct cpuidle_state *idle_state;
int ret = 0;
@@ -1870,12 +1733,12 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
genpd_acquire_lock(genpd);
- if (genpd->cpu_data) {
+ if (genpd->cpuidle_data) {
ret = -EEXIST;
goto out;
}
- cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
- if (!cpu_data) {
+ cpuidle_data = kzalloc(sizeof(*cpuidle_data), GFP_KERNEL);
+ if (!cpuidle_data) {
ret = -ENOMEM;
goto out;
}
@@ -1893,9 +1756,9 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
ret = -EAGAIN;
goto err;
}
- cpu_data->idle_state = idle_state;
- cpu_data->saved_exit_latency = idle_state->exit_latency;
- genpd->cpu_data = cpu_data;
+ cpuidle_data->idle_state = idle_state;
+ cpuidle_data->saved_exit_latency = idle_state->exit_latency;
+ genpd->cpuidle_data = cpuidle_data;
genpd_recalc_cpu_exit_latency(genpd);
out:
@@ -1906,7 +1769,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
cpuidle_driver_unref();
err_drv:
- kfree(cpu_data);
+ kfree(cpuidle_data);
goto out;
}
@@ -1929,7 +1792,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
*/
int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
{
- struct gpd_cpu_data *cpu_data;
+ struct gpd_cpuidle_data *cpuidle_data;
struct cpuidle_state *idle_state;
int ret = 0;
@@ -1938,20 +1801,20 @@ int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
genpd_acquire_lock(genpd);
- cpu_data = genpd->cpu_data;
- if (!cpu_data) {
+ cpuidle_data = genpd->cpuidle_data;
+ if (!cpuidle_data) {
ret = -ENODEV;
goto out;
}
- idle_state = cpu_data->idle_state;
+ idle_state = cpuidle_data->idle_state;
if (!idle_state->disabled) {
ret = -EAGAIN;
goto out;
}
- idle_state->exit_latency = cpu_data->saved_exit_latency;
+ idle_state->exit_latency = cpuidle_data->saved_exit_latency;
cpuidle_driver_unref();
- genpd->cpu_data = NULL;
- kfree(cpu_data);
+ genpd->cpuidle_data = NULL;
+ kfree(cpuidle_data);
out:
genpd_release_lock(genpd);
@@ -1970,17 +1833,13 @@ int pm_genpd_name_detach_cpuidle(const char *name)
/* Default device callbacks for generic PM domains. */
/**
- * pm_genpd_default_save_state - Default "save device state" for PM domians.
+ * pm_genpd_default_save_state - Default "save device state" for PM domains.
* @dev: Device to handle.
*/
static int pm_genpd_default_save_state(struct device *dev)
{
int (*cb)(struct device *__dev);
- cb = dev_gpd_data(dev)->ops.save_state;
- if (cb)
- return cb(dev);
-
if (dev->type && dev->type->pm)
cb = dev->type->pm->runtime_suspend;
else if (dev->class && dev->class->pm)
@@ -1997,17 +1856,13 @@ static int pm_genpd_default_save_state(struct device *dev)
}
/**
- * pm_genpd_default_restore_state - Default PM domians "restore device state".
+ * pm_genpd_default_restore_state - Default PM domains "restore device state".
* @dev: Device to handle.
*/
static int pm_genpd_default_restore_state(struct device *dev)
{
int (*cb)(struct device *__dev);
- cb = dev_gpd_data(dev)->ops.restore_state;
- if (cb)
- return cb(dev);
-
if (dev->type && dev->type->pm)
cb = dev->type->pm->runtime_resume;
else if (dev->class && dev->class->pm)
@@ -2023,109 +1878,6 @@ static int pm_genpd_default_restore_state(struct device *dev)
return cb ? cb(dev) : 0;
}
-#ifdef CONFIG_PM_SLEEP
-
-/**
- * pm_genpd_default_suspend - Default "device suspend" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_suspend(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend;
-
- return cb ? cb(dev) : pm_generic_suspend(dev);
-}
-
-/**
- * pm_genpd_default_suspend_late - Default "late device suspend" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_suspend_late(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.suspend_late;
-
- return cb ? cb(dev) : pm_generic_suspend_late(dev);
-}
-
-/**
- * pm_genpd_default_resume_early - Default "early device resume" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_resume_early(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume_early;
-
- return cb ? cb(dev) : pm_generic_resume_early(dev);
-}
-
-/**
- * pm_genpd_default_resume - Default "device resume" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_resume(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.resume;
-
- return cb ? cb(dev) : pm_generic_resume(dev);
-}
-
-/**
- * pm_genpd_default_freeze - Default "device freeze" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_freeze(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze;
-
- return cb ? cb(dev) : pm_generic_freeze(dev);
-}
-
-/**
- * pm_genpd_default_freeze_late - Default "late device freeze" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_freeze_late(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.freeze_late;
-
- return cb ? cb(dev) : pm_generic_freeze_late(dev);
-}
-
-/**
- * pm_genpd_default_thaw_early - Default "early device thaw" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_thaw_early(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw_early;
-
- return cb ? cb(dev) : pm_generic_thaw_early(dev);
-}
-
-/**
- * pm_genpd_default_thaw - Default "device thaw" for PM domians.
- * @dev: Device to handle.
- */
-static int pm_genpd_default_thaw(struct device *dev)
-{
- int (*cb)(struct device *__dev) = dev_gpd_data(dev)->ops.thaw;
-
- return cb ? cb(dev) : pm_generic_thaw(dev);
-}
-
-#else /* !CONFIG_PM_SLEEP */
-
-#define pm_genpd_default_suspend NULL
-#define pm_genpd_default_suspend_late NULL
-#define pm_genpd_default_resume_early NULL
-#define pm_genpd_default_resume NULL
-#define pm_genpd_default_freeze NULL
-#define pm_genpd_default_freeze_late NULL
-#define pm_genpd_default_thaw_early NULL
-#define pm_genpd_default_thaw NULL
-
-#endif /* !CONFIG_PM_SLEEP */
-
/**
* pm_genpd_init - Initialize a generic I/O PM domain object.
* @genpd: PM domain object to initialize.
@@ -2177,15 +1929,452 @@ void pm_genpd_init(struct generic_pm_domain *genpd,
genpd->domain.ops.complete = pm_genpd_complete;
genpd->dev_ops.save_state = pm_genpd_default_save_state;
genpd->dev_ops.restore_state = pm_genpd_default_restore_state;
- genpd->dev_ops.suspend = pm_genpd_default_suspend;
- genpd->dev_ops.suspend_late = pm_genpd_default_suspend_late;
- genpd->dev_ops.resume_early = pm_genpd_default_resume_early;
- genpd->dev_ops.resume = pm_genpd_default_resume;
- genpd->dev_ops.freeze = pm_genpd_default_freeze;
- genpd->dev_ops.freeze_late = pm_genpd_default_freeze_late;
- genpd->dev_ops.thaw_early = pm_genpd_default_thaw_early;
- genpd->dev_ops.thaw = pm_genpd_default_thaw;
mutex_lock(&gpd_list_lock);
list_add(&genpd->gpd_list_node, &gpd_list);
mutex_unlock(&gpd_list_lock);
}
+
+#ifdef CONFIG_PM_GENERIC_DOMAINS_OF
+/*
+ * Device Tree based PM domain providers.
+ *
+ * The code below implements generic device tree based PM domain providers that
+ * bind device tree nodes with generic PM domains registered in the system.
+ *
+ * Any driver that registers generic PM domains and needs to support binding of
+ * devices to these domains is supposed to register a PM domain provider, which
+ * maps a PM domain specifier retrieved from the device tree to a PM domain.
+ *
+ * Two simple mapping functions have been provided for convenience:
+ * - __of_genpd_xlate_simple() for 1:1 device tree node to PM domain mapping.
+ * - __of_genpd_xlate_onecell() for mapping of multiple PM domains per node by
+ * index.
+ */
+
+/**
+ * struct of_genpd_provider - PM domain provider registration structure
+ * @link: Entry in global list of PM domain providers
+ * @node: Pointer to device tree node of PM domain provider
+ * @xlate: Provider-specific xlate callback mapping a set of specifier cells
+ * into a PM domain.
+ * @data: context pointer to be passed into @xlate callback
+ */
+struct of_genpd_provider {
+ struct list_head link;
+ struct device_node *node;
+ genpd_xlate_t xlate;
+ void *data;
+};
+
+/* List of registered PM domain providers. */
+static LIST_HEAD(of_genpd_providers);
+/* Mutex to protect the list above. */
+static DEFINE_MUTEX(of_genpd_mutex);
+
+/**
+ * __of_genpd_xlate_simple() - Xlate function for direct node-domain mapping
+ * @genpdspec: OF phandle args to map into a PM domain
+ * @data: xlate function private data - pointer to struct generic_pm_domain
+ *
+ * This is a generic xlate function that can be used to model PM domains that
+ * have their own device tree nodes. The private data of the xlate function
+ * needs to be a valid pointer to struct generic_pm_domain.
+ */
+struct generic_pm_domain *__of_genpd_xlate_simple(
+ struct of_phandle_args *genpdspec,
+ void *data)
+{
+ if (genpdspec->args_count != 0)
+ return ERR_PTR(-EINVAL);
+ return data;
+}
+EXPORT_SYMBOL_GPL(__of_genpd_xlate_simple);
+
+/**
+ * __of_genpd_xlate_onecell() - Xlate function using a single index.
+ * @genpdspec: OF phandle args to map into a PM domain
+ * @data: xlate function private data - pointer to struct genpd_onecell_data
+ *
+ * This is a generic xlate function that can be used to model simple PM domain
+ * controllers that have one device tree node and provide multiple PM domains.
+ * A single cell is used as an index into an array of PM domains specified in
+ * the genpd_onecell_data struct when registering the provider.
+ */
+struct generic_pm_domain *__of_genpd_xlate_onecell(
+ struct of_phandle_args *genpdspec,
+ void *data)
+{
+ struct genpd_onecell_data *genpd_data = data;
+ unsigned int idx = genpdspec->args[0];
+
+ if (genpdspec->args_count != 1)
+ return ERR_PTR(-EINVAL);
+
+ if (idx >= genpd_data->num_domains) {
+ pr_err("%s: invalid domain index %u\n", __func__, idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!genpd_data->domains[idx])
+ return ERR_PTR(-ENOENT);
+
+ return genpd_data->domains[idx];
+}
+EXPORT_SYMBOL_GPL(__of_genpd_xlate_onecell);
+
+/**
+ * __of_genpd_add_provider() - Register a PM domain provider for a node
+ * @np: Device node pointer associated with the PM domain provider.
+ * @xlate: Callback for decoding PM domain from phandle arguments.
+ * @data: Context pointer for @xlate callback.
+ */
+int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
+ void *data)
+{
+ struct of_genpd_provider *cp;
+
+ cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+ if (!cp)
+ return -ENOMEM;
+
+ cp->node = of_node_get(np);
+ cp->data = data;
+ cp->xlate = xlate;
+
+ mutex_lock(&of_genpd_mutex);
+ list_add(&cp->link, &of_genpd_providers);
+ mutex_unlock(&of_genpd_mutex);
+ pr_debug("Added domain provider from %s\n", np->full_name);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__of_genpd_add_provider);
+
+/**
+ * of_genpd_del_provider() - Remove a previously registered PM domain provider
+ * @np: Device node pointer associated with the PM domain provider
+ */
+void of_genpd_del_provider(struct device_node *np)
+{
+ struct of_genpd_provider *cp;
+
+ mutex_lock(&of_genpd_mutex);
+ list_for_each_entry(cp, &of_genpd_providers, link) {
+ if (cp->node == np) {
+ list_del(&cp->link);
+ of_node_put(cp->node);
+ kfree(cp);
+ break;
+ }
+ }
+ mutex_unlock(&of_genpd_mutex);
+}
+EXPORT_SYMBOL_GPL(of_genpd_del_provider);
+
+/**
+ * of_genpd_get_from_provider() - Look up a PM domain
+ * @genpdspec: OF phandle args to use for look-up
+ *
+ * Looks for a PM domain provider under the node specified by @genpdspec and,
+ * if one is found, uses the provider's xlate function to map the phandle args
+ * to a PM domain.
+ *
+ * Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
+ * on failure.
+ */
+static struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec)
+{
+ struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
+ struct of_genpd_provider *provider;
+
+ mutex_lock(&of_genpd_mutex);
+
+ /* Check if we have such a provider in our list */
+ list_for_each_entry(provider, &of_genpd_providers, link) {
+ if (provider->node == genpdspec->np)
+ genpd = provider->xlate(genpdspec, provider->data);
+ if (!IS_ERR(genpd))
+ break;
+ }
+
+ mutex_unlock(&of_genpd_mutex);
+
+ return genpd;
+}
+
+/**
+ * genpd_dev_pm_detach - Detach a device from its PM domain.
+ * @dev: Device to detach.
+ * @power_off: Currently not used
+ *
+ * Try to locate the generic PM domain that the device was previously
+ * attached to. If one is found, the device is detached from it.
+ */
+static void genpd_dev_pm_detach(struct device *dev, bool power_off)
+{
+ struct generic_pm_domain *pd = NULL, *gpd;
+ int ret = 0;
+
+ if (!dev->pm_domain)
+ return;
+
+ mutex_lock(&gpd_list_lock);
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ if (&gpd->domain == dev->pm_domain) {
+ pd = gpd;
+ break;
+ }
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ if (!pd)
+ return;
+
+ dev_dbg(dev, "removing from PM domain %s\n", pd->name);
+
+ while (1) {
+ ret = pm_genpd_remove_device(pd, dev);
+ if (ret != -EAGAIN)
+ break;
+ cond_resched();
+ }
+
+ if (ret < 0) {
+ dev_err(dev, "failed to remove from PM domain %s: %d",
+ pd->name, ret);
+ return;
+ }
+
+ /* Check if PM domain can be powered off after removing this device. */
+ genpd_queue_power_off_work(pd);
+}
+
+/**
+ * genpd_dev_pm_attach - Attach a device to its PM domain using DT.
+ * @dev: Device to attach.
+ *
+ * Parse the device's OF node to find a PM domain specifier. If one is found,
+ * attach the device to the retrieved pm_domain ops.
+ *
+ * Both generic and legacy Samsung-specific DT bindings are supported to keep
+ * backwards compatibility with existing DTBs.
+ *
+ * Returns 0 on successful attach, or a negative error code otherwise.
+ */
+int genpd_dev_pm_attach(struct device *dev)
+{
+ struct of_phandle_args pd_args;
+ struct generic_pm_domain *pd;
+ int ret;
+
+ if (!dev->of_node)
+ return -ENODEV;
+
+ if (dev->pm_domain)
+ return -EEXIST;
+
+ ret = of_parse_phandle_with_args(dev->of_node, "power-domains",
+ "#power-domain-cells", 0, &pd_args);
+ if (ret < 0) {
+ if (ret != -ENOENT)
+ return ret;
+
+ /*
+ * Try legacy Samsung-specific bindings
+ * (for backwards compatibility of DT ABI)
+ */
+ pd_args.args_count = 0;
+ pd_args.np = of_parse_phandle(dev->of_node,
+ "samsung,power-domain", 0);
+ if (!pd_args.np)
+ return -ENOENT;
+ }
+
+ pd = of_genpd_get_from_provider(&pd_args);
+ if (IS_ERR(pd)) {
+ dev_dbg(dev, "%s() failed to find PM domain: %ld\n",
+ __func__, PTR_ERR(pd));
+ of_node_put(dev->of_node);
+ return PTR_ERR(pd);
+ }
+
+ dev_dbg(dev, "adding to PM domain %s\n", pd->name);
+
+ while (1) {
+ ret = pm_genpd_add_device(pd, dev);
+ if (ret != -EAGAIN)
+ break;
+ cond_resched();
+ }
+
+ if (ret < 0) {
+ dev_err(dev, "failed to add to PM domain %s: %d",
+ pd->name, ret);
+ of_node_put(dev->of_node);
+ return ret;
+ }
+
+ dev->pm_domain->detach = genpd_dev_pm_detach;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
+#endif
+
+
+/*** debugfs support ***/
+
+#ifdef CONFIG_PM_ADVANCED_DEBUG
+#include <linux/pm.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/kobject.h>
+static struct dentry *pm_genpd_debugfs_dir;
+
+/*
+ * TODO: This function is a slightly modified version of rtpm_status_show
+ * from sysfs.c, but dependencies between PM_GENERIC_DOMAINS and PM_RUNTIME
+ * are too loose to generalize it.
+ */
+#ifdef CONFIG_PM_RUNTIME
+static void rtpm_status_str(struct seq_file *s, struct device *dev)
+{
+ static const char * const status_lookup[] = {
+ [RPM_ACTIVE] = "active",
+ [RPM_RESUMING] = "resuming",
+ [RPM_SUSPENDED] = "suspended",
+ [RPM_SUSPENDING] = "suspending"
+ };
+ const char *p = "";
+
+ if (dev->power.runtime_error)
+ p = "error";
+ else if (dev->power.disable_depth)
+ p = "unsupported";
+ else if (dev->power.runtime_status < ARRAY_SIZE(status_lookup))
+ p = status_lookup[dev->power.runtime_status];
+ else
+ WARN_ON(1);
+
+ seq_puts(s, p);
+}
+#else
+static void rtpm_status_str(struct seq_file *s, struct device *dev)
+{
+ seq_puts(s, "active");
+}
+#endif
+
+static int pm_genpd_summary_one(struct seq_file *s,
+ struct generic_pm_domain *gpd)
+{
+ static const char * const status_lookup[] = {
+ [GPD_STATE_ACTIVE] = "on",
+ [GPD_STATE_WAIT_MASTER] = "wait-master",
+ [GPD_STATE_BUSY] = "busy",
+ [GPD_STATE_REPEAT] = "off-in-progress",
+ [GPD_STATE_POWER_OFF] = "off"
+ };
+ struct pm_domain_data *pm_data;
+ const char *kobj_path;
+ struct gpd_link *link;
+ int ret;
+
+ ret = mutex_lock_interruptible(&gpd->lock);
+ if (ret)
+ return -ERESTARTSYS;
+
+ if (WARN_ON(gpd->status >= ARRAY_SIZE(status_lookup)))
+ goto exit;
+ seq_printf(s, "%-30s %-15s ", gpd->name, status_lookup[gpd->status]);
+
+ /*
+ * Modifications on the list require holding locks on both
+ * master and slave, so we are safe.
+ * Also gpd->name is immutable.
+ */
+ list_for_each_entry(link, &gpd->master_links, master_node) {
+ seq_printf(s, "%s", link->slave->name);
+ if (!list_is_last(&link->master_node, &gpd->master_links))
+ seq_puts(s, ", ");
+ }
+
+ list_for_each_entry(pm_data, &gpd->dev_list, list_node) {
+ kobj_path = kobject_get_path(&pm_data->dev->kobj, GFP_KERNEL);
+ if (kobj_path == NULL)
+ continue;
+
+ seq_printf(s, "\n %-50s ", kobj_path);
+ rtpm_status_str(s, pm_data->dev);
+ kfree(kobj_path);
+ }
+
+ seq_puts(s, "\n");
+exit:
+ mutex_unlock(&gpd->lock);
+
+ return 0;
+}
+
+static int pm_genpd_summary_show(struct seq_file *s, void *data)
+{
+ struct generic_pm_domain *gpd;
+ int ret = 0;
+
+ seq_puts(s, " domain status slaves\n");
+ seq_puts(s, " /device runtime status\n");
+ seq_puts(s, "----------------------------------------------------------------------\n");
+
+ ret = mutex_lock_interruptible(&gpd_list_lock);
+ if (ret)
+ return -ERESTARTSYS;
+
+ list_for_each_entry(gpd, &gpd_list, gpd_list_node) {
+ ret = pm_genpd_summary_one(s, gpd);
+ if (ret)
+ break;
+ }
+ mutex_unlock(&gpd_list_lock);
+
+ return ret;
+}
+
+static int pm_genpd_summary_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, pm_genpd_summary_show, NULL);
+}
+
+static const struct file_operations pm_genpd_summary_fops = {
+ .open = pm_genpd_summary_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init pm_genpd_debug_init(void)
+{
+ struct dentry *d;
+
+ pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL);
+
+ if (!pm_genpd_debugfs_dir)
+ return -ENOMEM;
+
+ d = debugfs_create_file("pm_genpd_summary", S_IRUGO,
+ pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops);
+ if (!d)
+ return -ENOMEM;
+
+ return 0;
+}
+late_initcall(pm_genpd_debug_init);
+
+static void __exit pm_genpd_debug_exit(void)
+{
+ debugfs_remove_recursive(pm_genpd_debugfs_dir);
+}
+__exitcall(pm_genpd_debug_exit);
+#endif /* CONFIG_PM_ADVANCED_DEBUG */
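
For reference, a minimal sketch of how a platform driver might consume the
provider API added above, assuming a hypothetical "acme" power controller
with two domains; all names below are illustrative and not part of this patch:

#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>

static struct generic_pm_domain acme_pd0 = { .name = "acme-pd0" };
static struct generic_pm_domain acme_pd1 = { .name = "acme-pd1" };

static struct generic_pm_domain *acme_domains[] = {
	&acme_pd0,
	&acme_pd1,
};

static struct genpd_onecell_data acme_onecell_data = {
	.domains = acme_domains,
	.num_domains = ARRAY_SIZE(acme_domains),
};

static int acme_pd_probe(struct platform_device *pdev)
{
	/* Register the domains, starting them in the "on" state. */
	pm_genpd_init(&acme_pd0, NULL, false);
	pm_genpd_init(&acme_pd1, NULL, false);

	/*
	 * Consumers reference the domains with
	 * "power-domains = <&acme_pd_node N>"; __of_genpd_xlate_onecell()
	 * turns the single cell N into an index into acme_domains[].
	 */
	return __of_genpd_add_provider(pdev->dev.of_node,
				       __of_genpd_xlate_onecell,
				       &acme_onecell_data);
}

Once the provider is registered, bus code is expected to bind consumer
devices to their domains via genpd_dev_pm_attach().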
diff --git a/drivers/base/power/domain_governor.c b/drivers/base/power/domain_governor.c
index a089e3bcdfbc..d88a62e104d4 100644
--- a/drivers/base/power/domain_governor.c
+++ b/drivers/base/power/domain_governor.c
@@ -42,7 +42,7 @@ static int dev_update_qos_constraint(struct device *dev, void *data)
* default_stop_ok - Default PM domain governor routine for stopping devices.
* @dev: Device to check.
*/
-bool default_stop_ok(struct device *dev)
+static bool default_stop_ok(struct device *dev)
{
struct gpd_timing_data *td = &dev_gpd_data(dev)->td;
unsigned long flags;
@@ -229,10 +229,7 @@ static bool always_on_power_down_ok(struct dev_pm_domain *domain)
#else /* !CONFIG_PM_RUNTIME */
-bool default_stop_ok(struct device *dev)
-{
- return false;
-}
+static inline bool default_stop_ok(struct device *dev) { return false; }
#define default_power_down_ok NULL
#define always_on_power_down_ok NULL
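
With default_stop_ok() no longer visible outside this file, code that wants
QoS-aware stop decisions is expected to pass the exported simple_qos_governor
(whose .stop_ok points at default_stop_ok()) when initializing a domain.
A minimal, illustrative sketch:

#include <linux/pm_domain.h>

static struct generic_pm_domain example_pd = { .name = "example" };

static void example_register(void)
{
	/* simple_qos_governor wires .stop_ok up to default_stop_ok(). */
	pm_genpd_init(&example_pd, &simple_qos_governor, false);
}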
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index b67d9aef9fe4..9717d5f20139 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -540,7 +540,7 @@ static void async_resume_noirq(void *data, async_cookie_t cookie)
* Call the "noirq" resume handlers for all devices in dpm_noirq_list and
* enable device drivers to receive interrupts.
*/
-static void dpm_resume_noirq(pm_message_t state)
+void dpm_resume_noirq(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
@@ -662,7 +662,7 @@ static void async_resume_early(void *data, async_cookie_t cookie)
* dpm_resume_early - Execute "early resume" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
-static void dpm_resume_early(pm_message_t state)
+void dpm_resume_early(pm_message_t state)
{
struct device *dev;
ktime_t starttime = ktime_get();
@@ -1093,7 +1093,7 @@ static int device_suspend_noirq(struct device *dev)
* Prevent device drivers from receiving interrupts and call the "noirq" suspend
* handlers for all non-sysdev devices.
*/
-static int dpm_suspend_noirq(pm_message_t state)
+int dpm_suspend_noirq(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
@@ -1232,7 +1232,7 @@ static int device_suspend_late(struct device *dev)
* dpm_suspend_late - Execute "late suspend" callbacks for all devices.
* @state: PM transition of the system being carried out.
*/
-static int dpm_suspend_late(pm_message_t state)
+int dpm_suspend_late(pm_message_t state)
{
ktime_t starttime = ktime_get();
int error = 0;
@@ -1266,6 +1266,8 @@ static int dpm_suspend_late(pm_message_t state)
}
mutex_unlock(&dpm_list_mtx);
async_synchronize_full();
+ if (!error)
+ error = async_error;
if (error) {
suspend_stats.failed_suspend_late++;
dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
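
With dpm_resume_noirq(), dpm_resume_early(), dpm_suspend_noirq() and
dpm_suspend_late() made non-static above, callers outside main.c can drive
the late and noirq phases individually. A rough sketch of the expected call
order, mirroring dpm_suspend_end()/dpm_resume_start() and assuming each
suspend phase unwinds itself internally on failure:

#include <linux/pm.h>
#include <linux/suspend.h>

static int example_enter_low_power_state(void)
{
	int error;

	error = dpm_suspend_late(PMSG_SUSPEND);
	if (error)
		return error;

	error = dpm_suspend_noirq(PMSG_SUSPEND);
	if (error) {
		dpm_resume_early(PMSG_RESUME);
		return error;
	}

	/* ... put the platform into its low-power state here ... */

	dpm_resume_noirq(PMSG_RESUME);
	dpm_resume_early(PMSG_RESUME);
	return 0;
}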
diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
index 95b181d1ca6d..a9d26ed11bf4 100644
--- a/drivers/base/power/sysfs.c
+++ b/drivers/base/power/sysfs.c
@@ -92,9 +92,6 @@
* wakeup_count - Report the number of wakeup events related to the device
*/
-static const char enabled[] = "enabled";
-static const char disabled[] = "disabled";
-
const char power_group_name[] = "power";
EXPORT_SYMBOL_GPL(power_group_name);
@@ -336,11 +333,14 @@ static DEVICE_ATTR(pm_qos_remote_wakeup, 0644,
#endif /* CONFIG_PM_RUNTIME */
#ifdef CONFIG_PM_SLEEP
+static const char _enabled[] = "enabled";
+static const char _disabled[] = "disabled";
+
static ssize_t
wake_show(struct device * dev, struct device_attribute *attr, char * buf)
{
return sprintf(buf, "%s\n", device_can_wakeup(dev)
- ? (device_may_wakeup(dev) ? enabled : disabled)
+ ? (device_may_wakeup(dev) ? _enabled : _disabled)
: "");
}
@@ -357,11 +357,11 @@ wake_store(struct device * dev, struct device_attribute *attr,
cp = memchr(buf, '\n', n);
if (cp)
len = cp - buf;
- if (len == sizeof enabled - 1
- && strncmp(buf, enabled, sizeof enabled - 1) == 0)
+ if (len == sizeof _enabled - 1
+ && strncmp(buf, _enabled, sizeof _enabled - 1) == 0)
device_set_wakeup_enable(dev, 1);
- else if (len == sizeof disabled - 1
- && strncmp(buf, disabled, sizeof disabled - 1) == 0)
+ else if (len == sizeof _disabled - 1
+ && strncmp(buf, _disabled, sizeof _disabled - 1) == 0)
device_set_wakeup_enable(dev, 0);
else
return -EINVAL;
@@ -570,7 +570,8 @@ static ssize_t async_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%s\n",
- device_async_suspend_enabled(dev) ? enabled : disabled);
+ device_async_suspend_enabled(dev) ?
+ _enabled : _disabled);
}
static ssize_t async_store(struct device *dev, struct device_attribute *attr,
@@ -582,9 +583,10 @@ static ssize_t async_store(struct device *dev, struct device_attribute *attr,
cp = memchr(buf, '\n', n);
if (cp)
len = cp - buf;
- if (len == sizeof enabled - 1 && strncmp(buf, enabled, len) == 0)
+ if (len == sizeof _enabled - 1 && strncmp(buf, _enabled, len) == 0)
device_enable_async_suspend(dev);
- else if (len == sizeof disabled - 1 && strncmp(buf, disabled, len) == 0)
+ else if (len == sizeof _disabled - 1 &&
+ strncmp(buf, _disabled, len) == 0)
device_disable_async_suspend(dev);
else
return -EINVAL;
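
From userspace, the wake_store() handler above accepts exactly the tokens
"enabled" and "disabled" (a trailing newline is stripped). A hypothetical
snippet toggling a device's wakeup setting; the sysfs path is illustrative:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/devices/platform/acme/power/wakeup", "w");

	if (!f)
		return 1;
	/* Anything other than "enabled"/"disabled" yields -EINVAL. */
	fputs("enabled\n", f);
	fclose(f);
	return 0;
}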
diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
index eb1bd2ecad8b..c2744b30d5d9 100644
--- a/drivers/base/power/wakeup.c
+++ b/drivers/base/power/wakeup.c
@@ -24,6 +24,9 @@
*/
bool events_check_enabled __read_mostly;
+/* If set and the system is suspending, terminate the suspend. */
+static bool pm_abort_suspend __read_mostly;
+
/*
* Combined counters of registered wakeup events and wakeup events in progress.
* They need to be modified together atomically, so it's better to use one
@@ -719,7 +722,18 @@ bool pm_wakeup_pending(void)
pm_print_active_wakeup_sources();
}
- return ret;
+ return ret || pm_abort_suspend;
+}
+
+void pm_system_wakeup(void)
+{
+ pm_abort_suspend = true;
+ freeze_wake();
+}
+
+void pm_wakeup_clear(void)
+{
+ pm_abort_suspend = false;
}
/**
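
The new pm_system_wakeup() helper gives interrupt handlers a way to abort a
transition that is already past the normal wakeup-source accounting. A hedged
sketch of a hypothetical wakeup IRQ handler (names illustrative):

#include <linux/interrupt.h>
#include <linux/suspend.h>

static irqreturn_t acme_wakeup_irq_handler(int irq, void *dev_id)
{
	/*
	 * Sets pm_abort_suspend, so the next pm_wakeup_pending() check
	 * (e.g. the one added to syscore_suspend() below) fails the
	 * transition; it also kicks freeze_wake() for suspend-to-idle.
	 */
	pm_system_wakeup();
	return IRQ_HANDLED;
}

The PM core is then expected to call pm_wakeup_clear() before starting the
next transition so that a stale abort request does not block it.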
diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
index dbb8350ea8dc..8d98a329f6ea 100644
--- a/drivers/base/syscore.c
+++ b/drivers/base/syscore.c
@@ -9,7 +9,7 @@
#include <linux/syscore_ops.h>
#include <linux/mutex.h>
#include <linux/module.h>
-#include <linux/interrupt.h>
+#include <linux/suspend.h>
#include <trace/events/power.h>
static LIST_HEAD(syscore_ops_list);
@@ -54,9 +54,8 @@ int syscore_suspend(void)
pr_debug("Checking wakeup interrupts\n");
/* Return error code if there are any wakeup interrupts pending. */
- ret = check_wakeup_irqs();
- if (ret)
- return ret;
+ if (pm_wakeup_pending())
+ return -EBUSY;
WARN_ONCE(!irqs_disabled(),
"Interrupts enabled before system core suspend.\n");