Diffstat (limited to 'drivers/base')
-rw-r--r--  drivers/base/dma-coherent.c      | 164
-rw-r--r--  drivers/base/dma-mapping.c       |   2
-rw-r--r--  drivers/base/firmware_class.c    |  49
-rw-r--r--  drivers/base/power/domain.c      | 259
-rw-r--r--  drivers/base/power/wakeup.c      |   8
-rw-r--r--  drivers/base/regmap/regmap-w1.c  |   4
6 files changed, 386 insertions, 100 deletions
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c index 2ae24c28e70c..1c152aed6b82 100644 --- a/drivers/base/dma-coherent.c +++ b/drivers/base/dma-coherent.c @@ -25,7 +25,7 @@ static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *de { if (dev && dev->dma_mem) return dev->dma_mem; - return dma_coherent_default_memory; + return NULL; } static inline dma_addr_t dma_get_device_base(struct device *dev, @@ -165,34 +165,15 @@ void *dma_mark_declared_memory_occupied(struct device *dev, } EXPORT_SYMBOL(dma_mark_declared_memory_occupied); -/** - * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area - * - * @dev: device from which we allocate memory - * @size: size of requested memory area - * @dma_handle: This will be filled with the correct dma handle - * @ret: This pointer will be filled with the virtual address - * to allocated area. - * - * This function should be only called from per-arch dma_alloc_coherent() - * to support allocation from per-device coherent memory pools. - * - * Returns 0 if dma_alloc_coherent should continue with allocating from - * generic memory areas, or !0 if dma_alloc_coherent should return @ret. - */ -int dma_alloc_from_coherent(struct device *dev, ssize_t size, - dma_addr_t *dma_handle, void **ret) +static void *__dma_alloc_from_coherent(struct dma_coherent_mem *mem, + ssize_t size, dma_addr_t *dma_handle) { - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); int order = get_order(size); unsigned long flags; int pageno; int dma_memory_map; + void *ret; - if (!mem) - return 0; - - *ret = NULL; spin_lock_irqsave(&mem->spinlock, flags); if (unlikely(size > (mem->size << PAGE_SHIFT))) @@ -203,21 +184,50 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size, goto err; /* - * Memory was found in the per-device area. + * Memory was found in the coherent area. */ - *dma_handle = dma_get_device_base(dev, mem) + (pageno << PAGE_SHIFT); - *ret = mem->virt_base + (pageno << PAGE_SHIFT); + *dma_handle = mem->device_base + (pageno << PAGE_SHIFT); + ret = mem->virt_base + (pageno << PAGE_SHIFT); dma_memory_map = (mem->flags & DMA_MEMORY_MAP); spin_unlock_irqrestore(&mem->spinlock, flags); if (dma_memory_map) - memset(*ret, 0, size); + memset(ret, 0, size); else - memset_io(*ret, 0, size); + memset_io(ret, 0, size); - return 1; + return ret; err: spin_unlock_irqrestore(&mem->spinlock, flags); + return NULL; +} + +/** + * dma_alloc_from_dev_coherent() - allocate memory from device coherent pool + * @dev: device from which we allocate memory + * @size: size of requested memory area + * @dma_handle: This will be filled with the correct dma handle + * @ret: This pointer will be filled with the virtual address + * to allocated area. + * + * This function should be only called from per-arch dma_alloc_coherent() + * to support allocation from per-device coherent memory pools. + * + * Returns 0 if dma_alloc_coherent should continue with allocating from + * generic memory areas, or !0 if dma_alloc_coherent should return @ret. 
+ */ +int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size, + dma_addr_t *dma_handle, void **ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + if (!mem) + return 0; + + *ret = __dma_alloc_from_coherent(mem, size, dma_handle); + if (*ret) + return 1; + /* * In the case where the allocation can not be satisfied from the * per-device area, try to fall back to generic memory if the @@ -225,25 +235,20 @@ err: */ return mem->flags & DMA_MEMORY_EXCLUSIVE; } -EXPORT_SYMBOL(dma_alloc_from_coherent); +EXPORT_SYMBOL(dma_alloc_from_dev_coherent); -/** - * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool - * @dev: device from which the memory was allocated - * @order: the order of pages allocated - * @vaddr: virtual address of allocated pages - * - * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, releases that memory. - * - * Returns 1 if we correctly released the memory, or 0 if - * dma_release_coherent() should proceed with releasing memory from - * generic pools. - */ -int dma_release_from_coherent(struct device *dev, int order, void *vaddr) +void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle) { - struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + if (!dma_coherent_default_memory) + return NULL; + + return __dma_alloc_from_coherent(dma_coherent_default_memory, size, + dma_handle); +} +static int __dma_release_from_coherent(struct dma_coherent_mem *mem, + int order, void *vaddr) +{ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) { int page = (vaddr - mem->virt_base) >> PAGE_SHIFT; @@ -256,28 +261,39 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr) } return 0; } -EXPORT_SYMBOL(dma_release_from_coherent); /** - * dma_mmap_from_coherent() - try to mmap the memory allocated from - * per-device coherent memory pool to userspace + * dma_release_from_dev_coherent() - free memory to device coherent memory pool * @dev: device from which the memory was allocated - * @vma: vm_area for the userspace memory - * @vaddr: cpu address returned by dma_alloc_from_coherent - * @size: size of the memory buffer allocated by dma_alloc_from_coherent - * @ret: result from remap_pfn_range() + * @order: the order of pages allocated + * @vaddr: virtual address of allocated pages * * This checks whether the memory was allocated from the per-device - * coherent memory pool and if so, maps that memory to the provided vma. + * coherent memory pool and if so, releases that memory. * - * Returns 1 if we correctly mapped the memory, or 0 if the caller should - * proceed with mapping memory from generic pools. + * Returns 1 if we correctly released the memory, or 0 if the caller should + * proceed with releasing memory from generic pools. 
*/ -int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, - void *vaddr, size_t size, int *ret) +int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr) { struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + return __dma_release_from_coherent(mem, order, vaddr); +} +EXPORT_SYMBOL(dma_release_from_dev_coherent); + +int dma_release_from_global_coherent(int order, void *vaddr) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_release_from_coherent(dma_coherent_default_memory, order, + vaddr); +} + +static int __dma_mmap_from_coherent(struct dma_coherent_mem *mem, + struct vm_area_struct *vma, void *vaddr, size_t size, int *ret) +{ if (mem && vaddr >= mem->virt_base && vaddr + size <= (mem->virt_base + (mem->size << PAGE_SHIFT))) { unsigned long off = vma->vm_pgoff; @@ -296,7 +312,39 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma, } return 0; } -EXPORT_SYMBOL(dma_mmap_from_coherent); + +/** + * dma_mmap_from_dev_coherent() - mmap memory from the device coherent pool + * @dev: device from which the memory was allocated + * @vma: vm_area for the userspace memory + * @vaddr: cpu address returned by dma_alloc_from_dev_coherent + * @size: size of the memory buffer allocated + * @ret: result from remap_pfn_range() + * + * This checks whether the memory was allocated from the per-device + * coherent memory pool and if so, maps that memory to the provided vma. + * + * Returns 1 if we correctly mapped the memory, or 0 if the caller should + * proceed with mapping memory from generic pools. + */ +int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma, + void *vaddr, size_t size, int *ret) +{ + struct dma_coherent_mem *mem = dev_get_coherent_memory(dev); + + return __dma_mmap_from_coherent(mem, vma, vaddr, size, ret); +} +EXPORT_SYMBOL(dma_mmap_from_dev_coherent); + +int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *vaddr, + size_t size, int *ret) +{ + if (!dma_coherent_default_memory) + return 0; + + return __dma_mmap_from_coherent(dma_coherent_default_memory, vma, + vaddr, size, ret); +} /* * Support for reserved memory regions defined in device tree diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c index 5096755d185e..b555ff9dd8fc 100644 --- a/drivers/base/dma-mapping.c +++ b/drivers/base/dma-mapping.c @@ -235,7 +235,7 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret)) + if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) return ret; if (off < count && user_count <= (count - off)) { diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c index b9f907eedbf7..bfbe1e154128 100644 --- a/drivers/base/firmware_class.c +++ b/drivers/base/firmware_class.c @@ -30,7 +30,6 @@ #include <linux/syscore_ops.h> #include <linux/reboot.h> #include <linux/security.h> -#include <linux/swait.h> #include <generated/utsrelease.h> @@ -112,13 +111,13 @@ static inline long firmware_loading_timeout(void) * state of the firmware loading. 
*/ struct fw_state { - struct swait_queue_head wq; + struct completion completion; enum fw_status status; }; static void fw_state_init(struct fw_state *fw_st) { - init_swait_queue_head(&fw_st->wq); + init_completion(&fw_st->completion); fw_st->status = FW_STATUS_UNKNOWN; } @@ -131,9 +130,7 @@ static int __fw_state_wait_common(struct fw_state *fw_st, long timeout) { long ret; - ret = swait_event_interruptible_timeout(fw_st->wq, - __fw_state_is_done(READ_ONCE(fw_st->status)), - timeout); + ret = wait_for_completion_killable_timeout(&fw_st->completion, timeout); if (ret != 0 && fw_st->status == FW_STATUS_ABORTED) return -ENOENT; if (!ret) @@ -148,35 +145,34 @@ static void __fw_state_set(struct fw_state *fw_st, WRITE_ONCE(fw_st->status, status); if (status == FW_STATUS_DONE || status == FW_STATUS_ABORTED) - swake_up(&fw_st->wq); + complete_all(&fw_st->completion); } #define fw_state_start(fw_st) \ __fw_state_set(fw_st, FW_STATUS_LOADING) #define fw_state_done(fw_st) \ __fw_state_set(fw_st, FW_STATUS_DONE) +#define fw_state_aborted(fw_st) \ + __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_wait(fw_st) \ __fw_state_wait_common(fw_st, MAX_SCHEDULE_TIMEOUT) -#ifndef CONFIG_FW_LOADER_USER_HELPER - -#define fw_state_is_aborted(fw_st) false - -#else /* CONFIG_FW_LOADER_USER_HELPER */ - static int __fw_state_check(struct fw_state *fw_st, enum fw_status status) { return fw_st->status == status; } +#define fw_state_is_aborted(fw_st) \ + __fw_state_check(fw_st, FW_STATUS_ABORTED) + +#ifdef CONFIG_FW_LOADER_USER_HELPER + #define fw_state_aborted(fw_st) \ __fw_state_set(fw_st, FW_STATUS_ABORTED) #define fw_state_is_done(fw_st) \ __fw_state_check(fw_st, FW_STATUS_DONE) #define fw_state_is_loading(fw_st) \ __fw_state_check(fw_st, FW_STATUS_LOADING) -#define fw_state_is_aborted(fw_st) \ - __fw_state_check(fw_st, FW_STATUS_ABORTED) #define fw_state_wait_timeout(fw_st, timeout) \ __fw_state_wait_common(fw_st, timeout) @@ -1200,6 +1196,28 @@ _request_firmware_prepare(struct firmware **firmware_p, const char *name, return 1; /* need to load */ } +/* + * Batched requests need only one wake, we need to do this step last due to the + * fallback mechanism. The buf is protected with kref_get(), and it won't be + * released until the last user calls release_firmware(). + * + * Failed batched requests are possible as well, in such cases we just share + * the struct firmware_buf and won't release it until all requests are woken + * and have gone through this same path. + */ +static void fw_abort_batch_reqs(struct firmware *fw) +{ + struct firmware_buf *buf; + + /* Loaded directly? 
*/ + if (!fw || !fw->priv) + return; + + buf = fw->priv; + if (!fw_state_is_aborted(&buf->fw_st)) + fw_state_aborted(&buf->fw_st); +} + /* called from request_firmware() and request_firmware_work_func() */ static int _request_firmware(const struct firmware **firmware_p, const char *name, @@ -1243,6 +1261,7 @@ _request_firmware(const struct firmware **firmware_p, const char *name, out: if (ret < 0) { + fw_abort_batch_reqs(fw); release_firmware(fw); fw = NULL; } diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c index 3b8210ebb50e..e8ca5e2cf1e5 100644 --- a/drivers/base/power/domain.c +++ b/drivers/base/power/domain.c @@ -209,6 +209,34 @@ static void genpd_sd_counter_inc(struct generic_pm_domain *genpd) smp_mb__after_atomic(); } +#ifdef CONFIG_DEBUG_FS +static void genpd_update_accounting(struct generic_pm_domain *genpd) +{ + ktime_t delta, now; + + now = ktime_get(); + delta = ktime_sub(now, genpd->accounting_time); + + /* + * If genpd->status is active, it means we are just + * out of off and so update the idle time and vice + * versa. + */ + if (genpd->status == GPD_STATE_ACTIVE) { + int state_idx = genpd->state_idx; + + genpd->states[state_idx].idle_time = + ktime_add(genpd->states[state_idx].idle_time, delta); + } else { + genpd->on_time = ktime_add(genpd->on_time, delta); + } + + genpd->accounting_time = now; +} +#else +static inline void genpd_update_accounting(struct generic_pm_domain *genpd) {} +#endif + static int _genpd_power_on(struct generic_pm_domain *genpd, bool timed) { unsigned int state_idx = genpd->state_idx; @@ -361,6 +389,7 @@ static int genpd_power_off(struct generic_pm_domain *genpd, bool one_dev_on, } genpd->status = GPD_STATE_POWER_OFF; + genpd_update_accounting(genpd); list_for_each_entry(link, &genpd->slave_links, slave_node) { genpd_sd_counter_dec(link->master); @@ -413,6 +442,8 @@ static int genpd_power_on(struct generic_pm_domain *genpd, unsigned int depth) goto err; genpd->status = GPD_STATE_ACTIVE; + genpd_update_accounting(genpd); + return 0; err: @@ -1222,8 +1253,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, spin_unlock_irq(&dev->power.lock); - dev_pm_domain_set(dev, &genpd->domain); - return gpd_data; err_free: @@ -1237,8 +1266,6 @@ static struct generic_pm_domain_data *genpd_alloc_dev_data(struct device *dev, static void genpd_free_dev_data(struct device *dev, struct generic_pm_domain_data *gpd_data) { - dev_pm_domain_set(dev, NULL); - spin_lock_irq(&dev->power.lock); dev->power.subsys_data->domain_data = NULL; @@ -1275,6 +1302,8 @@ static int genpd_add_device(struct generic_pm_domain *genpd, struct device *dev, if (ret) goto out; + dev_pm_domain_set(dev, &genpd->domain); + genpd->device_count++; genpd->max_off_time_changed = true; @@ -1336,6 +1365,8 @@ static int genpd_remove_device(struct generic_pm_domain *genpd, if (genpd->detach_dev) genpd->detach_dev(genpd, dev); + dev_pm_domain_set(dev, NULL); + list_del_init(&pdd->list_node); genpd_unlock(genpd); @@ -1540,6 +1571,7 @@ int pm_genpd_init(struct generic_pm_domain *genpd, genpd->max_off_time_changed = true; genpd->provider = NULL; genpd->has_provider = false; + genpd->accounting_time = ktime_get(); genpd->domain.ops.runtime_suspend = genpd_runtime_suspend; genpd->domain.ops.runtime_resume = genpd_runtime_resume; genpd->domain.ops.prepare = pm_genpd_prepare; @@ -1743,7 +1775,7 @@ static int genpd_add_provider(struct device_node *np, genpd_xlate_t xlate, mutex_lock(&of_genpd_mutex); list_add(&cp->link, &of_genpd_providers); 
mutex_unlock(&of_genpd_mutex); - pr_debug("Added domain provider from %s\n", np->full_name); + pr_debug("Added domain provider from %pOF\n", np); return 0; } @@ -2149,16 +2181,16 @@ static int genpd_parse_state(struct genpd_power_state *genpd_state, err = of_property_read_u32(state_node, "entry-latency-us", &entry_latency); if (err) { - pr_debug(" * %s missing entry-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing entry-latency-us property\n", + state_node); return -EINVAL; } err = of_property_read_u32(state_node, "exit-latency-us", &exit_latency); if (err) { - pr_debug(" * %s missing exit-latency-us property\n", - state_node->full_name); + pr_debug(" * %pOF missing exit-latency-us property\n", + state_node); return -EINVAL; } @@ -2212,8 +2244,8 @@ int of_genpd_parse_idle_states(struct device_node *dn, ret = genpd_parse_state(&st[i++], np); if (ret) { pr_err - ("Parsing idle state node %s failed with err %d\n", - np->full_name, ret); + ("Parsing idle state node %pOF failed with err %d\n", + np, ret); of_node_put(np); kfree(st); return ret; @@ -2327,7 +2359,7 @@ exit: return 0; } -static int pm_genpd_summary_show(struct seq_file *s, void *data) +static int genpd_summary_show(struct seq_file *s, void *data) { struct generic_pm_domain *genpd; int ret = 0; @@ -2350,21 +2382,187 @@ static int pm_genpd_summary_show(struct seq_file *s, void *data) return ret; } -static int pm_genpd_summary_open(struct inode *inode, struct file *file) +static int genpd_status_show(struct seq_file *s, void *data) { - return single_open(file, pm_genpd_summary_show, NULL); + static const char * const status_lookup[] = { + [GPD_STATE_ACTIVE] = "on", + [GPD_STATE_POWER_OFF] = "off" + }; + + struct generic_pm_domain *genpd = s->private; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (WARN_ON_ONCE(genpd->status >= ARRAY_SIZE(status_lookup))) + goto exit; + + if (genpd->status == GPD_STATE_POWER_OFF) + seq_printf(s, "%s-%u\n", status_lookup[genpd->status], + genpd->state_idx); + else + seq_printf(s, "%s\n", status_lookup[genpd->status]); +exit: + genpd_unlock(genpd); + return ret; } -static const struct file_operations pm_genpd_summary_fops = { - .open = pm_genpd_summary_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +static int genpd_sub_domains_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct gpd_link *link; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(link, &genpd->master_links, master_node) + seq_printf(s, "%s\n", link->slave->name); + + genpd_unlock(genpd); + return ret; +} + +static int genpd_idle_states_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + seq_puts(s, "State Time Spent(ms)\n"); + + for (i = 0; i < genpd->state_count; i++) { + ktime_t delta = 0; + s64 msecs; + + if ((genpd->status == GPD_STATE_POWER_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + msecs = ktime_to_ms( + ktime_add(genpd->states[i].idle_time, delta)); + seq_printf(s, "S%-13i %lld\n", i, msecs); + } + + genpd_unlock(genpd); + return ret; +} + +static int genpd_active_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0; + int ret = 0; + + ret = 
genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + if (genpd->status == GPD_STATE_ACTIVE) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + seq_printf(s, "%lld ms\n", ktime_to_ms( + ktime_add(genpd->on_time, delta))); + + genpd_unlock(genpd); + return ret; +} + +static int genpd_total_idle_time_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + ktime_t delta = 0, total = 0; + unsigned int i; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + for (i = 0; i < genpd->state_count; i++) { + + if ((genpd->status == GPD_STATE_POWER_OFF) && + (genpd->state_idx == i)) + delta = ktime_sub(ktime_get(), genpd->accounting_time); + + total = ktime_add(total, genpd->states[i].idle_time); + } + total = ktime_add(total, delta); + + seq_printf(s, "%lld ms\n", ktime_to_ms(total)); + + genpd_unlock(genpd); + return ret; +} + + +static int genpd_devices_show(struct seq_file *s, void *data) +{ + struct generic_pm_domain *genpd = s->private; + struct pm_domain_data *pm_data; + const char *kobj_path; + int ret = 0; + + ret = genpd_lock_interruptible(genpd); + if (ret) + return -ERESTARTSYS; + + list_for_each_entry(pm_data, &genpd->dev_list, list_node) { + kobj_path = kobject_get_path(&pm_data->dev->kobj, + genpd_is_irq_safe(genpd) ? + GFP_ATOMIC : GFP_KERNEL); + if (kobj_path == NULL) + continue; + + seq_printf(s, "%s\n", kobj_path); + kfree(kobj_path); + } + + genpd_unlock(genpd); + return ret; +} + +#define define_genpd_open_function(name) \ +static int genpd_##name##_open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, genpd_##name##_show, inode->i_private); \ +} + +define_genpd_open_function(summary); +define_genpd_open_function(status); +define_genpd_open_function(sub_domains); +define_genpd_open_function(idle_states); +define_genpd_open_function(active_time); +define_genpd_open_function(total_idle_time); +define_genpd_open_function(devices); + +#define define_genpd_debugfs_fops(name) \ +static const struct file_operations genpd_##name##_fops = { \ + .open = genpd_##name##_open, \ + .read = seq_read, \ + .llseek = seq_lseek, \ + .release = single_release, \ +} + +define_genpd_debugfs_fops(summary); +define_genpd_debugfs_fops(status); +define_genpd_debugfs_fops(sub_domains); +define_genpd_debugfs_fops(idle_states); +define_genpd_debugfs_fops(active_time); +define_genpd_debugfs_fops(total_idle_time); +define_genpd_debugfs_fops(devices); static int __init pm_genpd_debug_init(void) { struct dentry *d; + struct generic_pm_domain *genpd; pm_genpd_debugfs_dir = debugfs_create_dir("pm_genpd", NULL); @@ -2372,10 +2570,29 @@ static int __init pm_genpd_debug_init(void) return -ENOMEM; d = debugfs_create_file("pm_genpd_summary", S_IRUGO, - pm_genpd_debugfs_dir, NULL, &pm_genpd_summary_fops); + pm_genpd_debugfs_dir, NULL, &genpd_summary_fops); if (!d) return -ENOMEM; + list_for_each_entry(genpd, &gpd_list, gpd_list_node) { + d = debugfs_create_dir(genpd->name, pm_genpd_debugfs_dir); + if (!d) + return -ENOMEM; + + debugfs_create_file("current_state", 0444, + d, genpd, &genpd_status_fops); + debugfs_create_file("sub_domains", 0444, + d, genpd, &genpd_sub_domains_fops); + debugfs_create_file("idle_states", 0444, + d, genpd, &genpd_idle_states_fops); + debugfs_create_file("active_time", 0444, + d, genpd, &genpd_active_time_fops); + debugfs_create_file("total_idle_time", 0444, + d, genpd, &genpd_total_idle_time_fops); + debugfs_create_file("devices", 0444, + d, genpd, 
&genpd_devices_fops); + } + return 0; } late_initcall(pm_genpd_debug_init); diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c index 144e6d8fafc8..b49efe33099e 100644 --- a/drivers/base/power/wakeup.c +++ b/drivers/base/power/wakeup.c @@ -412,15 +412,17 @@ void device_set_wakeup_capable(struct device *dev, bool capable) if (!!dev->power.can_wakeup == !!capable) return; + dev->power.can_wakeup = capable; if (device_is_registered(dev) && !list_empty(&dev->power.entry)) { if (capable) { - if (wakeup_sysfs_add(dev)) - return; + int ret = wakeup_sysfs_add(dev); + + if (ret) + dev_info(dev, "Wakeup sysfs attributes not added\n"); } else { wakeup_sysfs_remove(dev); } } - dev->power.can_wakeup = capable; } EXPORT_SYMBOL_GPL(device_set_wakeup_capable); diff --git a/drivers/base/regmap/regmap-w1.c b/drivers/base/regmap/regmap-w1.c index 5f04e7bf063e..e6c64b0be5b2 100644 --- a/drivers/base/regmap/regmap-w1.c +++ b/drivers/base/regmap/regmap-w1.c @@ -1,7 +1,7 @@ /* * Register map access API - W1 (1-Wire) support * - * Copyright (C) 2017 OAO Radioavionica + * Copyright (c) 2017 Radioavionica Corporation * Author: Alex A. Mihaylov <minimumlaw@rambler.ru> * * This program is free software; you can redistribute it and/or modify @@ -11,7 +11,7 @@ #include <linux/regmap.h> #include <linux/module.h> -#include "../../w1/w1.h" +#include <linux/w1.h> #include "internal.h" |
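
The dma-coherent.c hunks above rename dma_alloc_from_coherent() and friends to dma_*_from_dev_coherent(), stop dev_get_coherent_memory() from silently falling back to dma_coherent_default_memory, and expose the default pool through the new dma_*_from_global_coherent() helpers instead. Below is a minimal sketch of how an architecture's allocation path could chain the two pools; only the dma_*_coherent() calls are taken from the patch, while every my_arch_* name is hypothetical.

#include <linux/dma-mapping.h>

/* Stand-in for the arch's generic allocator; invented for illustration. */
static void *my_arch_alloc_pages(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp,
				 unsigned long attrs);

/*
 * Sketch only: per-device pool first, then the global coherent pool,
 * then the generic allocator.
 */
static void *my_arch_dma_alloc(struct device *dev, size_t size,
			       dma_addr_t *dma_handle, gfp_t gfp,
			       unsigned long attrs)
{
	void *ret;

	/*
	 * A non-zero return means the per-device pool handled the request;
	 * ret may still be NULL if the pool is DMA_MEMORY_EXCLUSIVE and the
	 * allocation failed.
	 */
	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &ret))
		return ret;

	/* No per-device pool: try the global pool, if one was declared. */
	ret = dma_alloc_from_global_coherent(size, dma_handle);
	if (ret)
		return ret;

	/* Otherwise fall back to the generic allocator. */
	return my_arch_alloc_pages(dev, size, dma_handle, gfp, attrs);
}

The release and mmap paths pair up the same way: dma_release_from_dev_coherent()/dma_mmap_from_dev_coherent() for devices with their own pool, and the *_from_global_coherent() variants for everything else.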
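
The firmware_class.c hunks replace the swait queue in struct fw_state with a struct completion. Because complete_all() leaves the completion permanently signalled, every batched requester sharing a firmware_buf, including ones that only start waiting after an abort, returns from the wait immediately, which is what the new fw_abort_batch_reqs() path relies on. A small standalone sketch of that "done or aborted" pattern, using generic kernel primitives and invented names (loader_state, loader_finish, loader_wait) rather than code from the patch:

#include <linux/completion.h>
#include <linux/errno.h>

struct loader_state {
	struct completion done;
	bool aborted;
};

static void loader_init(struct loader_state *st)
{
	init_completion(&st->done);
	st->aborted = false;
}

static void loader_finish(struct loader_state *st, bool aborted)
{
	st->aborted = aborted;
	/* complete_all() wakes current waiters and lets future ones through. */
	complete_all(&st->done);
}

static int loader_wait(struct loader_state *st, long timeout)
{
	long ret;

	/* Killable: only a fatal signal can interrupt the wait. */
	ret = wait_for_completion_killable_timeout(&st->done, timeout);
	if (!ret)
		return -ETIMEDOUT;
	if (ret < 0)
		return ret;
	return st->aborted ? -ENOENT : 0;
}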