| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-02-02 23:24:44 +0400 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2012-02-02 23:24:44 +0400 |
| commit | bd1d462e13b278fc57752d0b9b15040e60e561a0 (patch) | |
| tree | e2fdf1c18a93aab02830bcb8a5db8cdddfbb63a8 /drivers/acpi | |
| parent | d5c38b137ac8a6e3dbed13bc494d60df5b69dfc4 (diff) | |
| parent | 62aa2b537c6f5957afd98e29f96897419ed5ebab (diff) | |
| download | linux-bd1d462e13b278fc57752d0b9b15040e60e561a0.tar.xz | |
Merge 3.3-rc2 into the driver-core-next branch.
This was done to resolve a merge and build problem with the
drivers/acpi/processor_driver.c file.
Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/acpi')

| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | drivers/acpi/Makefile | 1 |
| -rw-r--r-- | drivers/acpi/apei/apei-base.c | 35 |
| -rw-r--r-- | drivers/acpi/apei/einj.c | 95 |
| -rw-r--r-- | drivers/acpi/atomicio.c | 422 |
| -rw-r--r-- | drivers/acpi/osl.c | 152 |
| -rw-r--r-- | drivers/acpi/processor_driver.c | 135 |
| -rw-r--r-- | drivers/acpi/sleep.c | 8 |

7 files changed, 318 insertions, 530 deletions
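
Note on the raw diff that follows: the largest entry in this diffstat is the removal of drivers/acpi/atomicio.c; its pre-mapped atomic accessors are replaced by acpi_os_read_memory64()/acpi_os_write_memory64() in osl.c, so apei_read()/apei_write() and einj.c no longer open-code 64-bit GAR accesses as two 32-bit transfers. osl.c still falls back to a pair of 32-bit accesses when the architecture provides no readq/writeq. The block below is a stand-alone userspace model of that fallback only, with illustrative names (read64_model()/write64_model()) and plain pointers in place of __iomem accessors; it is a sketch, not the kernel code.

```c
#include <stdint.h>
#include <stdio.h>

/*
 * Stand-alone model of the readq/writeq fallback kept in
 * drivers/acpi/osl.c: when the architecture has no native 64-bit MMIO
 * accessor, a 64-bit value is assembled from (or split into) two 32-bit
 * accesses -- low word at the base address, high word at offset +4.
 * Plain pointers stand in for readl()/writel() on __iomem mappings.
 */
static uint64_t read64_model(const volatile uint32_t *addr)
{
	uint64_t l = addr[0];			/* readl(addr)     */
	uint64_t h = addr[1];			/* readl(addr + 4) */

	return l | (h << 32);
}

static void write64_model(uint64_t val, volatile uint32_t *addr)
{
	addr[0] = (uint32_t)val;		/* writel(val, addr)           */
	addr[1] = (uint32_t)(val >> 32);	/* writel(val >> 32, addr + 4) */
}

int main(void)
{
	uint32_t reg[2] = { 0, 0 };	/* stands in for a 64-bit MMIO register */
	uint64_t v;

	write64_model(0x1122334455667788ULL, reg);
	v = read64_model(reg);
	printf("round-trip: %#llx\n", (unsigned long long)v);
	return v == 0x1122334455667788ULL ? 0 : 1;
}
```

Built as ordinary C, the round-trip check exercises the same low-word-first, high-word-at-+4 layout used by the read64()/write64() fallbacks added to osl.c below.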
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile index c07f44f05f9d..1567028d2038 100644 --- a/drivers/acpi/Makefile +++ b/drivers/acpi/Makefile @@ -19,7 +19,6 @@ obj-y += acpi.o \ # All the builtin files are in the "acpi." module_param namespace. acpi-y += osl.o utils.o reboot.o -acpi-y += atomicio.o acpi-y += nvs.o # sleep related files diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c index e45350cb6ac8..e5d53b7ddc7e 100644 --- a/drivers/acpi/apei/apei-base.c +++ b/drivers/acpi/apei/apei-base.c @@ -596,33 +596,19 @@ int apei_read(u64 *val, struct acpi_generic_address *reg) { int rc; u64 address; - u32 tmp, width = reg->bit_width; acpi_status status; rc = apei_check_gar(reg, &address); if (rc) return rc; - if (width == 64) - width = 32; /* Break into two 32-bit transfers */ - *val = 0; switch(reg->space_id) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: - status = acpi_os_read_memory((acpi_physical_address) - address, &tmp, width); + status = acpi_os_read_memory64((acpi_physical_address) + address, val, reg->bit_width); if (ACPI_FAILURE(status)) return -EIO; - *val = tmp; - - if (reg->bit_width == 64) { - /* Read the top 32 bits */ - status = acpi_os_read_memory((acpi_physical_address) - (address + 4), &tmp, 32); - if (ACPI_FAILURE(status)) - return -EIO; - *val |= ((u64)tmp << 32); - } break; case ACPI_ADR_SPACE_SYSTEM_IO: status = acpi_os_read_port(address, (u32 *)val, reg->bit_width); @@ -642,31 +628,18 @@ int apei_write(u64 val, struct acpi_generic_address *reg) { int rc; u64 address; - u32 width = reg->bit_width; acpi_status status; rc = apei_check_gar(reg, &address); if (rc) return rc; - if (width == 64) - width = 32; /* Break into two 32-bit transfers */ - switch (reg->space_id) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: - status = acpi_os_write_memory((acpi_physical_address) - address, ACPI_LODWORD(val), - width); + status = acpi_os_write_memory64((acpi_physical_address) + address, val, reg->bit_width); if (ACPI_FAILURE(status)) return -EIO; - - if (reg->bit_width == 64) { - status = acpi_os_write_memory((acpi_physical_address) - (address + 4), - ACPI_HIDWORD(val), 32); - if (ACPI_FAILURE(status)) - return -EIO; - } break; case ACPI_ADR_SPACE_SYSTEM_IO: status = acpi_os_write_port(address, val, reg->bit_width); diff --git a/drivers/acpi/apei/einj.c b/drivers/acpi/apei/einj.c index 5b898d4dda99..4ca087dd5f4f 100644 --- a/drivers/acpi/apei/einj.c +++ b/drivers/acpi/apei/einj.c @@ -141,21 +141,6 @@ static DEFINE_MUTEX(einj_mutex); static void *einj_param; -#ifndef readq -static inline __u64 readq(volatile void __iomem *addr) -{ - return ((__u64)readl(addr+4) << 32) + readl(addr); -} -#endif - -#ifndef writeq -static inline void writeq(__u64 val, volatile void __iomem *addr) -{ - writel(val, addr); - writel(val >> 32, addr+4); -} -#endif - static void einj_exec_ctx_init(struct apei_exec_context *ctx) { apei_exec_ctx_init(ctx, einj_ins_type, ARRAY_SIZE(einj_ins_type), @@ -204,22 +189,21 @@ static int einj_timedout(u64 *t) static void check_vendor_extension(u64 paddr, struct set_error_type_with_address *v5param) { - int offset = readl(&v5param->vendor_extension); + int offset = v5param->vendor_extension; struct vendor_error_type_extension *v; u32 sbdf; if (!offset) return; - v = ioremap(paddr + offset, sizeof(*v)); + v = acpi_os_map_memory(paddr + offset, sizeof(*v)); if (!v) return; - sbdf = readl(&v->pcie_sbdf); + sbdf = v->pcie_sbdf; sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n", sbdf >> 24, (sbdf >> 16) & 0xff, (sbdf >> 11) & 0x1f, (sbdf 
>> 8) & 0x7, - readw(&v->vendor_id), readw(&v->device_id), - readb(&v->rev_id)); - iounmap(v); + v->vendor_id, v->device_id, v->rev_id); + acpi_os_unmap_memory(v, sizeof(*v)); } static void *einj_get_parameter_address(void) @@ -247,7 +231,7 @@ static void *einj_get_parameter_address(void) if (paddrv5) { struct set_error_type_with_address *v5param; - v5param = ioremap(paddrv5, sizeof(*v5param)); + v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param)); if (v5param) { acpi5 = 1; check_vendor_extension(paddrv5, v5param); @@ -257,17 +241,17 @@ static void *einj_get_parameter_address(void) if (paddrv4) { struct einj_parameter *v4param; - v4param = ioremap(paddrv4, sizeof(*v4param)); + v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param)); if (!v4param) - return 0; - if (readq(&v4param->reserved1) || readq(&v4param->reserved2)) { - iounmap(v4param); - return 0; + return NULL; + if (v4param->reserved1 || v4param->reserved2) { + acpi_os_unmap_memory(v4param, sizeof(*v4param)); + return NULL; } return v4param; } - return 0; + return NULL; } /* do sanity check to trigger table */ @@ -276,7 +260,7 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab) if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger)) return -EINVAL; if (trigger_tab->table_size > PAGE_SIZE || - trigger_tab->table_size <= trigger_tab->header_size) + trigger_tab->table_size < trigger_tab->header_size) return -EINVAL; if (trigger_tab->entry_count != (trigger_tab->table_size - trigger_tab->header_size) / @@ -340,6 +324,11 @@ static int __einj_error_trigger(u64 trigger_paddr, u32 type, "The trigger error action table is invalid\n"); goto out_rel_header; } + + /* No action structures in the TRIGGER_ERROR table, nothing to do */ + if (!trigger_tab->entry_count) + goto out_rel_header; + rc = -EIO; table_size = trigger_tab->table_size; r = request_mem_region(trigger_paddr + sizeof(*trigger_tab), @@ -435,41 +424,41 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2) if (acpi5) { struct set_error_type_with_address *v5param = einj_param; - writel(type, &v5param->type); + v5param->type = type; if (type & 0x80000000) { switch (vendor_flags) { case SETWA_FLAGS_APICID: - writel(param1, &v5param->apicid); + v5param->apicid = param1; break; case SETWA_FLAGS_MEM: - writeq(param1, &v5param->memory_address); - writeq(param2, &v5param->memory_address_range); + v5param->memory_address = param1; + v5param->memory_address_range = param2; break; case SETWA_FLAGS_PCIE_SBDF: - writel(param1, &v5param->pcie_sbdf); + v5param->pcie_sbdf = param1; break; } - writel(vendor_flags, &v5param->flags); + v5param->flags = vendor_flags; } else { switch (type) { case ACPI_EINJ_PROCESSOR_CORRECTABLE: case ACPI_EINJ_PROCESSOR_UNCORRECTABLE: case ACPI_EINJ_PROCESSOR_FATAL: - writel(param1, &v5param->apicid); - writel(SETWA_FLAGS_APICID, &v5param->flags); + v5param->apicid = param1; + v5param->flags = SETWA_FLAGS_APICID; break; case ACPI_EINJ_MEMORY_CORRECTABLE: case ACPI_EINJ_MEMORY_UNCORRECTABLE: case ACPI_EINJ_MEMORY_FATAL: - writeq(param1, &v5param->memory_address); - writeq(param2, &v5param->memory_address_range); - writel(SETWA_FLAGS_MEM, &v5param->flags); + v5param->memory_address = param1; + v5param->memory_address_range = param2; + v5param->flags = SETWA_FLAGS_MEM; break; case ACPI_EINJ_PCIX_CORRECTABLE: case ACPI_EINJ_PCIX_UNCORRECTABLE: case ACPI_EINJ_PCIX_FATAL: - writel(param1, &v5param->pcie_sbdf); - writel(SETWA_FLAGS_PCIE_SBDF, &v5param->flags); + v5param->pcie_sbdf = param1; + v5param->flags = 
SETWA_FLAGS_PCIE_SBDF; break; } } @@ -479,8 +468,8 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2) return rc; if (einj_param) { struct einj_parameter *v4param = einj_param; - writeq(param1, &v4param->param1); - writeq(param2, &v4param->param2); + v4param->param1 = param1; + v4param->param2 = param2; } } rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION); @@ -731,8 +720,13 @@ static int __init einj_init(void) return 0; err_unmap: - if (einj_param) - iounmap(einj_param); + if (einj_param) { + acpi_size size = (acpi5) ? + sizeof(struct set_error_type_with_address) : + sizeof(struct einj_parameter); + + acpi_os_unmap_memory(einj_param, size); + } apei_exec_post_unmap_gars(&ctx); err_release: apei_resources_release(&einj_resources); @@ -748,8 +742,13 @@ static void __exit einj_exit(void) { struct apei_exec_context ctx; - if (einj_param) - iounmap(einj_param); + if (einj_param) { + acpi_size size = (acpi5) ? + sizeof(struct set_error_type_with_address) : + sizeof(struct einj_parameter); + + acpi_os_unmap_memory(einj_param, size); + } einj_exec_ctx_init(&ctx); apei_exec_post_unmap_gars(&ctx); apei_resources_release(&einj_resources); diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c deleted file mode 100644 index d4a5b3d3657b..000000000000 --- a/drivers/acpi/atomicio.c +++ /dev/null @@ -1,422 +0,0 @@ -/* - * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then - * accessing in atomic context. - * - * This is used for NMI handler to access IO memory area, because - * ioremap/iounmap can not be used in NMI handler. The IO memory area - * is pre-mapped in process context and accessed in NMI handler. - * - * Copyright (C) 2009-2010, Intel Corp. - * Author: Huang Ying <ying.huang@intel.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - */ - -#include <linux/kernel.h> -#include <linux/export.h> -#include <linux/init.h> -#include <linux/acpi.h> -#include <linux/io.h> -#include <linux/kref.h> -#include <linux/rculist.h> -#include <linux/interrupt.h> -#include <linux/slab.h> -#include <linux/mm.h> -#include <linux/highmem.h> -#include <acpi/atomicio.h> - -#define ACPI_PFX "ACPI: " - -static LIST_HEAD(acpi_iomaps); -/* - * Used for mutual exclusion between writers of acpi_iomaps list, for - * synchronization between readers and writer, RCU is used. 
- */ -static DEFINE_SPINLOCK(acpi_iomaps_lock); - -struct acpi_iomap { - struct list_head list; - void __iomem *vaddr; - unsigned long size; - phys_addr_t paddr; - struct kref ref; -}; - -/* acpi_iomaps_lock or RCU read lock must be held before calling */ -static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr, - unsigned long size) -{ - struct acpi_iomap *map; - - list_for_each_entry_rcu(map, &acpi_iomaps, list) { - if (map->paddr + map->size >= paddr + size && - map->paddr <= paddr) - return map; - } - return NULL; -} - -/* - * Atomic "ioremap" used by NMI handler, if the specified IO memory - * area is not pre-mapped, NULL will be returned. - * - * acpi_iomaps_lock or RCU read lock must be held before calling - */ -static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr, - unsigned long size) -{ - struct acpi_iomap *map; - - map = __acpi_find_iomap(paddr, size/8); - if (map) - return map->vaddr + (paddr - map->paddr); - else - return NULL; -} - -/* acpi_iomaps_lock must be held before calling */ -static void __iomem *__acpi_try_ioremap(phys_addr_t paddr, - unsigned long size) -{ - struct acpi_iomap *map; - - map = __acpi_find_iomap(paddr, size); - if (map) { - kref_get(&map->ref); - return map->vaddr + (paddr - map->paddr); - } else - return NULL; -} - -#ifndef CONFIG_IA64 -#define should_use_kmap(pfn) page_is_ram(pfn) -#else -/* ioremap will take care of cache attributes */ -#define should_use_kmap(pfn) 0 -#endif - -static void __iomem *acpi_map(phys_addr_t pg_off, unsigned long pg_sz) -{ - unsigned long pfn; - - pfn = pg_off >> PAGE_SHIFT; - if (should_use_kmap(pfn)) { - if (pg_sz > PAGE_SIZE) - return NULL; - return (void __iomem __force *)kmap(pfn_to_page(pfn)); - } else - return ioremap(pg_off, pg_sz); -} - -static void acpi_unmap(phys_addr_t pg_off, void __iomem *vaddr) -{ - unsigned long pfn; - - pfn = pg_off >> PAGE_SHIFT; - if (page_is_ram(pfn)) - kunmap(pfn_to_page(pfn)); - else - iounmap(vaddr); -} - -/* - * Used to pre-map the specified IO memory area. First try to find - * whether the area is already pre-mapped, if it is, increase the - * reference count (in __acpi_try_ioremap) and return; otherwise, do - * the real ioremap, and add the mapping into acpi_iomaps list. 
- */ -static void __iomem *acpi_pre_map(phys_addr_t paddr, - unsigned long size) -{ - void __iomem *vaddr; - struct acpi_iomap *map; - unsigned long pg_sz, flags; - phys_addr_t pg_off; - - spin_lock_irqsave(&acpi_iomaps_lock, flags); - vaddr = __acpi_try_ioremap(paddr, size); - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); - if (vaddr) - return vaddr; - - pg_off = paddr & PAGE_MASK; - pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off; - vaddr = acpi_map(pg_off, pg_sz); - if (!vaddr) - return NULL; - map = kmalloc(sizeof(*map), GFP_KERNEL); - if (!map) - goto err_unmap; - INIT_LIST_HEAD(&map->list); - map->paddr = pg_off; - map->size = pg_sz; - map->vaddr = vaddr; - kref_init(&map->ref); - - spin_lock_irqsave(&acpi_iomaps_lock, flags); - vaddr = __acpi_try_ioremap(paddr, size); - if (vaddr) { - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); - acpi_unmap(pg_off, map->vaddr); - kfree(map); - return vaddr; - } - list_add_tail_rcu(&map->list, &acpi_iomaps); - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); - - return map->vaddr + (paddr - map->paddr); -err_unmap: - acpi_unmap(pg_off, vaddr); - return NULL; -} - -/* acpi_iomaps_lock must be held before calling */ -static void __acpi_kref_del_iomap(struct kref *ref) -{ - struct acpi_iomap *map; - - map = container_of(ref, struct acpi_iomap, ref); - list_del_rcu(&map->list); -} - -/* - * Used to post-unmap the specified IO memory area. The iounmap is - * done only if the reference count goes zero. - */ -static void acpi_post_unmap(phys_addr_t paddr, unsigned long size) -{ - struct acpi_iomap *map; - unsigned long flags; - int del; - - spin_lock_irqsave(&acpi_iomaps_lock, flags); - map = __acpi_find_iomap(paddr, size); - BUG_ON(!map); - del = kref_put(&map->ref, __acpi_kref_del_iomap); - spin_unlock_irqrestore(&acpi_iomaps_lock, flags); - - if (!del) - return; - - synchronize_rcu(); - acpi_unmap(map->paddr, map->vaddr); - kfree(map); -} - -/* In NMI handler, should set silent = 1 */ -static int acpi_check_gar(struct acpi_generic_address *reg, - u64 *paddr, int silent) -{ - u32 width, space_id; - - width = reg->bit_width; - space_id = reg->space_id; - /* Handle possible alignment issues */ - memcpy(paddr, ®->address, sizeof(*paddr)); - if (!*paddr) { - if (!silent) - pr_warning(FW_BUG ACPI_PFX - "Invalid physical address in GAR [0x%llx/%u/%u]\n", - *paddr, width, space_id); - return -EINVAL; - } - - if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) { - if (!silent) - pr_warning(FW_BUG ACPI_PFX - "Invalid bit width in GAR [0x%llx/%u/%u]\n", - *paddr, width, space_id); - return -EINVAL; - } - - if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY && - space_id != ACPI_ADR_SPACE_SYSTEM_IO) { - if (!silent) - pr_warning(FW_BUG ACPI_PFX - "Invalid address space type in GAR [0x%llx/%u/%u]\n", - *paddr, width, space_id); - return -EINVAL; - } - - return 0; -} - -/* Pre-map, working on GAR */ -int acpi_pre_map_gar(struct acpi_generic_address *reg) -{ - u64 paddr; - void __iomem *vaddr; - int rc; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) - return 0; - - rc = acpi_check_gar(reg, &paddr, 0); - if (rc) - return rc; - - vaddr = acpi_pre_map(paddr, reg->bit_width / 8); - if (!vaddr) - return -EIO; - - return 0; -} -EXPORT_SYMBOL_GPL(acpi_pre_map_gar); - -/* Post-unmap, working on GAR */ -int acpi_post_unmap_gar(struct acpi_generic_address *reg) -{ - u64 paddr; - int rc; - - if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) - return 0; - - rc = acpi_check_gar(reg, &paddr, 0); - if (rc) - return rc; - - 
acpi_post_unmap(paddr, reg->bit_width / 8); - - return 0; -} -EXPORT_SYMBOL_GPL(acpi_post_unmap_gar); - -#ifdef readq -static inline u64 read64(const volatile void __iomem *addr) -{ - return readq(addr); -} -#else -static inline u64 read64(const volatile void __iomem *addr) -{ - u64 l, h; - l = readl(addr); - h = readl(addr+4); - return l | (h << 32); -} -#endif - -/* - * Can be used in atomic (including NMI) or process context. RCU read - * lock can only be released after the IO memory area accessing. - */ -static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width) -{ - void __iomem *addr; - - rcu_read_lock(); - addr = __acpi_ioremap_fast(paddr, width); - switch (width) { - case 8: - *val = readb(addr); - break; - case 16: - *val = readw(addr); - break; - case 32: - *val = readl(addr); - break; - case 64: - *val = read64(addr); - break; - default: - return -EINVAL; - } - rcu_read_unlock(); - - return 0; -} - -#ifdef writeq -static inline void write64(u64 val, volatile void __iomem *addr) -{ - writeq(val, addr); -} -#else -static inline void write64(u64 val, volatile void __iomem *addr) -{ - writel(val, addr); - writel(val>>32, addr+4); -} -#endif - -static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width) -{ - void __iomem *addr; - - rcu_read_lock(); - addr = __acpi_ioremap_fast(paddr, width); - switch (width) { - case 8: - writeb(val, addr); - break; - case 16: - writew(val, addr); - break; - case 32: - writel(val, addr); - break; - case 64: - write64(val, addr); - break; - default: - return -EINVAL; - } - rcu_read_unlock(); - - return 0; -} - -/* GAR accessing in atomic (including NMI) or process context */ -int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg) -{ - u64 paddr; - int rc; - - rc = acpi_check_gar(reg, &paddr, 1); - if (rc) - return rc; - - *val = 0; - switch (reg->space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - return acpi_atomic_read_mem(paddr, val, reg->bit_width); - case ACPI_ADR_SPACE_SYSTEM_IO: - return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width); - default: - return -EINVAL; - } -} -EXPORT_SYMBOL_GPL(acpi_atomic_read); - -int acpi_atomic_write(u64 val, struct acpi_generic_address *reg) -{ - u64 paddr; - int rc; - - rc = acpi_check_gar(reg, &paddr, 1); - if (rc) - return rc; - - switch (reg->space_id) { - case ACPI_ADR_SPACE_SYSTEM_MEMORY: - return acpi_atomic_write_mem(paddr, val, reg->bit_width); - case ACPI_ADR_SPACE_SYSTEM_IO: - return acpi_os_write_port(paddr, val, reg->bit_width); - default: - return -EINVAL; - } -} -EXPORT_SYMBOL_GPL(acpi_atomic_write); diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c index fcc12d842bcc..412a1e04a922 100644 --- a/drivers/acpi/osl.c +++ b/drivers/acpi/osl.c @@ -31,6 +31,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> +#include <linux/highmem.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/kmod.h> @@ -321,6 +322,37 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size) return NULL; } +#ifndef CONFIG_IA64 +#define should_use_kmap(pfn) page_is_ram(pfn) +#else +/* ioremap will take care of cache attributes */ +#define should_use_kmap(pfn) 0 +#endif + +static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz) +{ + unsigned long pfn; + + pfn = pg_off >> PAGE_SHIFT; + if (should_use_kmap(pfn)) { + if (pg_sz > PAGE_SIZE) + return NULL; + return (void __iomem __force *)kmap(pfn_to_page(pfn)); + } else + return acpi_os_ioremap(pg_off, pg_sz); +} + +static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr) 
+{ + unsigned long pfn; + + pfn = pg_off >> PAGE_SHIFT; + if (page_is_ram(pfn)) + kunmap(pfn_to_page(pfn)); + else + iounmap(vaddr); +} + void __iomem *__init_refok acpi_os_map_memory(acpi_physical_address phys, acpi_size size) { @@ -353,7 +385,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size) pg_off = round_down(phys, PAGE_SIZE); pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off; - virt = acpi_os_ioremap(pg_off, pg_sz); + virt = acpi_map(pg_off, pg_sz); if (!virt) { mutex_unlock(&acpi_ioremap_lock); kfree(map); @@ -384,7 +416,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map) { if (!map->refcount) { synchronize_rcu(); - iounmap(map->virt); + acpi_unmap(map->phys, map->virt); kfree(map); } } @@ -710,6 +742,67 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width) return AE_OK; } +#ifdef readq +static inline u64 read64(const volatile void __iomem *addr) +{ + return readq(addr); +} +#else +static inline u64 read64(const volatile void __iomem *addr) +{ + u64 l, h; + l = readl(addr); + h = readl(addr+4); + return l | (h << 32); +} +#endif + +acpi_status +acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width) +{ + void __iomem *virt_addr; + unsigned int size = width / 8; + bool unmap = false; + u64 dummy; + + rcu_read_lock(); + virt_addr = acpi_map_vaddr_lookup(phys_addr, size); + if (!virt_addr) { + rcu_read_unlock(); + virt_addr = acpi_os_ioremap(phys_addr, size); + if (!virt_addr) + return AE_BAD_ADDRESS; + unmap = true; + } + + if (!value) + value = &dummy; + + switch (width) { + case 8: + *(u8 *) value = readb(virt_addr); + break; + case 16: + *(u16 *) value = readw(virt_addr); + break; + case 32: + *(u32 *) value = readl(virt_addr); + break; + case 64: + *(u64 *) value = read64(virt_addr); + break; + default: + BUG(); + } + + if (unmap) + iounmap(virt_addr); + else + rcu_read_unlock(); + + return AE_OK; +} + acpi_status acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) { @@ -749,6 +842,61 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width) return AE_OK; } +#ifdef writeq +static inline void write64(u64 val, volatile void __iomem *addr) +{ + writeq(val, addr); +} +#else +static inline void write64(u64 val, volatile void __iomem *addr) +{ + writel(val, addr); + writel(val>>32, addr+4); +} +#endif + +acpi_status +acpi_os_write_memory64(acpi_physical_address phys_addr, u64 value, u32 width) +{ + void __iomem *virt_addr; + unsigned int size = width / 8; + bool unmap = false; + + rcu_read_lock(); + virt_addr = acpi_map_vaddr_lookup(phys_addr, size); + if (!virt_addr) { + rcu_read_unlock(); + virt_addr = acpi_os_ioremap(phys_addr, size); + if (!virt_addr) + return AE_BAD_ADDRESS; + unmap = true; + } + + switch (width) { + case 8: + writeb(value, virt_addr); + break; + case 16: + writew(value, virt_addr); + break; + case 32: + writel(value, virt_addr); + break; + case 64: + write64(value, virt_addr); + break; + default: + BUG(); + } + + if (unmap) + iounmap(virt_addr); + else + rcu_read_unlock(); + + return AE_OK; +} + acpi_status acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg, u64 *value, u32 width) diff --git a/drivers/acpi/processor_driver.c b/drivers/acpi/processor_driver.c index e6920d0aca53..f289d2afbd4e 100644 --- a/drivers/acpi/processor_driver.c +++ b/drivers/acpi/processor_driver.c @@ -84,7 +84,7 @@ static int acpi_processor_remove(struct acpi_device *device, int type); static void acpi_processor_notify(struct acpi_device *device, u32 
event); static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr); static int acpi_processor_handle_eject(struct acpi_processor *pr); - +static int acpi_processor_start(struct acpi_processor *pr); static const struct acpi_device_id processor_device_ids[] = { {ACPI_PROCESSOR_OBJECT_HID, 0}, @@ -423,10 +423,29 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb, struct acpi_processor *pr = per_cpu(processors, cpu); if (action == CPU_ONLINE && pr) { - acpi_processor_ppc_has_changed(pr, 0); - acpi_processor_hotplug(pr); - acpi_processor_reevaluate_tstate(pr, action); - acpi_processor_tstate_has_changed(pr); + /* CPU got physically hotplugged and onlined the first time: + * Initialize missing things + */ + if (pr->flags.need_hotplug_init) { + struct cpuidle_driver *idle_driver = + cpuidle_get_driver(); + + printk(KERN_INFO "Will online and init hotplugged " + "CPU: %d\n", pr->id); + WARN(acpi_processor_start(pr), "Failed to start CPU:" + " %d\n", pr->id); + pr->flags.need_hotplug_init = 0; + if (idle_driver && !strcmp(idle_driver->name, + "intel_idle")) { + intel_idle_cpu_init(pr->id); + } + /* Normal CPU soft online event */ + } else { + acpi_processor_ppc_has_changed(pr, 0); + acpi_processor_cst_has_changed(pr); + acpi_processor_reevaluate_tstate(pr, action); + acpi_processor_tstate_has_changed(pr); + } } if (action == CPU_DEAD && pr) { /* invalidate the flag.throttling after one CPU is offline */ @@ -440,6 +459,71 @@ static struct notifier_block acpi_cpu_notifier = .notifier_call = acpi_cpu_soft_notify, }; +/* + * acpi_processor_start() is called by the cpu_hotplug_notifier func: + * acpi_cpu_soft_notify(). Getting it __cpuinit{data} is difficult, the + * root cause seem to be that acpi_processor_uninstall_hotplug_notify() + * is in the module_exit (__exit) func. Allowing acpi_processor_start() + * to not be in __cpuinit section, but being called from __cpuinit funcs + * via __ref looks like the right thing to do here. + */ +static __ref int acpi_processor_start(struct acpi_processor *pr) +{ + struct acpi_device *device = per_cpu(processor_device_array, pr->id); + int result = 0; + +#ifdef CONFIG_CPU_FREQ + acpi_processor_ppc_has_changed(pr, 0); +#endif + acpi_processor_get_throttling_info(pr); + acpi_processor_get_limit_info(pr); + + if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver) + acpi_processor_power_init(pr, device); + + pr->cdev = thermal_cooling_device_register("Processor", device, + &processor_cooling_ops); + if (IS_ERR(pr->cdev)) { + result = PTR_ERR(pr->cdev); + goto err_power_exit; + } + + dev_dbg(&device->dev, "registered as cooling_device%d\n", + pr->cdev->id); + + result = sysfs_create_link(&device->dev.kobj, + &pr->cdev->device.kobj, + "thermal_cooling"); + if (result) { + printk(KERN_ERR PREFIX "Create sysfs link\n"); + goto err_thermal_unregister; + } + result = sysfs_create_link(&pr->cdev->device.kobj, + &device->dev.kobj, + "device"); + if (result) { + printk(KERN_ERR PREFIX "Create sysfs link\n"); + goto err_remove_sysfs_thermal; + } + + return 0; + +err_remove_sysfs_thermal: + sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); +err_thermal_unregister: + thermal_cooling_device_unregister(pr->cdev); +err_power_exit: + acpi_processor_power_exit(pr, device); + + return result; +} + +/* + * Do not put anything in here which needs the core to be online. + * For example MSR access or setting up things which check for cpuinfo_x86 + * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc. 
+ * Such things have to be put in and set up above in acpi_processor_start() + */ static int __cpuinit acpi_processor_add(struct acpi_device *device) { struct acpi_processor *pr = NULL; @@ -509,35 +593,23 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device) &processor_cooling_ops); if (IS_ERR(pr->cdev)) { result = PTR_ERR(pr->cdev); - goto err_power_exit; + goto err_remove_sysfs; } - dev_dbg(&device->dev, "registered as cooling_device%d\n", - pr->cdev->id); + /* + * Do not start hotplugged CPUs now, but when they + * are onlined the first time + */ + if (pr->flags.need_hotplug_init) + return 0; - result = sysfs_create_link(&device->dev.kobj, - &pr->cdev->device.kobj, - "thermal_cooling"); - if (result) { - printk(KERN_ERR PREFIX "Create sysfs link\n"); - goto err_thermal_unregister; - } - result = sysfs_create_link(&pr->cdev->device.kobj, - &device->dev.kobj, - "device"); - if (result) { - printk(KERN_ERR PREFIX "Create sysfs link\n"); + result = acpi_processor_start(pr); + if (result) goto err_remove_sysfs; - } return 0; err_remove_sysfs: - sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); -err_thermal_unregister: - thermal_cooling_device_unregister(pr->cdev); -err_power_exit: - acpi_processor_power_exit(pr, device); sysfs_remove_link(&device->dev.kobj, "sysdev"); err_free_cpumask: free_cpumask_var(pr->throttling.shared_cpu_map); @@ -736,6 +808,17 @@ static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr) return AE_ERROR; } + /* CPU got hot-plugged, but cpu_data is not initialized yet + * Set flag to delay cpu_idle/throttling initialization + * in: + * acpi_processor_add() + * acpi_processor_get_info() + * and do it when the CPU gets online the first time + * TBD: Cleanup above functions and try to do this more elegant. + */ + printk(KERN_INFO "CPU %d got hotplugged\n", pr->id); + pr->flags.need_hotplug_init = 1; + return AE_OK; } diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index 0a7ed69546ba..ca191ff97844 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -438,6 +438,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = { }, { .callback = init_nvs_nosave, + .ident = "Sony Vaio VPCCW29FX", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"), + }, + }, + { + .callback = init_nvs_nosave, .ident = "Averatec AV1020-ED2", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"), |
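
The processor_driver.c hunks above defer most per-CPU setup for physically hot-added processors: acpi_processor_hotadd_init() now only sets pr->flags.need_hotplug_init and prints a notice, and the CPU-online notifier calls acpi_processor_start() the first time such a CPU comes online, while CPUs present at boot keep taking the normal soft-online path. The comment added above acpi_processor_add() explains why: nothing that needs the core to be online (MSR access, cpu_data() checks) may run before the CPU is actually up. A reduced userspace model of that two-phase pattern, using purely illustrative names rather than the kernel's API, is:

```c
#include <stdbool.h>
#include <stdio.h>

/*
 * Reduced model of the deferred-initialization pattern added to
 * drivers/acpi/processor_driver.c: a physically hot-added CPU is only
 * flagged at add time, and the expensive setup runs from the online
 * notifier the first time the CPU actually comes up.
 */
struct cpu_model {
	int id;
	bool need_hotplug_init;	/* set at hot-add, cleared after first online */
	bool started;
};

static int processor_start_model(struct cpu_model *cpu)
{
	/* stands in for acpi_processor_start(): throttling info, cpuidle,
	 * thermal cooling device, sysfs links, ... */
	cpu->started = true;
	printf("CPU %d: deferred init done\n", cpu->id);
	return 0;
}

static void hotadd_init_model(struct cpu_model *cpu)
{
	/* acpi_processor_hotadd_init() now only records that init is pending */
	cpu->need_hotplug_init = true;
	printf("CPU %d got hotplugged\n", cpu->id);
}

static void cpu_online_notify_model(struct cpu_model *cpu)
{
	if (cpu->need_hotplug_init) {
		/* first online after physical hot-add: run the deferred setup */
		processor_start_model(cpu);
		cpu->need_hotplug_init = false;
	} else {
		/* normal soft-online path: only re-evaluate P-/C-/T-states */
		printf("CPU %d: re-evaluating performance/idle/throttling state\n",
		       cpu->id);
	}
}

int main(void)
{
	struct cpu_model cpu = { .id = 2 };

	hotadd_init_model(&cpu);
	cpu_online_notify_model(&cpu);	/* triggers the deferred init */
	cpu_online_notify_model(&cpu);	/* later onlines take the normal path */
	return 0;
}
```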