Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r-- | arch/powerpc/kernel/cputable.c       | 11
-rw-r--r-- | arch/powerpc/kernel/iomap.c          |  2
-rw-r--r-- | arch/powerpc/kernel/machine_kexec.c  | 10
-rw-r--r-- | arch/powerpc/kernel/perf_callchain.c | 20
-rw-r--r-- | arch/powerpc/kernel/prom_init.c      | 14
5 files changed, 40 insertions, 17 deletions
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 9fb933248ab6..fa44ff538861 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2051,7 +2051,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
 
 static struct cpu_spec the_cpu_spec;
 
-static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
+static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
+					       struct cpu_spec *s)
 {
 	struct cpu_spec *t = &the_cpu_spec;
 	struct cpu_spec old;
@@ -2114,6 +2115,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s)
 		t->cpu_setup(offset, t);
 	}
 #endif /* CONFIG_PPC64 || CONFIG_BOOKE */
+
+	return t;
 }
 
 struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
@@ -2124,10 +2127,8 @@ struct cpu_spec * __init identify_cpu(unsigned long offset, unsigned int pvr)
 	s = PTRRELOC(s);
 
 	for (i = 0; i < ARRAY_SIZE(cpu_specs); i++,s++) {
-		if ((pvr & s->pvr_mask) == s->pvr_value) {
-			setup_cpu_spec(offset, s);
-			return s;
-		}
+		if ((pvr & s->pvr_mask) == s->pvr_value)
+			return setup_cpu_spec(offset, s);
 	}
 
 	BUG();
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index 1577434f4088..b25f6325fc70 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -117,6 +117,7 @@ void ioport_unmap(void __iomem *addr)
 EXPORT_SYMBOL(ioport_map);
 EXPORT_SYMBOL(ioport_unmap);
 
+#ifdef CONFIG_PCI
 void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
 {
 	resource_size_t start = pci_resource_start(dev, bar);
@@ -146,3 +147,4 @@ void pci_iounmap(struct pci_dev *dev, void __iomem *addr)
 
 EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
+#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 6658a1589955..9ce1672afb59 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -136,12 +136,16 @@ void __init reserve_crashkernel(void)
 	crashk_res.start = KDUMP_KERNELBASE;
 #else
 	if (!crashk_res.start) {
+#ifdef CONFIG_PPC64
 		/*
-		 * unspecified address, choose a region of specified size
-		 * can overlap with initrd (ignoring corruption when retained)
-		 * ppc64 requires kernel and some stacks to be in first segemnt
+		 * On 64bit we split the RMO in half but cap it at half of
+		 * a small SLB (128MB) since the crash kernel needs to place
+		 * itself and some stacks to be in the first segment.
 		 */
+		crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2));
+#else
 		crashk_res.start = KDUMP_KERNELBASE;
+#endif
 	}
 
 	crash_base = PAGE_ALIGN(crashk_res.start);
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c
index d05ae4204bbf..564c1d8bdb5c 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/kernel/perf_callchain.c
@@ -154,8 +154,12 @@ static int read_user_stack_64(unsigned long __user *ptr, unsigned long *ret)
 	    ((unsigned long)ptr & 7))
 		return -EFAULT;
 
-	if (!__get_user_inatomic(*ret, ptr))
+	pagefault_disable();
+	if (!__get_user_inatomic(*ret, ptr)) {
+		pagefault_enable();
 		return 0;
+	}
+	pagefault_enable();
 
 	return read_user_stack_slow(ptr, ret, 8);
 }
@@ -166,8 +170,12 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 	    ((unsigned long)ptr & 3))
 		return -EFAULT;
 
-	if (!__get_user_inatomic(*ret, ptr))
+	pagefault_disable();
+	if (!__get_user_inatomic(*ret, ptr)) {
+		pagefault_enable();
 		return 0;
+	}
+	pagefault_enable();
 
 	return read_user_stack_slow(ptr, ret, 4);
 }
@@ -294,11 +302,17 @@ static inline int current_is_64bit(void)
  */
 static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret)
 {
+	int rc;
+
 	if ((unsigned long)ptr > TASK_SIZE - sizeof(unsigned int) ||
 	    ((unsigned long)ptr & 3))
 		return -EFAULT;
 
-	return __get_user_inatomic(*ret, ptr);
+	pagefault_disable();
+	rc = __get_user_inatomic(*ret, ptr);
+	pagefault_enable();
+
+	return rc;
 }
 
 static inline void perf_callchain_user_64(struct perf_callchain_entry *entry,
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index c016033ba78d..a909f4e9343b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1020,7 +1020,7 @@ static unsigned long __init alloc_up(unsigned long size, unsigned long align)
 	}
 	if (addr == 0)
 		return 0;
-	RELOC(alloc_bottom) = addr;
+	RELOC(alloc_bottom) = addr + size;
 
 	prom_debug(" -> %x\n", addr);
 	prom_debug("  alloc_bottom : %x\n", RELOC(alloc_bottom));
@@ -1830,11 +1830,13 @@ static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
 		if (room > DEVTREE_CHUNK_SIZE)
 			room = DEVTREE_CHUNK_SIZE;
 		if (room < PAGE_SIZE)
-			prom_panic("No memory for flatten_device_tree (no room)");
+			prom_panic("No memory for flatten_device_tree "
+				   "(no room)\n");
 		chunk = alloc_up(room, 0);
 		if (chunk == 0)
-			prom_panic("No memory for flatten_device_tree (claim failed)");
-		*mem_end = RELOC(alloc_top);
+			prom_panic("No memory for flatten_device_tree "
+				   "(claim failed)\n");
+		*mem_end = chunk + room;
 	}
 
 	ret = (void *)*mem_start;
@@ -2042,7 +2044,7 @@ static void __init flatten_device_tree(void)
 
 	/*
 	 * Check how much room we have between alloc top & bottom (+/- a
-	 * few pages), crop to 4Mb, as this is our "chuck" size
+	 * few pages), crop to 1MB, as this is our "chunk" size
 	 */
 	room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
 	if (room > DEVTREE_CHUNK_SIZE)
@@ -2053,7 +2055,7 @@ static void __init flatten_device_tree(void)
 	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
 	if (mem_start == 0)
 		prom_panic("Can't allocate initial device-tree chunk\n");
-	mem_end = RELOC(alloc_top);
+	mem_end = mem_start + room;
 
 	/* Get root of tree */
 	root = call_prom("peer", 1, 1, (phandle)0);
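For context on the prom_init.c hunk that changes RELOC(alloc_bottom) = addr to addr + size: a bump allocator must advance its bottom mark past the block it just handed out, otherwise the next claim returns the same region. Below is a minimal stand-alone sketch of that behaviour, not the kernel code; the names (bump_alloc, the 0x100000 start address) are hypothetical and chosen only for illustration.

/* Stand-alone sketch (hypothetical names) of the alloc_up() fix:
 * advancing the bottom mark by the allocation size keeps successive
 * allocations from overlapping. */
#include <stdio.h>

static unsigned long alloc_bottom = 0x100000;	/* hypothetical start address */

static unsigned long bump_alloc(unsigned long size)
{
	unsigned long addr = alloc_bottom;

	/* Buggy update would be: alloc_bottom = addr;
	 * Corrected update mirrors "RELOC(alloc_bottom) = addr + size". */
	alloc_bottom = addr + size;
	return addr;
}

int main(void)
{
	unsigned long a = bump_alloc(0x1000);
	unsigned long b = bump_alloc(0x1000);

	/* With the corrected update, b == a + 0x1000, so the regions
	 * do not overlap. */
	printf("a=%#lx b=%#lx\n", a, b);
	return 0;
}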