author      Linus Torvalds <torvalds@linux-foundation.org>  2016-10-08 06:50:37 +0300
committer   Linus Torvalds <torvalds@linux-foundation.org>  2016-10-08 06:50:37 +0300
commit      997b611baf7591ea5119539ee821a3e2f4fcf24e (patch)
tree        9f2c78a343ddb90810f5dabc8e6cf07ee118eb19
parent      2c34ff14bf1d03a705f5400888ecac5b6400e981 (diff)
parent      690d097c00c88fa9d93d198591e184164b1d8c20 (diff)
download    linux-997b611baf7591ea5119539ee821a3e2f4fcf24e.tar.xz
Merge branch 'parisc-4.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux
Pull parisc updates from Helge Deller:
"Changes include:
- Fix boot of 32bit SMP kernel (initial kernel mapping was too small)
- Added hardened usercopy checks
- Drop bootmem and switch to memblock and NO_BOOTMEM implementation
- Drop the BROKEN_RODATA config option (and thus remove the relevant
code from the generic headers and files because parisc was the last
architecture which used this config option)
- Improve segfault reporting by printing human readable error strings
- Various smaller changes, e.g. dwarf debug support for assembly
code, update comments regarding copy_user_page_asm, switch to
kmalloc_array()"
* 'parisc-4.9-1' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
parisc: Increase KERNEL_INITIAL_SIZE for 32-bit SMP kernels
parisc: Drop bootmem and switch to memblock
parisc: Add hardened usercopy feature
parisc: Add cfi_startproc and cfi_endproc to assembly code
parisc: Move hpmc stack into page aligned bss section
parisc: Fix self-detected CPU stall warnings on Mako machines
parisc: Report trap type as human readable string
parisc: Update comment regarding implementation of copy_user_page_asm
parisc: Use kmalloc_array() in add_system_map_addresses()
parisc: Check return value of smp_boot_one_cpu()
parisc: Drop BROKEN_RODATA config option
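The "Add hardened usercopy feature" commit listed above routes copy_from_user()/copy_to_user() through __compiletime_object_size() and check_object_size() before falling through to __copy_from_user()/__copy_to_user() (see the arch/parisc/include/asm/uaccess.h hunk in the diff further down). The following is a minimal userspace sketch of that gating idea only, not the kernel code: guarded_copy() and check_copy_size() are invented names, and an explicit destination-size argument stands in for the compile-time and object-bounds checks the kernel actually performs.

#include <stdio.h>
#include <string.h>

/* Stand-in for the kernel's check_object_size(): succeed only when the
 * requested copy fits inside the destination object. */
static int check_copy_size(size_t dst_size, size_t n)
{
	if (n > dst_size) {
		fprintf(stderr,
			"usercopy check: %zu-byte copy into %zu-byte object blocked\n",
			n, dst_size);
		return -1;
	}
	return 0;
}

/* Shaped like copy_from_user(): returns the number of bytes NOT copied. */
static size_t guarded_copy(void *dst, size_t dst_size,
			   const void *src, size_t n)
{
	if (check_copy_size(dst_size, n))
		return n;		/* refuse the whole copy */
	memcpy(dst, src, n);		/* kernel: __copy_from_user() */
	return 0;
}

int main(void)
{
	char small[8];
	const char msg[] = "0123456789abcdef";

	printf("good copy, %zu bytes left\n",
	       guarded_copy(small, sizeof(small), msg, 4));
	printf("oversized copy, %zu bytes left\n",
	       guarded_copy(small, sizeof(small), msg, sizeof(msg)));
	return 0;
}

Run under these assumptions, the 4-byte request is copied while the 17-byte request is refused and reported as entirely uncopied, which mirrors how the hardened check turns a potential overflow into a reported, uncopied remainder.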
-rw-r--r--  arch/parisc/Kconfig                 |   4
-rw-r--r--  arch/parisc/include/asm/dwarf.h     |  23
-rw-r--r--  arch/parisc/include/asm/linkage.h   |  12
-rw-r--r--  arch/parisc/include/asm/pgtable.h   |   2
-rw-r--r--  arch/parisc/include/asm/uaccess.h   |  48
-rw-r--r--  arch/parisc/kernel/entry.S          |  46
-rw-r--r--  arch/parisc/kernel/hpmc.S           |  16
-rw-r--r--  arch/parisc/kernel/inventory.c      |   2
-rw-r--r--  arch/parisc/kernel/pacache.S        |  84
-rw-r--r--  arch/parisc/kernel/real2.S          |  24
-rw-r--r--  arch/parisc/kernel/setup.c          |   8
-rw-r--r--  arch/parisc/kernel/smp.c            |   4
-rw-r--r--  arch/parisc/kernel/time.c           |   6
-rw-r--r--  arch/parisc/kernel/vmlinux.lds.S    |   2
-rw-r--r--  arch/parisc/lib/fixup.S             |  16
-rw-r--r--  arch/parisc/lib/lusercopy.S         |   8
-rw-r--r--  arch/parisc/lib/memcpy.c            |  11
-rw-r--r--  arch/parisc/mm/fault.c              |  48
-rw-r--r--  arch/parisc/mm/init.c               | 126
-rw-r--r--  include/linux/init.h                |  19
-rw-r--r--  init/Kconfig                        |   6
21 files changed, 308 insertions(+), 207 deletions(-)
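Among the hunks that follow, the bootmem-to-memblock conversion in arch/parisc/mm/init.c adds a get_memblock() helper that walks upward from the end of the kernel image looking for a block that is real memory and not already reserved, because only the first KERNEL_INITIAL_SIZE bytes are mapped this early in boot. Below is a self-contained userspace simulation of that bottom-up search; the addresses are made up, and region_is_memory(), region_is_reserved() and get_block() are toy stand-ins rather than the memblock API.

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

/* Toy model of early boot memory: one RAM bank plus a reservation list. */
#define RAM_BASE   0x00100000UL		/* hypothetical bank start */
#define RAM_SIZE   0x04000000UL		/* 64 MB */
#define KERNEL_END 0x00800000UL		/* stand-in for __pa(_end) */

struct range { uintptr_t base; size_t size; };
static struct range reserved[16];
static int nr_reserved;

static bool region_is_memory(uintptr_t base, size_t size)
{
	return base >= RAM_BASE && base + size <= RAM_BASE + RAM_SIZE;
}

static bool region_is_reserved(uintptr_t base, size_t size)
{
	for (int i = 0; i < nr_reserved; i++)
		if (base < reserved[i].base + reserved[i].size &&
		    reserved[i].base < base + size)
			return true;
	return false;
}

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

/* Bottom-up search starting right after the kernel image, shaped like the
 * patch's get_memblock(). */
static uintptr_t get_block(size_t size)
{
	static uintptr_t search_addr;

	if (!search_addr)
		search_addr = KERNEL_END;
	search_addr = ALIGN_UP(search_addr, size);
	while (!region_is_memory(search_addr, size) ||
	       region_is_reserved(search_addr, size))
		search_addr += size;

	reserved[nr_reserved++] = (struct range){ search_addr, size };
	return search_addr;
}

int main(void)
{
	/* pretend firmware data already sits right after the kernel image */
	reserved[nr_reserved++] = (struct range){ KERNEL_END, 0x4000 };

	printf("pmd block at 0x%lx\n", (unsigned long)get_block(0x4000));
	printf("pte page  at 0x%lx\n", (unsigned long)get_block(0x1000));
	return 0;
}

The real helper additionally reserves the found range with memblock_reserve(), returns a virtual address via __va(), and panics if the search fails.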
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index af12c2db9bb8..2a0339a68894 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -10,12 +10,13 @@ config PARISC select RTC_CLASS select RTC_DRV_GENERIC select INIT_ALL_POSSIBLE + select HAVE_MEMBLOCK + select NO_BOOTMEM select BUG select BUILDTIME_EXTABLE_SORT select HAVE_PERF_EVENTS select GENERIC_ATOMIC64 if !64BIT select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE - select BROKEN_RODATA select GENERIC_IRQ_PROBE select GENERIC_PCI_IOMAP select ARCH_HAVE_NMI_SAFE_CMPXCHG @@ -24,6 +25,7 @@ config PARISC select SYSCTL_ARCH_UNALIGN_ALLOW select SYSCTL_EXCEPTION_TRACE select HAVE_MOD_ARCH_SPECIFIC + select HAVE_ARCH_HARDENED_USERCOPY select VIRT_TO_BUS select MODULES_USE_ELF_RELA select CLONE_BACKWARDS diff --git a/arch/parisc/include/asm/dwarf.h b/arch/parisc/include/asm/dwarf.h new file mode 100644 index 000000000000..8fe7d6b2cc42 --- /dev/null +++ b/arch/parisc/include/asm/dwarf.h @@ -0,0 +1,23 @@ +/* + * Copyright (C) 2016 Helge Deller <deller@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _ASM_PARISC_DWARF_H +#define _ASM_PARISC_DWARF_H + +#ifdef __ASSEMBLY__ + +#define CFI_STARTPROC .cfi_startproc +#define CFI_ENDPROC .cfi_endproc +#define CFI_DEF_CFA .cfi_def_cfa +#define CFI_REGISTER .cfi_register +#define CFI_REL_OFFSET .cfi_rel_offset +#define CFI_UNDEFINED .cfi_undefined + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_PARISC_DWARF_H */ diff --git a/arch/parisc/include/asm/linkage.h b/arch/parisc/include/asm/linkage.h index 0b19a7242d0c..67e6b433d399 100644 --- a/arch/parisc/include/asm/linkage.h +++ b/arch/parisc/include/asm/linkage.h @@ -1,6 +1,8 @@ #ifndef __ASM_PARISC_LINKAGE_H #define __ASM_PARISC_LINKAGE_H +#include <asm/dwarf.h> + #ifndef __ALIGN #define __ALIGN .align 4 #define __ALIGN_STR ".align 4" @@ -10,6 +12,8 @@ * In parisc assembly a semicolon marks a comment while a * exclamation mark is used to separate independent lines. */ +#define ASM_NL ! 
+ #ifdef __ASSEMBLY__ #define ENTRY(name) \ @@ -26,6 +30,14 @@ name: END(name) #endif +#define ENTRY_CFI(name) \ + ENTRY(name) ASM_NL\ + CFI_STARTPROC + +#define ENDPROC_CFI(name) \ + ENDPROC(name) ASM_NL\ + CFI_ENDPROC + #endif /* __ASSEMBLY__ */ #endif /* __ASM_PARISC_LINKAGE_H */ diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h index 291cee28ccb6..e44bdb9078a5 100644 --- a/arch/parisc/include/asm/pgtable.h +++ b/arch/parisc/include/asm/pgtable.h @@ -83,7 +83,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr) printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e)) /* This is the size of the initially mapped kernel memory */ -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) || defined(CONFIG_SMP) #define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */ #else #define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */ diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 482847865dac..9a2aee1b90fc 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -11,6 +11,7 @@ #include <linux/bug.h> #include <linux/string.h> +#include <linux/thread_info.h> #define VERIFY_READ 0 #define VERIFY_WRITE 1 @@ -201,10 +202,12 @@ extern long lstrnlen_user(const char __user *, long); #define clear_user lclear_user #define __clear_user lclear_user -unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len); -#define __copy_to_user copy_to_user -unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len); -unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len); +unsigned long __must_check __copy_to_user(void __user *dst, const void *src, + unsigned long len); +unsigned long __must_check __copy_from_user(void *dst, const void __user *src, + unsigned long len); +unsigned long copy_in_user(void __user *dst, const void __user *src, + unsigned long len); #define __copy_in_user copy_in_user #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user @@ -217,23 +220,40 @@ static inline void copy_user_overflow(int size, unsigned long count) WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); } -static inline unsigned long __must_check copy_from_user(void *to, - const void __user *from, - unsigned long n) +static __always_inline unsigned long __must_check +copy_from_user(void *to, const void __user *from, unsigned long n) { - int sz = __compiletime_object_size(to); - unsigned long ret = n; + int sz = __compiletime_object_size(to); + unsigned long ret = n; - if (likely(sz == -1 || sz >= n)) - ret = __copy_from_user(to, from, n); - else if (!__builtin_constant_p(n)) + if (likely(sz < 0 || sz >= n)) { + check_object_size(to, n, false); + ret = __copy_from_user(to, from, n); + } else if (!__builtin_constant_p(n)) copy_user_overflow(sz, n); else - __bad_copy_user(); + __bad_copy_user(); if (unlikely(ret)) memset(to + (n - ret), 0, ret); - return ret; + + return ret; +} + +static __always_inline unsigned long __must_check +copy_to_user(void __user *to, const void *from, unsigned long n) +{ + int sz = __compiletime_object_size(from); + + if (likely(sz < 0 || sz >= n)) { + check_object_size(from, n, true); + n = __copy_to_user(to, from, n); + } else if (!__builtin_constant_p(n)) + copy_user_overflow(sz, n); + else + __bad_copy_user(); + + return n; } struct pt_regs; diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S index 
baa3d9d6e971..4fcff2dcc9c3 100644 --- a/arch/parisc/kernel/entry.S +++ b/arch/parisc/kernel/entry.S @@ -766,7 +766,7 @@ ENTRY(end_fault_vector) * copy_thread moved args into task save area. */ -ENTRY(ret_from_kernel_thread) +ENTRY_CFI(ret_from_kernel_thread) /* Call schedule_tail first though */ BL schedule_tail, %r2 @@ -782,7 +782,7 @@ ENTRY(ret_from_kernel_thread) copy %r31, %r2 b finish_child_return nop -ENDPROC(ret_from_kernel_thread) +ENDPROC_CFI(ret_from_kernel_thread) /* @@ -790,7 +790,7 @@ ENDPROC(ret_from_kernel_thread) * struct task_struct *next) * * switch kernel stacks and return prev */ -ENTRY(_switch_to) +ENTRY_CFI(_switch_to) STREG %r2, -RP_OFFSET(%r30) callee_save_float @@ -815,7 +815,7 @@ _switch_to_ret: LDREG -RP_OFFSET(%r30), %r2 bv %r0(%r2) copy %r26, %r28 -ENDPROC(_switch_to) +ENDPROC_CFI(_switch_to) /* * Common rfi return path for interruptions, kernel execve, and @@ -833,7 +833,7 @@ ENDPROC(_switch_to) .align PAGE_SIZE -ENTRY(syscall_exit_rfi) +ENTRY_CFI(syscall_exit_rfi) mfctl %cr30,%r16 LDREG TI_TASK(%r16), %r16 /* thread_info -> task_struct */ ldo TASK_REGS(%r16),%r16 @@ -1037,12 +1037,12 @@ intr_extint: b do_cpu_irq_mask ldo R%intr_return(%r2), %r2 /* return to intr_return, not here */ -ENDPROC(syscall_exit_rfi) +ENDPROC_CFI(syscall_exit_rfi) /* Generic interruptions (illegal insn, unaligned, page fault, etc) */ -ENTRY(intr_save) /* for os_hpmc */ +ENTRY_CFI(intr_save) /* for os_hpmc */ mfsp %sr7,%r16 cmpib,COND(=),n 0,%r16,1f get_stack_use_cr30 @@ -1117,7 +1117,7 @@ skip_save_ior: b handle_interruption ldo R%intr_check_sig(%r2), %r2 -ENDPROC(intr_save) +ENDPROC_CFI(intr_save) /* @@ -1720,7 +1720,7 @@ dtlb_fault: .endm .macro fork_like name -ENTRY(sys_\name\()_wrapper) +ENTRY_CFI(sys_\name\()_wrapper) LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30), %r1 ldo TASK_REGS(%r1),%r1 reg_save %r1 @@ -1728,7 +1728,7 @@ ENTRY(sys_\name\()_wrapper) ldil L%sys_\name, %r31 be R%sys_\name(%sr4,%r31) STREG %r28, PT_CR27(%r1) -ENDPROC(sys_\name\()_wrapper) +ENDPROC_CFI(sys_\name\()_wrapper) .endm fork_like clone @@ -1736,7 +1736,7 @@ fork_like fork fork_like vfork /* Set the return value for the child */ -ENTRY(child_return) +ENTRY_CFI(child_return) BL schedule_tail, %r2 nop finish_child_return: @@ -1748,9 +1748,9 @@ finish_child_return: reg_restore %r1 b syscall_exit copy %r0,%r28 -ENDPROC(child_return) +ENDPROC_CFI(child_return) -ENTRY(sys_rt_sigreturn_wrapper) +ENTRY_CFI(sys_rt_sigreturn_wrapper) LDREG TI_TASK-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r26 ldo TASK_REGS(%r26),%r26 /* get pt regs */ /* Don't save regs, we are going to restore them from sigcontext. */ @@ -1778,9 +1778,9 @@ ENTRY(sys_rt_sigreturn_wrapper) */ bv %r0(%r2) LDREG PT_GR28(%r1),%r28 /* reload original r28 for syscall_exit */ -ENDPROC(sys_rt_sigreturn_wrapper) +ENDPROC_CFI(sys_rt_sigreturn_wrapper) -ENTRY(syscall_exit) +ENTRY_CFI(syscall_exit) /* NOTE: Not all syscalls exit this way. rt_sigreturn will exit * via syscall_exit_rfi if the signal was received while the process * was running. 
@@ -1979,7 +1979,7 @@ syscall_do_resched: #else nop #endif -ENDPROC(syscall_exit) +ENDPROC_CFI(syscall_exit) #ifdef CONFIG_FUNCTION_TRACER @@ -2023,7 +2023,7 @@ ENDPROC(mcount) .align 8 .globl return_to_handler .type return_to_handler, @function -ENTRY(return_to_handler) +ENTRY_CFI(return_to_handler) .proc .callinfo caller,frame=FRAME_SIZE .entry @@ -2067,7 +2067,7 @@ parisc_return_to_handler: LDREGM -FRAME_SIZE(%sp),%r3 .exit .procend -ENDPROC(return_to_handler) +ENDPROC_CFI(return_to_handler) #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ @@ -2076,7 +2076,7 @@ ENDPROC(return_to_handler) #ifdef CONFIG_IRQSTACKS /* void call_on_stack(unsigned long param1, void *func, unsigned long new_stack) */ -ENTRY(call_on_stack) +ENTRY_CFI(call_on_stack) copy %sp, %r1 /* Regarding the HPPA calling conventions for function pointers, @@ -2112,10 +2112,10 @@ ENTRY(call_on_stack) bv (%rp) LDREG -68(%sp), %sp # endif /* CONFIG_64BIT */ -ENDPROC(call_on_stack) +ENDPROC_CFI(call_on_stack) #endif /* CONFIG_IRQSTACKS */ -get_register: +ENTRY_CFI(get_register) /* * get_register is used by the non access tlb miss handlers to * copy the value of the general register specified in r8 into @@ -2192,9 +2192,10 @@ get_register: copy %r30,%r1 bv %r0(%r25) /* r31 */ copy %r31,%r1 +ENDPROC_CFI(get_register) -set_register: +ENTRY_CFI(set_register) /* * set_register is used by the non access tlb miss handlers to * copy the value of r1 into the general register specified in @@ -2266,4 +2267,5 @@ set_register: copy %r1,%r30 bv %r0(%r25) /* r31 */ copy %r1,%r31 +ENDPROC_CFI(set_register) diff --git a/arch/parisc/kernel/hpmc.S b/arch/parisc/kernel/hpmc.S index e158b6fbf1b4..0fbd0a0e1cda 100644 --- a/arch/parisc/kernel/hpmc.S +++ b/arch/parisc/kernel/hpmc.S @@ -41,12 +41,12 @@ */ .level 1.1 - .data #include <asm/assembly.h> #include <asm/pdc.h> #include <linux/linkage.h> +#include <linux/init.h> /* * stack for os_hpmc, the HPMC handler. @@ -55,22 +55,26 @@ * IODC requires 7K byte stack. That leaves 1K byte for os_hpmc. */ + __PAGE_ALIGNED_BSS .align 4096 hpmc_stack: .block 16384 #define HPMC_IODC_BUF_SIZE 0x8000 + __PAGE_ALIGNED_BSS .align 4096 hpmc_iodc_buf: .block HPMC_IODC_BUF_SIZE + .section .bss .align 8 hpmc_raddr: .block 128 #define HPMC_PIM_DATA_SIZE 896 /* Enough to hold all architected 2.0 state */ + .section .bss .align 8 ENTRY(hpmc_pim_data) .block HPMC_PIM_DATA_SIZE @@ -79,7 +83,7 @@ END(hpmc_pim_data) .text .import intr_save, code -ENTRY(os_hpmc) +ENTRY_CFI(os_hpmc) .os_hpmc: /* @@ -295,11 +299,11 @@ os_hpmc_6: b . 
nop -ENDPROC(os_hpmc) +ENDPROC_CFI(os_hpmc) .os_hpmc_end: - nop -.data -.align 4 + + + __INITRODATA .export os_hpmc_size os_hpmc_size: .word .os_hpmc_end-.os_hpmc diff --git a/arch/parisc/kernel/inventory.c b/arch/parisc/kernel/inventory.c index f0b6722fc706..545f9d2fe711 100644 --- a/arch/parisc/kernel/inventory.c +++ b/arch/parisc/kernel/inventory.c @@ -506,7 +506,7 @@ add_system_map_addresses(struct parisc_device *dev, int num_addrs, long status; struct pdc_system_map_addr_info addr_result; - dev->addr = kmalloc(num_addrs * sizeof(unsigned long), GFP_KERNEL); + dev->addr = kmalloc_array(num_addrs, sizeof(*dev->addr), GFP_KERNEL); if(!dev->addr) { printk(KERN_ERR "%s %s(): memory allocation failure\n", __FILE__, __func__); diff --git a/arch/parisc/kernel/pacache.S b/arch/parisc/kernel/pacache.S index b743a80eaba0..985e06da37f5 100644 --- a/arch/parisc/kernel/pacache.S +++ b/arch/parisc/kernel/pacache.S @@ -41,7 +41,7 @@ .text .align 128 -ENTRY(flush_tlb_all_local) +ENTRY_CFI(flush_tlb_all_local) .proc .callinfo NO_CALLS .entry @@ -190,11 +190,11 @@ fdtdone: .exit .procend -ENDPROC(flush_tlb_all_local) +ENDPROC_CFI(flush_tlb_all_local) .import cache_info,data -ENTRY(flush_instruction_cache_local) +ENTRY_CFI(flush_instruction_cache_local) .proc .callinfo NO_CALLS .entry @@ -257,11 +257,11 @@ fisync: .exit .procend -ENDPROC(flush_instruction_cache_local) +ENDPROC_CFI(flush_instruction_cache_local) .import cache_info, data -ENTRY(flush_data_cache_local) +ENTRY_CFI(flush_data_cache_local) .proc .callinfo NO_CALLS .entry @@ -325,7 +325,7 @@ fdsync: .exit .procend -ENDPROC(flush_data_cache_local) +ENDPROC_CFI(flush_data_cache_local) .align 16 @@ -356,7 +356,7 @@ ENDPROC(flush_data_cache_local) /* Clear page using kernel mapping. */ -ENTRY(clear_page_asm) +ENTRY_CFI(clear_page_asm) .proc .callinfo NO_CALLS .entry @@ -422,11 +422,11 @@ ENTRY(clear_page_asm) .exit .procend -ENDPROC(clear_page_asm) +ENDPROC_CFI(clear_page_asm) /* Copy page using kernel mapping. */ -ENTRY(copy_page_asm) +ENTRY_CFI(copy_page_asm) .proc .callinfo NO_CALLS .entry @@ -540,7 +540,7 @@ ENTRY(copy_page_asm) .exit .procend -ENDPROC(copy_page_asm) +ENDPROC_CFI(copy_page_asm) /* * NOTE: Code in clear_user_page has a hard coded dependency on the @@ -573,11 +573,17 @@ ENDPROC(copy_page_asm) .endm /* - * We can't do this since copy_user_page is used to bring in - * file data that might have instructions. Since the data would - * then need to be flushed out so the i-fetch can see it, it - * makes more sense to just copy through the kernel translation - * and flush it. + * copy_user_page_asm() performs a page copy using mappings + * equivalent to the user page mappings. It can be used to + * implement copy_user_page() but unfortunately both the `from' + * and `to' pages need to be flushed through mappings equivalent + * to the user mappings after the copy because the kernel accesses + * the `from' page through the kmap kernel mapping and the `to' + * page needs to be flushed since code can be copied. As a + * result, this implementation is less efficient than the simpler + * copy using the kernel mapping. It only needs the `from' page + * to flushed via the user mapping. The kunmap routines handle + * the flushes needed for the kernel mapping. * * I'm still keeping this around because it may be possible to * use it if more information is passed into copy_user_page(). 
@@ -586,7 +592,7 @@ ENDPROC(copy_page_asm) * */ -ENTRY(copy_user_page_asm) +ENTRY_CFI(copy_user_page_asm) .proc .callinfo NO_CALLS .entry @@ -742,9 +748,9 @@ ENTRY(copy_user_page_asm) .exit .procend -ENDPROC(copy_user_page_asm) +ENDPROC_CFI(copy_user_page_asm) -ENTRY(clear_user_page_asm) +ENTRY_CFI(clear_user_page_asm) .proc .callinfo NO_CALLS .entry @@ -828,9 +834,9 @@ ENTRY(clear_user_page_asm) .exit .procend -ENDPROC(clear_user_page_asm) +ENDPROC_CFI(clear_user_page_asm) -ENTRY(flush_dcache_page_asm) +ENTRY_CFI(flush_dcache_page_asm) .proc .callinfo NO_CALLS .entry @@ -904,9 +910,9 @@ ENTRY(flush_dcache_page_asm) .exit .procend -ENDPROC(flush_dcache_page_asm) +ENDPROC_CFI(flush_dcache_page_asm) -ENTRY(flush_icache_page_asm) +ENTRY_CFI(flush_icache_page_asm) .proc .callinfo NO_CALLS .entry @@ -982,9 +988,9 @@ ENTRY(flush_icache_page_asm) .exit .procend -ENDPROC(flush_icache_page_asm) +ENDPROC_CFI(flush_icache_page_asm) -ENTRY(flush_kernel_dcache_page_asm) +ENTRY_CFI(flush_kernel_dcache_page_asm) .proc .callinfo NO_CALLS .entry @@ -1025,9 +1031,9 @@ ENTRY(flush_kernel_dcache_page_asm) .exit .procend -ENDPROC(flush_kernel_dcache_page_asm) +ENDPROC_CFI(flush_kernel_dcache_page_asm) -ENTRY(purge_kernel_dcache_page_asm) +ENTRY_CFI(purge_kernel_dcache_page_asm) .proc .callinfo NO_CALLS .entry @@ -1067,9 +1073,9 @@ ENTRY(purge_kernel_dcache_page_asm) .exit .procend -ENDPROC(purge_kernel_dcache_page_asm) +ENDPROC_CFI(purge_kernel_dcache_page_asm) -ENTRY(flush_user_dcache_range_asm) +ENTRY_CFI(flush_user_dcache_range_asm) .proc .callinfo NO_CALLS .entry @@ -1088,9 +1094,9 @@ ENTRY(flush_user_dcache_range_asm) .exit .procend -ENDPROC(flush_user_dcache_range_asm) +ENDPROC_CFI(flush_user_dcache_range_asm) -ENTRY(flush_kernel_dcache_range_asm) +ENTRY_CFI(flush_kernel_dcache_range_asm) .proc .callinfo NO_CALLS .entry @@ -1110,9 +1116,9 @@ ENTRY(flush_kernel_dcache_range_asm) .exit .procend -ENDPROC(flush_kernel_dcache_range_asm) +ENDPROC_CFI(flush_kernel_dcache_range_asm) -ENTRY(flush_user_icache_range_asm) +ENTRY_CFI(flush_user_icache_range_asm) .proc .callinfo NO_CALLS .entry @@ -1131,9 +1137,9 @@ ENTRY(flush_user_icache_range_asm) .exit .procend -ENDPROC(flush_user_icache_range_asm) +ENDPROC_CFI(flush_user_icache_range_asm) -ENTRY(flush_kernel_icache_page) +ENTRY_CFI(flush_kernel_icache_page) .proc .callinfo NO_CALLS .entry @@ -1174,9 +1180,9 @@ ENTRY(flush_kernel_icache_page) .exit .procend -ENDPROC(flush_kernel_icache_page) +ENDPROC_CFI(flush_kernel_icache_page) -ENTRY(flush_kernel_icache_range_asm) +ENTRY_CFI(flush_kernel_icache_range_asm) .proc .callinfo NO_CALLS .entry @@ -1194,13 +1200,13 @@ ENTRY(flush_kernel_icache_range_asm) nop .exit .procend -ENDPROC(flush_kernel_icache_range_asm) +ENDPROC_CFI(flush_kernel_icache_range_asm) /* align should cover use of rfi in disable_sr_hashing_asm and * srdis_done. 
*/ .align 256 -ENTRY(disable_sr_hashing_asm) +ENTRY_CFI(disable_sr_hashing_asm) .proc .callinfo NO_CALLS .entry @@ -1289,6 +1295,6 @@ srdis_done: .exit .procend -ENDPROC(disable_sr_hashing_asm) +ENDPROC_CFI(disable_sr_hashing_asm) .end diff --git a/arch/parisc/kernel/real2.S b/arch/parisc/kernel/real2.S index 5f3d3a1f9037..1db58e546230 100644 --- a/arch/parisc/kernel/real2.S +++ b/arch/parisc/kernel/real2.S @@ -61,7 +61,7 @@ save_cr_end: * iodc_fn is the IODC function to call */ -ENTRY(real32_call_asm) +ENTRY_CFI(real32_call_asm) STREG %rp, -RP_OFFSET(%sp) /* save RP */ #ifdef CONFIG_64BIT callee_save @@ -119,14 +119,14 @@ ric_ret: LDREG -RP_OFFSET(%sp), %rp /* restore RP */ bv 0(%rp) nop -ENDPROC(real32_call_asm) +ENDPROC_CFI(real32_call_asm) # define PUSH_CR(r, where) mfctl r, %r1 ! STREG,ma %r1, REG_SZ(where) # define POP_CR(r, where) LDREG,mb -REG_SZ(where), %r1 ! mtctl %r1, r .text -save_control_regs: +ENTRY_CFI(save_control_regs) load32 PA(save_cr_space), %r28 PUSH_CR(%cr24, %r28) PUSH_CR(%cr25, %r28) @@ -139,8 +139,9 @@ save_control_regs: PUSH_CR(%cr15, %r28) bv 0(%r2) nop +ENDPROC_CFI(save_control_regs) -restore_control_regs: +ENTRY_CFI(restore_control_regs) load32 PA(save_cr_end), %r26 POP_CR(%cr15, %r26) POP_CR(%cr31, %r26) @@ -153,13 +154,14 @@ restore_control_regs: POP_CR(%cr24, %r26) bv 0(%r2) nop +ENDPROC_CFI(restore_control_regs) /* rfi_virt2real() and rfi_real2virt() could perhaps be adapted for * more general-purpose use by the several places which need RFIs */ .text .align 128 -rfi_virt2real: +ENTRY_CFI(rfi_virt2real) /* switch to real mode... */ rsm PSW_SM_I,%r0 load32 PA(rfi_v2r_1), %r1 @@ -191,10 +193,11 @@ rfi_v2r_1: tophys_r1 %r2 bv 0(%r2) nop +ENDPROC_CFI(rfi_virt2real) .text .align 128 -rfi_real2virt: +ENTRY_CFI(rfi_real2virt) rsm PSW_SM_I,%r0 load32 (rfi_r2v_1), %r1 nop @@ -225,6 +228,7 @@ rfi_r2v_1: tovirt_r1 %r2 bv 0(%r2) nop +ENDPROC_CFI(rfi_real2virt) #ifdef CONFIG_64BIT @@ -238,7 +242,7 @@ rfi_r2v_1: * arg0p points to where saved arg values may be found * iodc_fn is the IODC function to call */ -ENTRY(real64_call_asm) +ENTRY_CFI(real64_call_asm) std %rp, -0x10(%sp) /* save RP */ std %sp, -8(%arg0) /* save SP on real-mode stack */ copy %arg0, %sp /* adopt the real-mode SP */ @@ -284,7 +288,7 @@ r64_ret: ldd -0x10(%sp), %rp /* restore RP */ bv 0(%rp) nop -ENDPROC(real64_call_asm) +ENDPROC_CFI(real64_call_asm) #endif @@ -293,12 +297,12 @@ ENDPROC(real64_call_asm) ** GCC 3.3 and later has a new function in libgcc.a for ** comparing function pointers. */ -ENTRY(__canonicalize_funcptr_for_compare) +ENTRY_CFI(__canonicalize_funcptr_for_compare) #ifdef CONFIG_64BIT bve (%r2) #else bv %r0(%r2) #endif copy %r26,%r28 -ENDPROC(__canonicalize_funcptr_for_compare) +ENDPROC_CFI(__canonicalize_funcptr_for_compare) diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c index f7ea626e29c9..81d6f6391944 100644 --- a/arch/parisc/kernel/setup.c +++ b/arch/parisc/kernel/setup.c @@ -38,6 +38,7 @@ #include <linux/export.h> #include <asm/processor.h> +#include <asm/sections.h> #include <asm/pdc.h> #include <asm/led.h> #include <asm/machdep.h> /* for pa7300lc_init() proto */ @@ -140,6 +141,13 @@ void __init setup_arch(char **cmdline_p) #endif printk(KERN_CONT ".\n"); + /* + * Check if initial kernel page mappings are sufficient. + * panic early if not, else we may access kernel functions + * and variables which can't be reached. 
+ */ + if (__pa((unsigned long) &_end) >= KERNEL_INITIAL_SIZE) + panic("KERNEL_INITIAL_ORDER too small!"); pdc_console_init(); diff --git a/arch/parisc/kernel/smp.c b/arch/parisc/kernel/smp.c index c2a9cc55a62f..75dab2871346 100644 --- a/arch/parisc/kernel/smp.c +++ b/arch/parisc/kernel/smp.c @@ -412,8 +412,8 @@ void smp_cpus_done(unsigned int cpu_max) int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - if (cpu != 0 && cpu < parisc_max_cpus) - smp_boot_one_cpu(cpu, tidle); + if (cpu != 0 && cpu < parisc_max_cpus && smp_boot_one_cpu(cpu, tidle)) + return -ENOSYS; return cpu_online(cpu) ? 0 : -ENOSYS; } diff --git a/arch/parisc/kernel/time.c b/arch/parisc/kernel/time.c index 4b0b963d52a7..9b63b876a13a 100644 --- a/arch/parisc/kernel/time.c +++ b/arch/parisc/kernel/time.c @@ -226,12 +226,6 @@ void __init start_cpu_itimer(void) unsigned int cpu = smp_processor_id(); unsigned long next_tick = mfctl(16) + clocktick; -#if defined(CONFIG_HAVE_UNSTABLE_SCHED_CLOCK) && defined(CONFIG_64BIT) - /* With multiple 64bit CPUs online, the cr16's are not syncronized. */ - if (cpu != 0) - clear_sched_clock_stable(); -#endif - mtctl(next_tick, 16); /* kick off Interval Timer (CR16) */ per_cpu(cpu_data, cpu).it_value = next_tick; diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S index f3ead0b6ce46..5b8fae8d4d12 100644 --- a/arch/parisc/kernel/vmlinux.lds.S +++ b/arch/parisc/kernel/vmlinux.lds.S @@ -138,8 +138,6 @@ SECTIONS /* BSS */ BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE) - /* bootmap is allocated in setup_bootmem() directly behind bss. */ - . = ALIGN(HUGEPAGE_SIZE); _end = . ; diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S index 1052b747e011..a5b72f22c7a6 100644 --- a/arch/parisc/lib/fixup.S +++ b/arch/parisc/lib/fixup.S @@ -65,34 +65,34 @@ .section .fixup, "ax" /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */ -ENTRY(fixup_get_user_skip_1) +ENTRY_CFI(fixup_get_user_skip_1) get_fault_ip %r1,%r8 ldo 4(%r1), %r1 ldi -EFAULT, %r8 bv %r0(%r1) copy %r0, %r9 -ENDPROC(fixup_get_user_skip_1) +ENDPROC_CFI(fixup_get_user_skip_1) -ENTRY(fixup_get_user_skip_2) +ENTRY_CFI(fixup_get_user_skip_2) get_fault_ip %r1,%r8 ldo 8(%r1), %r1 ldi -EFAULT, %r8 bv %r0(%r1) copy %r0, %r9 -ENDPROC(fixup_get_user_skip_2) +ENDPROC_CFI(fixup_get_user_skip_2) /* put_user() fixups, store -EFAULT in r8 */ -ENTRY(fixup_put_user_skip_1) +ENTRY_CFI(fixup_put_user_skip_1) get_fault_ip %r1,%r8 ldo 4(%r1), %r1 bv %r0(%r1) ldi -EFAULT, %r8 -ENDPROC(fixup_put_user_skip_1) +ENDPROC_CFI(fixup_put_user_skip_1) -ENTRY(fixup_put_user_skip_2) +ENTRY_CFI(fixup_put_user_skip_2) get_fault_ip %r1,%r8 ldo 8(%r1), %r1 bv %r0(%r1) ldi -EFAULT, %r8 -ENDPROC(fixup_put_user_skip_2) +ENDPROC_CFI(fixup_put_user_skip_2) diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S index a512f07d4feb..56845de6b5df 100644 --- a/arch/parisc/lib/lusercopy.S +++ b/arch/parisc/lib/lusercopy.S @@ -67,7 +67,7 @@ * otherwise, returns number of bytes not transferred. */ -ENTRY(lclear_user) +ENTRY_CFI(lclear_user) .proc .callinfo NO_CALLS .entry @@ -81,7 +81,7 @@ $lclu_done: bv %r0(%r2) copy %r25,%r28 .exit -ENDPROC(lclear_user) +ENDPROC_CFI(lclear_user) .section .fixup,"ax" 2: fixup_branch $lclu_done @@ -100,7 +100,7 @@ ENDPROC(lclear_user) * else strlen + 1 (i.e. includes zero byte). 
*/ -ENTRY(lstrnlen_user) +ENTRY_CFI(lstrnlen_user) .proc .callinfo NO_CALLS .entry @@ -120,7 +120,7 @@ $lslen_done: $lslen_nzero: b $lslen_done ldo 1(%r26),%r26 /* special case for N == 0 */ -ENDPROC(lstrnlen_user) +ENDPROC_CFI(lstrnlen_user) .section .fixup,"ax" 3: fixup_branch $lslen_done diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index b2b441b32341..f82ff10ed974 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -489,20 +489,23 @@ static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) } #ifdef __KERNEL__ -unsigned long copy_to_user(void __user *dst, const void *src, unsigned long len) +unsigned long __copy_to_user(void __user *dst, const void *src, + unsigned long len) { mtsp(get_kernel_space(), 1); mtsp(get_user_space(), 2); return pa_memcpy((void __force *)dst, src, len); } +EXPORT_SYMBOL(__copy_to_user); -EXPORT_SYMBOL(__copy_from_user); -unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long len) +unsigned long __copy_from_user(void *dst, const void __user *src, + unsigned long len) { mtsp(get_user_space(), 1); mtsp(get_kernel_space(), 2); return pa_memcpy(dst, (void __force *)src, len); } +EXPORT_SYMBOL(__copy_from_user); unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len) { @@ -520,8 +523,6 @@ void * memcpy(void * dst,const void *src, size_t count) return dst; } -EXPORT_SYMBOL(copy_to_user); -EXPORT_SYMBOL(copy_from_user); EXPORT_SYMBOL(copy_in_user); EXPORT_SYMBOL(memcpy); diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index 163af2c31d76..47a6ca4c9e40 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -168,6 +168,43 @@ int fixup_exception(struct pt_regs *regs) } /* + * parisc hardware trap list + * + * Documented in section 3 "Addressing and Access Control" of the + * "PA-RISC 1.1 Architecture and Instruction Set Reference Manual" + * https://parisc.wiki.kernel.org/index.php/File:Pa11_acd.pdf + * + * For implementation see handle_interruption() in traps.c + */ +static const char * const trap_description[] = { + [1] "High-priority machine check (HPMC)", + [2] "Power failure interrupt", + [3] "Recovery counter trap", + [5] "Low-priority machine check", + [6] "Instruction TLB miss fault", + [7] "Instruction access rights / protection trap", + [8] "Illegal instruction trap", + [9] "Break instruction trap", + [10] "Privileged operation trap", + [11] "Privileged register trap", + [12] "Overflow trap", + [13] "Conditional trap", + [14] "FP Assist Exception trap", + [15] "Data TLB miss fault", + [16] "Non-access ITLB miss fault", + [17] "Non-access DTLB miss fault", + [18] "Data memory protection/unaligned access trap", + [19] "Data memory break trap", + [20] "TLB dirty bit trap", + [21] "Page reference trap", + [22] "Assist emulation trap", + [25] "Taken branch trap", + [26] "Data memory access rights trap", + [27] "Data memory protection ID trap", + [28] "Unaligned data reference trap", +}; + +/* * Print out info about fatal segfaults, if the show_unhandled_signals * sysctl is set: */ @@ -176,6 +213,8 @@ show_signal_msg(struct pt_regs *regs, unsigned long code, unsigned long address, struct task_struct *tsk, struct vm_area_struct *vma) { + const char *trap_name = NULL; + if (!unhandled_signal(tsk, SIGSEGV)) return; @@ -186,8 +225,15 @@ show_signal_msg(struct pt_regs *regs, unsigned long code, pr_warn("do_page_fault() command='%s' type=%lu address=0x%08lx", tsk->comm, code, address); print_vma_addr(KERN_CONT " in ", 
regs->iaoq[0]); + + if (code < ARRAY_SIZE(trap_description)) + trap_name = trap_description[code]; + pr_warn(KERN_CONT " trap #%lu: %s%c", code, + trap_name ? trap_name : "unknown", + vma ? ',':'\n'); + if (vma) - pr_warn(" vm_start = 0x%08lx, vm_end = 0x%08lx\n", + pr_warn(KERN_CONT " vm_start = 0x%08lx, vm_end = 0x%08lx\n", vma->vm_start, vma->vm_end); show_regs(regs); diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c index 6b3e7c6ee096..356f38473b5d 100644 --- a/arch/parisc/mm/init.c +++ b/arch/parisc/mm/init.c @@ -14,6 +14,7 @@ #include <linux/module.h> #include <linux/mm.h> #include <linux/bootmem.h> +#include <linux/memblock.h> #include <linux/gfp.h> #include <linux/delay.h> #include <linux/init.h> @@ -79,6 +80,34 @@ static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __read_mostly; physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __read_mostly; int npmem_ranges __read_mostly; +/* + * get_memblock() allocates pages via memblock. + * We can't use memblock_find_in_range(0, KERNEL_INITIAL_SIZE) here since it + * doesn't allocate from bottom to top which is needed because we only created + * the initial mapping up to KERNEL_INITIAL_SIZE in the assembly bootup code. + */ +static void * __init get_memblock(unsigned long size) +{ + static phys_addr_t search_addr __initdata; + phys_addr_t phys; + + if (!search_addr) + search_addr = PAGE_ALIGN(__pa((unsigned long) &_end)); + search_addr = ALIGN(search_addr, size); + while (!memblock_is_region_memory(search_addr, size) || + memblock_is_region_reserved(search_addr, size)) { + search_addr += size; + } + phys = search_addr; + + if (phys) + memblock_reserve(phys, size); + else + panic("get_memblock() failed.\n"); + + return __va(phys); +} + #ifdef CONFIG_64BIT #define MAX_MEM (~0UL) #else /* !CONFIG_64BIT */ @@ -118,11 +147,7 @@ static void __init mem_limit_func(void) static void __init setup_bootmem(void) { - unsigned long bootmap_size; unsigned long mem_max; - unsigned long bootmap_pages; - unsigned long bootmap_start_pfn; - unsigned long bootmap_pfn; #ifndef CONFIG_DISCONTIGMEM physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1]; int npmem_holes; @@ -178,33 +203,29 @@ static void __init setup_bootmem(void) } #endif - if (npmem_ranges > 1) { - - /* Print the memory ranges */ + /* Print the memory ranges */ + pr_info("Memory Ranges:\n"); - printk(KERN_INFO "Memory Ranges:\n"); + for (i = 0; i < npmem_ranges; i++) { + struct resource *res = &sysram_resources[i]; + unsigned long start; + unsigned long size; - for (i = 0; i < npmem_ranges; i++) { - unsigned long start; - unsigned long size; + size = (pmem_ranges[i].pages << PAGE_SHIFT); + start = (pmem_ranges[i].start_pfn << PAGE_SHIFT); + pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n", + i, start, start + (size - 1), size >> 20); - size = (pmem_ranges[i].pages << PAGE_SHIFT); - start = (pmem_ranges[i].start_pfn << PAGE_SHIFT); - printk(KERN_INFO "%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n", - i,start, start + (size - 1), size >> 20); - } - } - - sysram_resource_count = npmem_ranges; - for (i = 0; i < sysram_resource_count; i++) { - struct resource *res = &sysram_resources[i]; + /* request memory resource */ res->name = "System RAM"; - res->start = pmem_ranges[i].start_pfn << PAGE_SHIFT; - res->end = res->start + (pmem_ranges[i].pages << PAGE_SHIFT)-1; + res->start = start; + res->end = start + size - 1; res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; request_resource(&iomem_resource, res); } + sysram_resource_count = npmem_ranges; + /* * For 32 bit kernels we 
limit the amount of memory we can * support, in order to preserve enough kernel address space @@ -263,16 +284,9 @@ static void __init setup_bootmem(void) } #endif - bootmap_pages = 0; - for (i = 0; i < npmem_ranges; i++) - bootmap_pages += bootmem_bootmap_pages(pmem_ranges[i].pages); - - bootmap_start_pfn = PAGE_ALIGN(__pa((unsigned long) &_end)) >> PAGE_SHIFT; - #ifdef CONFIG_DISCONTIGMEM for (i = 0; i < MAX_PHYSMEM_RANGES; i++) { memset(NODE_DATA(i), 0, sizeof(pg_data_t)); - NODE_DATA(i)->bdata = &bootmem_node_data[i]; } memset(pfnnid_map, 0xff, sizeof(pfnnid_map)); @@ -284,28 +298,24 @@ static void __init setup_bootmem(void) /* * Initialize and free the full range of memory in each range. - * Note that the only writing these routines do are to the bootmap, - * and we've made sure to locate the bootmap properly so that they - * won't be writing over anything important. */ - bootmap_pfn = bootmap_start_pfn; max_pfn = 0; for (i = 0; i < npmem_ranges; i++) { unsigned long start_pfn; unsigned long npages; + unsigned long start; + unsigned long size; start_pfn = pmem_ranges[i].start_pfn; npages = pmem_ranges[i].pages; - bootmap_size = init_bootmem_node(NODE_DATA(i), - bootmap_pfn, - start_pfn, - (start_pfn + npages) ); - free_bootmem_node(NODE_DATA(i), - (start_pfn << PAGE_SHIFT), - (npages << PAGE_SHIFT) ); - bootmap_pfn += (bootmap_size + PAGE_SIZE - 1) >> PAGE_SHIFT; + start = start_pfn << PAGE_SHIFT; + size = npages << PAGE_SHIFT; + + /* add system RAM memblock */ + memblock_add(start, size); + if ((start_pfn + npages) > max_pfn) max_pfn = start_pfn + npages; } @@ -317,32 +327,22 @@ static void __init setup_bootmem(void) */ max_low_pfn = max_pfn; - /* bootmap sizing messed up? */ - BUG_ON((bootmap_pfn - bootmap_start_pfn) != bootmap_pages); - /* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */ #define PDC_CONSOLE_IO_IODC_SIZE 32768 - reserve_bootmem_node(NODE_DATA(0), 0UL, - (unsigned long)(PAGE0->mem_free + - PDC_CONSOLE_IO_IODC_SIZE), BOOTMEM_DEFAULT); - reserve_bootmem_node(NODE_DATA(0), __pa(KERNEL_BINARY_TEXT_START), - (unsigned long)(_end - KERNEL_BINARY_TEXT_START), - BOOTMEM_DEFAULT); - reserve_bootmem_node(NODE_DATA(0), (bootmap_start_pfn << PAGE_SHIFT), - ((bootmap_pfn - bootmap_start_pfn) << PAGE_SHIFT), - BOOTMEM_DEFAULT); + memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free + + PDC_CONSOLE_IO_IODC_SIZE)); + memblock_reserve(__pa(KERNEL_BINARY_TEXT_START), + (unsigned long)(_end - KERNEL_BINARY_TEXT_START)); #ifndef CONFIG_DISCONTIGMEM /* reserve the holes */ for (i = 0; i < npmem_holes; i++) { - reserve_bootmem_node(NODE_DATA(0), - (pmem_holes[i].start_pfn << PAGE_SHIFT), - (pmem_holes[i].pages << PAGE_SHIFT), - BOOTMEM_DEFAULT); + memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT), + (pmem_holes[i].pages << PAGE_SHIFT)); } #endif @@ -360,8 +360,7 @@ static void __init setup_bootmem(void) initrd_below_start_ok = 1; printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max); - reserve_bootmem_node(NODE_DATA(0), __pa(initrd_start), - initrd_reserve, BOOTMEM_DEFAULT); + memblock_reserve(__pa(initrd_start), initrd_reserve); } } #endif @@ -439,7 +438,7 @@ static void __init map_pages(unsigned long start_vaddr, */ if (!pmd) { - pmd = (pmd_t *) alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE << PMD_ORDER); + pmd = (pmd_t *) get_memblock(PAGE_SIZE << PMD_ORDER); pmd = (pmd_t *) __pa(pmd); } @@ -458,8 +457,7 @@ static void __init map_pages(unsigned long start_vaddr, pg_table = (pte_t 
*)pmd_address(*pmd); if (!pg_table) { - pg_table = (pte_t *) - alloc_bootmem_low_pages_node(NODE_DATA(0), PAGE_SIZE); + pg_table = (pte_t *) get_memblock(PAGE_SIZE); pg_table = (pte_t *) __pa(pg_table); } @@ -737,7 +735,7 @@ static void __init pagetable_init(void) } #endif - empty_zero_page = alloc_bootmem_pages(PAGE_SIZE); + empty_zero_page = get_memblock(PAGE_SIZE); } static void __init gateway_init(void) diff --git a/include/linux/init.h b/include/linux/init.h index 6935d02474aa..5a3321a7909b 100644 --- a/include/linux/init.h +++ b/include/linux/init.h @@ -41,22 +41,11 @@ discard it in modules) */ #define __init __section(.init.text) __cold notrace #define __initdata __section(.init.data) -#define __initconst __constsection(.init.rodata) +#define __initconst __section(.init.rodata) #define __exitdata __section(.exit.data) #define __exit_call __used __section(.exitcall.exit) /* - * Some architecture have tool chains which do not handle rodata attributes - * correctly. For those disable special sections for const, so that other - * architectures can annotate correctly. - */ -#ifdef CONFIG_BROKEN_RODATA -#define __constsection(x) -#else -#define __constsection(x) __section(x) -#endif - -/* * modpost check for section mismatches during the kernel build. * A section mismatch happens when there are references from a * code or data section to an init section (both code or data). @@ -75,7 +64,7 @@ */ #define __ref __section(.ref.text) noinline #define __refdata __section(.ref.data) -#define __refconst __constsection(.ref.rodata) +#define __refconst __section(.ref.rodata) #ifdef MODULE #define __exitused @@ -88,10 +77,10 @@ /* Used for MEMORY_HOTPLUG */ #define __meminit __section(.meminit.text) __cold notrace #define __meminitdata __section(.meminit.data) -#define __meminitconst __constsection(.meminit.rodata) +#define __meminitconst __section(.meminit.rodata) #define __memexit __section(.memexit.text) __exitused __cold notrace #define __memexitdata __section(.memexit.data) -#define __memexitconst __constsection(.memexit.rodata) +#define __memexitconst __section(.memexit.rodata) /* For assembly routines */ #define __HEAD .section ".head.text","ax" diff --git a/init/Kconfig b/init/Kconfig index 3b9a47fe843b..d7fc22639665 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -2118,12 +2118,6 @@ config PADATA depends on SMP bool -# Can be selected by architectures with broken toolchains -# that get confused by correct const<->read_only section -# mappings -config BROKEN_RODATA - bool - config ASN1 tristate help |
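As a closing note on the fault-reporting hunk in arch/parisc/mm/fault.c above: trap numbers are resolved to human-readable strings through a sparsely populated, designated-initializer array, with "unknown" used for gaps and out-of-range codes. A standalone sketch of that lookup pattern, using an abbreviated illustrative table rather than the full PA-RISC trap list:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Abbreviated stand-in for the patch's trap_description[]; gaps in the
 * designated initializers are left NULL automatically. */
static const char *const trap_name[] = {
	[6]  = "Instruction TLB miss fault",
	[15] = "Data TLB miss fault",
	[18] = "Data memory protection/unaligned access trap",
	[28] = "Unaligned data reference trap",
};

static const char *trap_to_string(unsigned long code)
{
	const char *name = NULL;

	if (code < ARRAY_SIZE(trap_name))
		name = trap_name[code];
	return name ? name : "unknown";
}

int main(void)
{
	unsigned long codes[] = { 6, 15, 7, 99 };

	for (unsigned i = 0; i < ARRAY_SIZE(codes); i++)
		printf("trap #%lu: %s\n", codes[i], trap_to_string(codes[i]));
	return 0;
}

Designated initializers leave unassigned slots NULL, so a single range check plus a NULL check covers both undocumented and out-of-range trap numbers.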