author:    David S. Miller <davem@davemloft.net>  2016-10-30 19:42:58 +0300
committer: David S. Miller <davem@davemloft.net>  2016-10-30 19:42:58 +0300
commit:    27058af401e49d88a905df000dd26f443fcfa8ce
tree:      819f32113d3b8374b9fbf72e2202d4c4d4511a60  /arch/powerpc/kernel
parent:    357f4aae859b5d74554b0ccbb18556f1df4166c3
parent:    2a26d99b251b8625d27aed14e97fc10707a3a81f
download:  linux-27058af401e49d88a905df000dd26f443fcfa8ce.tar.xz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Mostly simple overlapping changes.
For example, David Ahern's adjacency list revamp in 'net-next'
conflicted with an adjacency list traversal bug fix in 'net'.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/powerpc/kernel')
29 files changed, 234 insertions(+), 231 deletions(-)
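The dominant change in this diff is the removal of the central export files ppc_ksyms.c and ppc_ksyms_32.c: each EXPORT_SYMBOL() now lives next to the symbol it exports, and <asm/export.h> makes the macro usable directly from assembly. A minimal sketch of that pattern, using flush_icache_range (one of the symbols moved below); the assembly body is elided and not the real implementation:

	/* Before: collected in arch/powerpc/kernel/ppc_ksyms.c (C) */
	#include <linux/export.h>
	EXPORT_SYMBOL(flush_icache_range);

	/* After: next to the definition in the .S file */
	#include <asm/export.h>

	_GLOBAL(flush_icache_range)
		/* ... cache invalidation loop elided ... */
		blr
	EXPORT_SYMBOL(flush_icache_range)

Keeping the export beside the definition removes the extern prototypes that ppc_ksyms.c needed, and a renamed or dropped symbol now fails at build time instead of leaving a stale export behind.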
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index aded29ad2e8f..1925341dbb9c 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -14,6 +14,11 @@ CFLAGS_prom_init.o += -fPIC
 CFLAGS_btext.o += -fPIC
 endif
 
+CFLAGS_cputable.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_init.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_btext.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+CFLAGS_prom.o += $(DISABLE_LATENT_ENTROPY_PLUGIN)
+
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace early boot code
 CFLAGS_REMOVE_cputable.o = -mno-sched-epilog $(CC_FLAGS_FTRACE)
@@ -90,10 +95,6 @@ obj-$(CONFIG_RELOCATABLE) += reloc_$(BITS).o
 obj-$(CONFIG_PPC32) += entry_32.o setup_32.o
 obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o
 obj-$(CONFIG_KGDB) += kgdb.o
-obj-$(CONFIG_MODULES) += ppc_ksyms.o
-ifeq ($(CONFIG_PPC32),y)
-obj-$(CONFIG_MODULES) += ppc_ksyms_32.o
-endif
 obj-$(CONFIG_BOOTX_TEXT) += btext.o
 obj-$(CONFIG_SMP) += smp.o
 obj-$(CONFIG_KPROBES) += kprobes.o
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 6c4646ac9234..6a82ef039c50 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1248,6 +1248,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
 		.mmu_features = MMU_FTR_TYPE_8xx,
 		.icache_bsize = 16,
 		.dcache_bsize = 16,
+		.machine_check = machine_check_8xx,
 		.platform = "ppc823",
 	},
 #endif /* CONFIG_8xx */
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 83428a283fa0..3841d749a430 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -33,6 +33,7 @@
 #include <asm/unistd.h>
 #include <asm/ftrace.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 /*
  * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it include MSR_CE.
@@ -1358,6 +1359,7 @@ _GLOBAL(_mcount)
 	MCOUNT_RESTORE_FRAME
 	bctr
 #endif
+EXPORT_SYMBOL(_mcount)
 
 _GLOBAL(ftrace_stub)
 	blr
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 51df82b61084..6432d4bf08c8 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -38,6 +38,7 @@
 #include <asm/context_tracking.h>
 #include <asm/tm.h>
 #include <asm/ppc-opcode.h>
+#include <asm/export.h>
 
 /*
  * System calls.
@@ -1177,6 +1178,7 @@ _GLOBAL(enter_prom)
 #ifdef CONFIG_DYNAMIC_FTRACE
 _GLOBAL(mcount)
 _GLOBAL(_mcount)
+EXPORT_SYMBOL(_mcount)
 	mflr r12
 	mtctr r12
 	mtlr r0
@@ -1413,6 +1415,7 @@ livepatch_handler:
 
 #else
 _GLOBAL_TOC(_mcount)
+EXPORT_SYMBOL(_mcount)
 	/* Taken from output of objdump from lib64/glibc */
 	mflr r3
 	ld r11, 0(r1)
diff --git a/arch/powerpc/kernel/epapr_hcalls.S b/arch/powerpc/kernel/epapr_hcalls.S
index 9f1ebf7338f1..52ca2471ee1a 100644
--- a/arch/powerpc/kernel/epapr_hcalls.S
+++ b/arch/powerpc/kernel/epapr_hcalls.S
@@ -16,6 +16,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-compat.h>
 #include <asm/asm-offsets.h>
+#include <asm/export.h>
 
 #ifndef CONFIG_PPC64
 /* epapr_ev_idle() was derived from e500_idle() */
@@ -53,3 +54,4 @@ epapr_hypercall_start:
 	nop
 	nop
 	blr
+EXPORT_SYMBOL(epapr_hypercall_start)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 08992f8f5036..08ba447a4b3d 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -95,19 +95,35 @@ __start_interrupts:
 /* No virt vectors corresponding with 0x0..0x100 */
 EXC_VIRT_NONE(0x4000, 0x4100)
 
-EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
-	SET_SCRATCH0(r13)
+
 #ifdef CONFIG_PPC_P7_NAP
-BEGIN_FTR_SECTION
-	/* Running native on arch 2.06 or later, check if we are
-	 * waking up from nap/sleep/winkle.
+	/*
+	 * If running native on arch 2.06 or later, check if we are waking up
+	 * from nap/sleep/winkle, and branch to idle handler.
 	 */
-	mfspr r13,SPRN_SRR1
-	rlwinm. r13,r13,47-31,30,31
-	beq 9f
+#define IDLETEST(n) \
+	BEGIN_FTR_SECTION ; \
+	mfspr r10,SPRN_SRR1 ; \
+	rlwinm. r10,r10,47-31,30,31 ; \
+	beq- 1f ; \
+	cmpwi cr3,r10,2 ; \
+	BRANCH_TO_COMMON(r10, system_reset_idle_common) ; \
+1: \
+	END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
+#else
+#define IDLETEST NOTEST
+#endif
 
-	cmpwi cr3,r13,2
-	GET_PACA(r13)
+EXC_REAL_BEGIN(system_reset, 0x100, 0x200)
+	SET_SCRATCH0(r13)
+	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
+				 IDLETEST, 0x100)
+
+EXC_REAL_END(system_reset, 0x100, 0x200)
+EXC_VIRT_NONE(0x4100, 0x4200)
+
+#ifdef CONFIG_PPC_P7_NAP
+EXC_COMMON_BEGIN(system_reset_idle_common)
 	bl pnv_restore_hyp_resource
 
 	li r0,PNV_THREAD_RUNNING
@@ -130,14 +146,8 @@ BEGIN_FTR_SECTION
 	blt cr3,2f
 	b pnv_wakeup_loss
 2:	b pnv_wakeup_noloss
+#endif
 
-9:
-END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
-#endif /* CONFIG_PPC_P7_NAP */
-	EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
-				 NOTEST, 0x100)
-EXC_REAL_END(system_reset, 0x100, 0x200)
-EXC_VIRT_NONE(0x4100, 0x4200)
 EXC_COMMON(system_reset_common, 0x100, system_reset_exception)
 
 #ifdef CONFIG_PPC_PSERIES
@@ -817,10 +827,8 @@ EXC_VIRT(trap_0b, 0x4b00, 0x4c00, 0xb00)
 TRAMP_KVM(PACA_EXGEN, 0xb00)
 EXC_COMMON(trap_0b_common, 0xb00, unknown_exception)
 
-
-#define LOAD_SYSCALL_HANDLER(reg) \
-	ld reg,PACAKBASE(r13); \
-	ori reg,reg,(ABS_ADDR(system_call_common))@l;
+#define LOAD_SYSCALL_HANDLER(reg) \
+	__LOAD_HANDLER(reg, system_call_common)
 
 /* Syscall routine is used twice, in reloc-off and reloc-on paths */
 #define SYSCALL_PSERIES_1 \
@@ -1377,7 +1385,7 @@ __end_interrupts:
 DEFINE_FIXED_SYMBOL(__end_interrupts)
 
 #ifdef CONFIG_PPC_970_NAP
-TRAMP_REAL_BEGIN(power4_fixup_nap)
+EXC_COMMON_BEGIN(power4_fixup_nap)
 	andc r9,r9,r10
 	std r9,TI_LOCAL_FLAGS(r11)
 	ld r10,_LINK(r1)	/* make idle task do the */
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 08d14b096eb9..6c509f39bbde 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -24,6 +24,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 #ifdef CONFIG_VSX
 #define __REST_32FPVSRS(n,c,base) \
@@ -59,6 +60,7 @@ _GLOBAL(load_fp_state)
 	MTFSF_L(fr0)
 	REST_32FPVSRS(0, R4, R3)
 	blr
+EXPORT_SYMBOL(load_fp_state)
 
 /*
  * Store FP state into memory, including FPSCR
@@ -69,6 +71,7 @@ _GLOBAL(store_fp_state)
 	mffs fr0
 	stfd fr0,FPSTATE_FPSCR(r3)
 	blr
+EXPORT_SYMBOL(store_fp_state)
 
 /*
  * This task wants to use the FPU now.
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index a3f821eb7e9a..9d963547d243 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -34,6 +34,7 @@
 #include <asm/ptrace.h>
 #include <asm/bug.h>
 #include <asm/kvm_book3s_asm.h>
+#include <asm/export.h>
 
 /* 601 only have IBAT; cr0.eq is set on 601 when using this macro */
 #define LOAD_BAT(n, reg, RA, RB) \
@@ -738,6 +739,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
 
 	.globl mol_trampoline
 	.set mol_trampoline, i0x2f00
+	EXPORT_SYMBOL(mol_trampoline)
 
 	. = 0x3000
 
@@ -1045,6 +1047,7 @@ _ENTRY(switch_mmu_context)
4:	trap
 	EMIT_BUG_ENTRY 4b,__FILE__,__LINE__,0
 	blr
+EXPORT_SYMBOL(switch_mmu_context)
 
 /*
  * An undocumented "feature" of 604e requires that the v bit
@@ -1272,6 +1275,7 @@ sdata:
 	.globl empty_zero_page
 empty_zero_page:
 	.space 4096
+EXPORT_SYMBOL(empty_zero_page)
 
 	.globl swapper_pg_dir
 swapper_pg_dir:
@@ -1285,6 +1289,7 @@ intercept_table:
 	.long 0, 0, 0, 0, 0, 0, 0, 0
 	.long 0, 0, 0, 0, 0, 0, 0, 0
 	.long 0, 0, 0, 0, 0, 0, 0, 0
+EXPORT_SYMBOL(intercept_table)
 
 /* Room for two PTE pointers, usually the kernel and current user pointers
  * to their respective root page table.
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 7d7d8635227a..41374a468d1c 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -41,6 +41,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 /* As with the other PowerPC ports, it is expected that when code
  * execution begins here, the following registers contain valid, yet
@@ -971,6 +972,7 @@ sdata:
 	.globl empty_zero_page
 empty_zero_page:
 	.space 4096
+EXPORT_SYMBOL(empty_zero_page)
 	.globl swapper_pg_dir
 swapper_pg_dir:
 	.space PGD_TABLE_SIZE
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 9cdf5c71e426..37e4a7cf0065 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -39,6 +39,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/synch.h>
+#include <asm/export.h>
 
 #include "head_booke.h"
 
@@ -1254,6 +1255,7 @@ sdata:
 	.globl empty_zero_page
 empty_zero_page:
 	.space PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
 
 /*
  * To support >32-bit physical addresses, we use an 8KB pgdir.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 79da0641bae2..04c546e20cc0 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -43,6 +43,7 @@
 #include <asm/hw_irq.h>
 #include <asm/cputhreads.h>
 #include <asm/ppc-opcode.h>
+#include <asm/export.h>
 
 /* The physical memory is laid out such that the secondary processor
  * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -1002,3 +1003,4 @@ swapper_pg_dir:
 	.globl empty_zero_page
 empty_zero_page:
 	.space PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3a185c51ce8f..fb133a163263 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -31,6 +31,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/ptrace.h>
 #include <asm/fixmap.h>
+#include <asm/export.h>
 
 /* Macro to make the code more readable. */
 #ifdef CONFIG_8xx_CPU6
@@ -226,7 +227,7 @@ i##n: \
 			  ret_from_except)
 
 /* System reset */
-	EXCEPTION(0x100, Reset, unknown_exception, EXC_XFER_STD)
+	EXCEPTION(0x100, Reset, system_reset_exception, EXC_XFER_STD)
 
 /* Machine check */
 	. = 0x200
@@ -321,7 +322,7 @@ SystemCall:
 #endif
 
 InstructionTLBMiss:
-#ifdef CONFIG_8xx_CPU6
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
 	mtspr SPRN_SPRG_SCRATCH2, r3
 #endif
 	EXCEPTION_PROLOG_0
@@ -329,23 +330,20 @@ InstructionTLBMiss:
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
+	mfspr r10, SPRN_SRR0	/* Get effective address of fault */
+	INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
 #if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
 	/* Only modules will cause ITLB Misses as we always
 	 * pin the first 8MB of kernel memory */
-	mfspr r11, SPRN_SRR0	/* Get effective address of fault */
-	INVALIDATE_ADJACENT_PAGES_CPU15(r10, r11)
-	mfcr r10
-	IS_KERNEL(r11, r11)
+	mfcr r3
+	IS_KERNEL(r11, r10)
+#endif
 	mfspr r11, SPRN_M_TW	/* Get level 1 table */
+#if defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
 	BRANCH_UNLESS_KERNEL(3f)
 	lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
3:
-	mtcr r10
-	mfspr r10, SPRN_SRR0	/* Get effective address of fault */
-#else
-	mfspr r10, SPRN_SRR0	/* Get effective address of fault */
-	INVALIDATE_ADJACENT_PAGES_CPU15(r11, r10)
-	mfspr r11, SPRN_M_TW	/* Get level 1 table base address */
+	mtcr r3
 #endif
 	/* Insert level 1 index */
 	rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -377,58 +375,39 @@ InstructionTLBMiss:
 	MTSPR_CPU6(SPRN_MI_RPN, r10, r3)	/* Update TLB entry */
 
 	/* Restore registers */
-#ifdef CONFIG_8xx_CPU6
+#if defined(CONFIG_8xx_CPU6) || defined(CONFIG_MODULES) || defined (CONFIG_DEBUG_PAGEALLOC)
 	mfspr r3, SPRN_SPRG_SCRATCH2
 #endif
 	EXCEPTION_EPILOG_0
 	rfi
 
-/*
- * Bottom part of DataStoreTLBMiss handler for IMMR area
- * not enough space in the DataStoreTLBMiss area
- */
-DTLBMissIMMR:
-	mtcr r10
-	/* Set 512k byte guarded page and mark it valid */
-	li r10, MD_PS512K | MD_GUARDED | MD_SVALID
-	MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
-	mfspr r10, SPRN_IMMR	/* Get current IMMR */
-	rlwinm r10, r10, 0, 0xfff80000	/* Get 512 kbytes boundary */
-	ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
-			  _PAGE_PRESENT | _PAGE_NO_CACHE
-	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */
-
-	li r11, RPN_PATTERN
-	mtspr SPRN_DAR, r11	/* Tag DAR */
-	EXCEPTION_EPILOG_0
-	rfi
-
 	. = 0x1200
 DataStoreTLBMiss:
+	mtspr SPRN_SPRG_SCRATCH2, r3
 	EXCEPTION_PROLOG_0
-	mfcr r10
+	mfcr r3
 
 	/* If we are faulting a kernel address, we have to use the
 	 * kernel page tables.
 	 */
-	mfspr r11, SPRN_MD_EPN
-	rlwinm r11, r11, 16, 0xfff8
+	mfspr r10, SPRN_MD_EPN
+	rlwinm r10, r10, 16, 0xfff8
+	cmpli cr0, r10, PAGE_OFFSET@h
+	mfspr r11, SPRN_M_TW	/* Get level 1 table */
+	blt+ 3f
 #ifndef CONFIG_PIN_TLB_IMMR
-	cmpli cr0, r11, VIRT_IMMR_BASE@h
+	cmpli cr0, r10, VIRT_IMMR_BASE@h
 #endif
-	cmpli cr7, r11, PAGE_OFFSET@h
+_ENTRY(DTLBMiss_cmp)
+	cmpli cr7, r10, (PAGE_OFFSET + 0x1800000)@h
+	lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 #ifndef CONFIG_PIN_TLB_IMMR
_ENTRY(DTLBMiss_jmp)
 	beq- DTLBMissIMMR
 #endif
-	bge- cr7, 4f
-
-	mfspr r11, SPRN_M_TW	/* Get level 1 table */
+	blt cr7, DTLBMissLinear
3:
-	mtcr r10
-#ifdef CONFIG_8xx_CPU6
-	mtspr SPRN_SPRG_SCRATCH2, r3
-#endif
+	mtcr r3
 	mfspr r10, SPRN_MD_EPN
 
 	/* Insert level 1 index */
@@ -481,30 +460,7 @@ _ENTRY(DTLBMiss_jmp)
 	MTSPR_CPU6(SPRN_MD_RPN, r10, r3)	/* Update TLB entry */
 
 	/* Restore registers */
-#ifdef CONFIG_8xx_CPU6
 	mfspr r3, SPRN_SPRG_SCRATCH2
-#endif
-	mtspr SPRN_DAR, r11	/* Tag DAR */
-	EXCEPTION_EPILOG_0
-	rfi
-
-4:
-_ENTRY(DTLBMiss_cmp)
-	cmpli cr0, r11, (PAGE_OFFSET + 0x1800000)@h
-	lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
-	bge- 3b
-
-	mtcr r10
-	/* Set 8M byte page and mark it valid */
-	li r10, MD_PS8MEG | MD_SVALID
-	MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
-	mfspr r10, SPRN_MD_EPN
-	rlwinm r10, r10, 0, 0x0f800000	/* 8xx supports max 256Mb RAM */
-	ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
-			  _PAGE_PRESENT
-	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */
-
-	li r11, RPN_PATTERN
 	mtspr SPRN_DAR, r11	/* Tag DAR */
 	EXCEPTION_EPILOG_0
 	rfi
@@ -570,6 +526,43 @@ DARFixed:/* Return from dcbx instruction bug workaround */
 
 	. = 0x2000
 
+/*
+ * Bottom part of DataStoreTLBMiss handlers for IMMR area and linear RAM.
+ * not enough space in the DataStoreTLBMiss area.
+ */
+DTLBMissIMMR:
+	mtcr r3
+	/* Set 512k byte guarded page and mark it valid */
+	li r10, MD_PS512K | MD_GUARDED | MD_SVALID
+	MTSPR_CPU6(SPRN_MD_TWC, r10, r11)
+	mfspr r10, SPRN_IMMR	/* Get current IMMR */
+	rlwinm r10, r10, 0, 0xfff80000	/* Get 512 kbytes boundary */
+	ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+			  _PAGE_PRESENT | _PAGE_NO_CACHE
+	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */
+
+	li r11, RPN_PATTERN
+	mtspr SPRN_DAR, r11	/* Tag DAR */
+	mfspr r3, SPRN_SPRG_SCRATCH2
+	EXCEPTION_EPILOG_0
+	rfi
+
+DTLBMissLinear:
+	mtcr r3
+	/* Set 8M byte page and mark it valid */
+	li r11, MD_PS8MEG | MD_SVALID
+	MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
+	rlwinm r10, r10, 16, 0x0f800000	/* 8xx supports max 256Mb RAM */
+	ori r10, r10, 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY | \
+			  _PAGE_PRESENT
+	MTSPR_CPU6(SPRN_MD_RPN, r10, r11)	/* Update TLB entry */
+
+	li r11, RPN_PATTERN
+	mtspr SPRN_DAR, r11	/* Tag DAR */
+	mfspr r3, SPRN_SPRG_SCRATCH2
+	EXCEPTION_EPILOG_0
+	rfi
+
 /* This is the procedure to calculate the data EA for buggy dcbx,dcbi instructions
  * by decoding the registers used by the dcbx instruction and adding them.
  * DAR is set to the calculated address.
@@ -586,7 +579,9 @@ FixupDAR:/* Entry point for dcbx workaround. */
 	rlwinm r11, r10, 16, 0xfff8
_ENTRY(FixupDAR_cmp)
 	cmpli cr7, r11, (PAGE_OFFSET + 0x1800000)@h
-	blt- cr7, 200f
+	/* create physical page address from effective address */
+	tophys(r11, r10)
+	blt- cr7, 201f
 	lis r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 	/* Insert level 1 index */
3:	rlwimi r11, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
@@ -616,10 +611,6 @@ _ENTRY(FixupDAR_cmp)
141:	mfspr r10,SPRN_SPRG_SCRATCH2
 	b DARFixed	/* Nope, go back to normal TLB processing */
 
-	/* create physical page address from effective address */
-200:	tophys(r11, r10)
-	b 201b
-
144:	mfspr r10, SPRN_DSISR
 	rlwinm r10, r10,0,7,5	/* Clear store bit for buggy dcbst insn */
 	mtspr SPRN_DSISR, r10
@@ -894,6 +885,7 @@ sdata:
 	.align PAGE_SHIFT
empty_zero_page:
 	.space PAGE_SIZE
+EXPORT_SYMBOL(empty_zero_page)
 
 	.globl swapper_pg_dir
swapper_pg_dir:
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index 3bfa3150911f..bf4c6021515f 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -42,6 +42,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/cache.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 #include "head_booke.h"
 
 /* As with the other PowerPC ports, it is expected that when code
@@ -1223,6 +1224,7 @@ sdata:
 	.globl empty_zero_page
empty_zero_page:
 	.space 4096
+EXPORT_SYMBOL(empty_zero_page)
 	.globl swapper_pg_dir
swapper_pg_dir:
 	.space PGD_TABLE_SIZE
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 9781c69eae57..03d089b3ed72 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -275,7 +275,7 @@ int hw_breakpoint_handler(struct die_args *args)
 	if (!stepped) {
 		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
 			"0x%lx will be disabled.", info->address);
-		perf_event_disable(bp);
+		perf_event_disable_inatomic(bp);
 		goto out;
 	}
 	/*
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index bd739fed26e3..72dac0b58061 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -90,6 +90,7 @@ ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)
 * Threads will spin in HMT_LOW until the lock bit is cleared.
 * r14 - pointer to core_idle_state
 * r15 - used to load contents of core_idle_state
+ * r9 - used as a temporary variable
 */
 
core_idle_lock_held:
@@ -99,6 +100,8 @@ core_idle_lock_held:
 	bne 3b
 	HMT_MEDIUM
 	lwarx r15,0,r14
+	andi. r9,r15,PNV_CORE_IDLE_LOCK_BIT
+	bne core_idle_lock_held
 	blr
 
 /*
@@ -163,12 +166,6 @@ _GLOBAL(pnv_powersave_common)
 	std r9,_MSR(r1)
 	std r1,PACAR1(r13)
 
-#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
-	/* Tell KVM we're entering idle */
-	li r4,KVM_HWTHREAD_IN_IDLE
-	stb r4,HSTATE_HWTHREAD_STATE(r13)
-#endif
-
 	/*
 	 * Go to real mode to do the nap, as required by the architecture.
 	 * Also, we need to be in real mode before setting hwthread_state,
@@ -185,6 +182,26 @@ _GLOBAL(pnv_powersave_common)
 
 	.globl pnv_enter_arch207_idle_mode
pnv_enter_arch207_idle_mode:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	/* Tell KVM we're entering idle */
+	li r4,KVM_HWTHREAD_IN_IDLE
+	/******************************************************/
+	/* N O T E   W E L L   ! ! !   N O T E   W E L L      */
+	/* The following store to HSTATE_HWTHREAD_STATE(r13)  */
+	/* MUST occur in real mode, i.e. with the MMU off,    */
+	/* and the MMU must stay off until we clear this flag */
+	/* and test HSTATE_HWTHREAD_REQ(r13) in the system    */
+	/* reset interrupt vector in exceptions-64s.S.        */
+	/* The reason is that another thread can switch the   */
+	/* MMU to a guest context whenever this flag is set   */
+	/* to KVM_HWTHREAD_IN_IDLE, and if the MMU was on,    */
+	/* that would potentially cause this thread to start  */
+	/* executing instructions from guest memory in        */
+	/* hypervisor mode, leading to a host crash or data   */
+	/* corruption, or worse.                              */
+	/******************************************************/
+	stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
 	stb r3,PACA_THREAD_IDLE_STATE(r13)
 	cmpwi cr3,r3,PNV_THREAD_SLEEP
 	bge cr3,2f
@@ -250,6 +267,12 @@ enter_winkle:
 * r3 - requested stop state
 */
power_enter_stop:
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+	/* Tell KVM we're entering idle */
+	li r4,KVM_HWTHREAD_IN_IDLE
+	/* DO THIS IN REAL MODE! See comment above. */
+	stb r4,HSTATE_HWTHREAD_STATE(r13)
+#endif
 /*
 * Check if the requested state is a deep idle state.
 */
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 0d432194c018..384357cb8bc0 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -18,6 +18,7 @@
 #include <asm/unistd.h>
 #include <asm/asm-compat.h>
 #include <asm/asm-offsets.h>
+#include <asm/export.h>
 
 	.text
 
@@ -118,3 +119,4 @@ _GLOBAL(longjmp)
_GLOBAL(current_stack_pointer)
 	PPC_LL r3,0(r1)
 	blr
+EXPORT_SYMBOL(current_stack_pointer)
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 03756ffdcd71..93cf7a5846a6 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -33,6 +33,7 @@
 #include <asm/kexec.h>
 #include <asm/bug.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 	.text
 
@@ -319,6 +320,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_UNIFIED_ID_CACHE)
 #endif /* CONFIG_4xx */
 	isync
 	blr
+EXPORT_SYMBOL(flush_instruction_cache)
 #endif /* CONFIG_PPC_8xx */
 
 /*
@@ -359,6 +361,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	isync
 	blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
+EXPORT_SYMBOL(flush_icache_range)
 
 /*
  * Flush a particular page from the data cache to RAM.
@@ -497,6 +500,7 @@ _GLOBAL(copy_page)
 	li r0,MAX_COPY_PREFETCH
 	li r11,4
 	b 2b
+EXPORT_SYMBOL(copy_page)
 
 /*
  * Extended precision shifts.
@@ -524,6 +528,7 @@ _GLOBAL(__ashrdi3)
 	sraw r3,r3,r5	# MSW = MSW >> count
 	or r4,r4,r7	# LSW |= t2
 	blr
+EXPORT_SYMBOL(__ashrdi3)
 
_GLOBAL(__ashldi3)
 	subfic r6,r5,32
@@ -535,6 +540,7 @@ _GLOBAL(__ashldi3)
 	slw r4,r4,r5	# LSW = LSW << count
 	or r3,r3,r7	# MSW |= t2
 	blr
+EXPORT_SYMBOL(__ashldi3)
 
_GLOBAL(__lshrdi3)
 	subfic r6,r5,32
@@ -546,6 +552,7 @@ _GLOBAL(__lshrdi3)
 	srw r3,r3,r5	# MSW = MSW >> count
 	or r4,r4,r7	# LSW |= t2
 	blr
+EXPORT_SYMBOL(__lshrdi3)
 
 /*
  * 64-bit comparison: __cmpdi2(s64 a, s64 b)
@@ -561,6 +568,7 @@ _GLOBAL(__cmpdi2)
 	bltlr
 	li r3,2
 	blr
+EXPORT_SYMBOL(__cmpdi2)
 /*
  * 64-bit comparison: __ucmpdi2(u64 a, u64 b)
  * Returns 0 if a < b, 1 if a == b, 2 if a > b.
@@ -575,6 +583,7 @@ _GLOBAL(__ucmpdi2)
 	bltlr
 	li r3,2
 	blr
+EXPORT_SYMBOL(__ucmpdi2)
 
_GLOBAL(__bswapdi2)
 	rotlwi r9,r4,8
@@ -586,6 +595,7 @@ _GLOBAL(__bswapdi2)
 	mr r3,r9
 	mr r4,r10
 	blr
+EXPORT_SYMBOL(__bswapdi2)
 
 #ifdef CONFIG_SMP
_GLOBAL(start_secondary_resume)
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 9f0bed214bcb..4f178671f230 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -27,6 +27,7 @@
 #include <asm/kexec.h>
 #include <asm/ptrace.h>
 #include <asm/mmu.h>
+#include <asm/export.h>
 
 	.text
 
@@ -110,6 +111,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	isync
 	blr
_ASM_NOKPROBE_SYMBOL(flush_icache_range)
+EXPORT_SYMBOL(flush_icache_range)
 
 /*
  * Like above, but only do the D-cache.
@@ -140,6 +142,7 @@ _GLOBAL(flush_dcache_range)
 	bdnz 0b
 	sync
 	blr
+EXPORT_SYMBOL(flush_dcache_range)
 
 /*
  * Like above, but works on non-mapped physical addresses.
@@ -243,6 +246,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_COHERENT_ICACHE)
 	blr
 
_GLOBAL(__bswapdi2)
+EXPORT_SYMBOL(__bswapdi2)
 	srdi r8,r3,32
 	rlwinm r7,r3,8,0xffffffff
 	rlwimi r7,r3,24,0,7
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 95d3769a2e26..74bec5498972 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -56,6 +56,7 @@ static DECLARE_BITMAP(phb_bitmap, MAX_PHBS);
 
 /* ISA Memory physical address */
 resource_size_t isa_mem_base;
+EXPORT_SYMBOL(isa_mem_base);
 
 static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
 
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 1f7930037cb7..678f87a63645 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -32,6 +32,8 @@ unsigned long isa_io_base = 0;
 unsigned long pci_dram_offset = 0;
 int pcibios_assign_bus_offset = 1;
+EXPORT_SYMBOL(isa_io_base);
+EXPORT_SYMBOL(pci_dram_offset);
 
 void pcibios_make_OF_bus_map(void);
 
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
deleted file mode 100644
index 9f01e28ecef3..000000000000
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ /dev/null
@@ -1,37 +0,0 @@
-#include <linux/ftrace.h>
-#include <linux/mm.h>
-
-#include <asm/processor.h>
-#include <asm/switch_to.h>
-#include <asm/cacheflush.h>
-#include <asm/epapr_hcalls.h>
-
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(flush_dcache_range);
-#endif
-EXPORT_SYMBOL(flush_icache_range);
-
-EXPORT_SYMBOL(empty_zero_page);
-
-long long __bswapdi2(long long);
-EXPORT_SYMBOL(__bswapdi2);
-
-#ifdef CONFIG_FUNCTION_TRACER
-EXPORT_SYMBOL(_mcount);
-#endif
-
-#ifdef CONFIG_PPC_FPU
-EXPORT_SYMBOL(load_fp_state);
-EXPORT_SYMBOL(store_fp_state);
-#endif
-
-#ifdef CONFIG_ALTIVEC
-EXPORT_SYMBOL(load_vr_state);
-EXPORT_SYMBOL(store_vr_state);
-#endif
-
-#ifdef CONFIG_EPAPR_PARAVIRT
-EXPORT_SYMBOL(epapr_hypercall_start);
-#endif
-
-EXPORT_SYMBOL(current_stack_pointer);
diff --git a/arch/powerpc/kernel/ppc_ksyms_32.c b/arch/powerpc/kernel/ppc_ksyms_32.c
deleted file mode 100644
index 2bfaafe5be99..000000000000
--- a/arch/powerpc/kernel/ppc_ksyms_32.c
+++ /dev/null
@@ -1,60 +0,0 @@
-#include <linux/export.h>
-#include <linux/smp.h>
-
-#include <asm/page.h>
-#include <asm/dma.h>
-#include <asm/io.h>
-#include <asm/hw_irq.h>
-#include <asm/time.h>
-#include <asm/mmu_context.h>
-#include <asm/pgtable.h>
-#include <asm/dcr.h>
-
-EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
-EXPORT_SYMBOL(DMA_MODE_READ);
-EXPORT_SYMBOL(DMA_MODE_WRITE);
-
-#if defined(CONFIG_PCI)
-EXPORT_SYMBOL(isa_io_base);
-EXPORT_SYMBOL(isa_mem_base);
-EXPORT_SYMBOL(pci_dram_offset);
-#endif
-
-#ifdef CONFIG_SMP
-EXPORT_SYMBOL(smp_hw_index);
-#endif
-
-long long __ashrdi3(long long, int);
-long long __ashldi3(long long, int);
-long long __lshrdi3(long long, int);
-int __ucmpdi2(unsigned long long, unsigned long long);
-int __cmpdi2(long long, long long);
-EXPORT_SYMBOL(__ashrdi3);
-EXPORT_SYMBOL(__ashldi3);
-EXPORT_SYMBOL(__lshrdi3);
-EXPORT_SYMBOL(__ucmpdi2);
-EXPORT_SYMBOL(__cmpdi2);
-
-EXPORT_SYMBOL(timer_interrupt);
-EXPORT_SYMBOL(tb_ticks_per_jiffy);
-
-EXPORT_SYMBOL(switch_mmu_context);
-
-#ifdef CONFIG_PPC_STD_MMU_32
-extern long mol_trampoline;
-EXPORT_SYMBOL(mol_trampoline); /* For MOL */
-EXPORT_SYMBOL(flush_hash_pages); /* For MOL */
-#ifdef CONFIG_SMP
-extern int mmu_hash_lock;
-EXPORT_SYMBOL(mmu_hash_lock); /* For MOL */
-#endif /* CONFIG_SMP */
-extern long *intercept_table;
-EXPORT_SYMBOL(intercept_table);
-#endif /* CONFIG_PPC_STD_MMU_32 */
-
-#ifdef CONFIG_PPC_DCR_NATIVE
-EXPORT_SYMBOL(__mtdcr);
-EXPORT_SYMBOL(__mfdcr);
-#endif
-
-EXPORT_SYMBOL(flush_instruction_cache);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 9e7c10fe205f..ce6dc61b15b2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1012,7 +1012,7 @@ void restore_tm_state(struct pt_regs *regs)
 	/* Ensure that restore_math() will restore */
 	if (msr_diff & MSR_FP)
 		current->thread.load_fp = 1;
-#ifdef CONFIG_ALIVEC
+#ifdef CONFIG_ALTIVEC
 	if (cpu_has_feature(CPU_FTR_ALTIVEC) && msr_diff & MSR_VEC)
 		current->thread.load_vec = 1;
 #endif
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c
index f52b7db327c8..010b7b310237 100644
--- a/arch/powerpc/kernel/ptrace32.c
+++ b/arch/powerpc/kernel/ptrace32.c
@@ -74,7 +74,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;
 
 		copied = access_process_vm(child, (u64)addrOthers, &tmp,
-				sizeof(tmp), 0);
+				sizeof(tmp), FOLL_FORCE);
 		if (copied != sizeof(tmp))
 			break;
 		ret = put_user(tmp, (u32 __user *)data);
@@ -179,7 +179,8 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
 			break;
 		ret = 0;
 		if (access_process_vm(child, (u64)addrOthers, &tmp,
-					sizeof(tmp), 1) == sizeof(tmp))
+					sizeof(tmp),
+					FOLL_FORCE | FOLL_WRITE) == sizeof(tmp))
 			break;
 		ret = -EIO;
 		break;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index dba265c586df..270ee30abdcf 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -131,15 +131,26 @@ void machine_shutdown(void)
 		ppc_md.machine_shutdown();
 }
 
+static void machine_hang(void)
+{
+	pr_emerg("System Halted, OK to turn off power\n");
+	local_irq_disable();
+	while (1)
+		;
+}
+
 void machine_restart(char *cmd)
 {
 	machine_shutdown();
 	if (ppc_md.restart)
 		ppc_md.restart(cmd);
+
 	smp_send_stop();
-	printk(KERN_EMERG "System Halted, OK to turn off power\n");
-	local_irq_disable();
-	while (1) ;
+
+	do_kernel_restart(cmd);
+	mdelay(1000);
+
+	machine_hang();
 }
 
 void machine_power_off(void)
@@ -147,10 +158,9 @@ void machine_power_off(void)
 	machine_shutdown();
 	if (pm_power_off)
 		pm_power_off();
+
 	smp_send_stop();
-	printk(KERN_EMERG "System Halted, OK to turn off power\n");
-	local_irq_disable();
-	while (1) ;
+	machine_hang();
 }
 /* Used by the G5 thermal driver */
EXPORT_SYMBOL_GPL(machine_power_off);
@@ -163,10 +173,9 @@ void machine_halt(void)
 	machine_shutdown();
 	if (ppc_md.halt)
 		ppc_md.halt();
+
 	smp_send_stop();
-	printk(KERN_EMERG "System Halted, OK to turn off power\n");
-	local_irq_disable();
-	while (1) ;
+	machine_hang();
 }
 
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 24ec3ea4b3a2..5fe79182f0fa 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <linux/console.h>
 #include <linux/memblock.h>
+#include <linux/export.h>
 
 #include <asm/io.h>
 #include <asm/prom.h>
@@ -47,11 +48,16 @@ int boot_cpuid_phys;
EXPORT_SYMBOL_GPL(boot_cpuid_phys);
 
 int smp_hw_index[NR_CPUS];
+EXPORT_SYMBOL(smp_hw_index);
 
 unsigned long ISA_DMA_THRESHOLD;
 unsigned int DMA_MODE_READ;
 unsigned int DMA_MODE_WRITE;
 
+EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
+EXPORT_SYMBOL(DMA_MODE_READ);
+EXPORT_SYMBOL(DMA_MODE_WRITE);
+
 /*
  * These are used in binfmt_elf.c to put aux entries on the stack
  * for each elf executable being started.
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 67859b7d1c97..bc3f7d0d7b79 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -596,6 +596,7 @@ void timer_interrupt(struct pt_regs * regs)
 	irq_exit();
 	set_irq_regs(old_regs);
 }
+EXPORT_SYMBOL(timer_interrupt);
 
 /*
  * Hypervisor decrementer interrupts shouldn't occur but are sometimes
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index a1f8f5641e9e..023a462725b5 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -273,7 +273,6 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
 	force_sig_info(signr, &info, current);
 }
 
-#ifdef CONFIG_PPC64
 void system_reset_exception(struct pt_regs *regs)
 {
 	/* See if any machine dependent calls */
@@ -291,6 +290,7 @@ void system_reset_exception(struct pt_regs *regs)
 	/* What should we do here? We could issue a shutdown or hard reset. */
 }
 
+#ifdef CONFIG_PPC64
 /*
  * This function is called in real mode. Strictly no printk's please.
  *
@@ -352,12 +352,11 @@ static inline int check_io_access(struct pt_regs *regs)
 		 * For the debug message, we look at the preceding
 		 * load or store.
 		 */
-		if (*nip == 0x60000000)		/* nop */
+		if (*nip == PPC_INST_NOP)
 			nip -= 2;
-		else if (*nip == 0x4c00012c)	/* isync */
+		else if (*nip == PPC_INST_ISYNC)
 			--nip;
-		if (*nip == 0x7c0004ac || (*nip >> 26) == 3) {
-			/* sync or twi */
+		if (*nip == PPC_INST_SYNC || (*nip >> 26) == OP_TRAP) {
 			unsigned int rb;
 
 			--nip;
@@ -668,6 +667,31 @@ int machine_check_e200(struct pt_regs *regs)
 
 	return 0;
 }
+#elif defined(CONFIG_PPC_8xx)
+int machine_check_8xx(struct pt_regs *regs)
+{
+	unsigned long reason = get_mc_reason(regs);
+
+	pr_err("Machine check in kernel mode.\n");
+	pr_err("Caused by (from SRR1=%lx): ", reason);
+	if (reason & 0x40000000)
+		pr_err("Fetch error at address %lx\n", regs->nip);
+	else
+		pr_err("Data access error at address %lx\n", regs->dar);
+
+#ifdef CONFIG_PCI
+	/* the qspan pci read routines can cause machine checks -- Cort
+	 *
+	 * yuck !!! that totally needs to go away ! There are better ways
+	 * to deal with that than having a wart in the mcheck handler.
+	 * -- BenH
+	 */
+	bad_page_fault(regs, regs->dar, SIGBUS);
+	return 1;
+#else
+	return 0;
+#endif
+}
 #else
 int machine_check_generic(struct pt_regs *regs)
 {
@@ -727,17 +751,6 @@ void machine_check_exception(struct pt_regs *regs)
 	if (recover > 0)
 		goto bail;
 
-#if defined(CONFIG_8xx) && defined(CONFIG_PCI)
-	/* the qspan pci read routines can cause machine checks -- Cort
-	 *
-	 * yuck !!! that totally needs to go away ! There are better ways
-	 * to deal with that than having a wart in the mcheck handler.
-	 * -- BenH
-	 */
-	bad_page_fault(regs, regs->dar, SIGBUS);
-	goto bail;
-#endif
-
 	if (debugger_fault_handler(regs))
 		goto bail;
 
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index bc85bdff4e01..0c123f3406cd 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -6,6 +6,7 @@
 #include <asm/thread_info.h>
 #include <asm/page.h>
 #include <asm/ptrace.h>
+#include <asm/export.h>
 
 /*
  * Load state from memory into VMX registers including VSCR.
@@ -17,6 +18,7 @@ _GLOBAL(load_vr_state)
 	mtvscr v0
 	REST_32VRS(0,r4,r3)
 	blr
+EXPORT_SYMBOL(load_vr_state)
 
 /*
  * Store VMX state into memory, including VSCR.
@@ -28,6 +30,7 @@ _GLOBAL(store_vr_state)
 	li r4, VRSTATE_VSCR
 	stvx v0, r4, r3
 	blr
+EXPORT_SYMBOL(store_vr_state)
 
 /*
  * Disable VMX for the task which had it previously,