Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/cpu_setup_fsl_booke.S |  28
-rw-r--r--  arch/powerpc/kernel/entry_64.S             | 117
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S       | 140
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S       | 206
-rw-r--r--  arch/powerpc/kernel/ftrace.c               | 137
-rw-r--r--  arch/powerpc/kernel/head_64.S              | 117
-rw-r--r--  arch/powerpc/kernel/idle_book3e.S          |   2
-rw-r--r--  arch/powerpc/kernel/idle_power4.S          |   2
-rw-r--r--  arch/powerpc/kernel/idle_power7.S          |   4
-rw-r--r--  arch/powerpc/kernel/misc_64.S              |  46
-rw-r--r--  arch/powerpc/kernel/module_64.c            | 279
-rw-r--r--  arch/powerpc/kernel/process.c              |  17
-rw-r--r--  arch/powerpc/kernel/prom_init_check.sh     |   2
-rw-r--r--  arch/powerpc/kernel/setup_64.c             |   2
-rw-r--r--  arch/powerpc/kernel/systbl.S               |  18
-rw-r--r--  arch/powerpc/kernel/tm.S                   |  13
16 files changed, 635 insertions, 495 deletions
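
Nearly every hunk below follows from one difference between the powerpc64 ELFv1 and ELFv2 ABIs: under ELFv1 a global function has an OPD function descriptor (code address plus TOC pointer) and the code itself sits behind a dot-prefixed symbol, so direct calls are `bl .foo` and indirect calls must dereference the descriptor first; under ELFv2 the symbol is the code address and the callee derives its own TOC from r12. Hence the blanket `bl .foo` -> `bl foo` conversions, the `#if !defined(_CALL_ELF) || _CALL_ELF != 2` guards around descriptor dereferences, and the switch to r12 for indirect branches. A minimal standalone sketch (plain C with made-up addresses, for illustration only, not kernel code) of what a caller sees in each ABI:

```c
#include <stdio.h>

/* ELFv1: a "function pointer" is the address of an OPD descriptor. */
struct opd_entry {
	unsigned long funcaddr;	/* address of the code (the ".foo" dot symbol) */
	unsigned long r2;	/* TOC value the function expects in r2 */
};

/* Pretend addresses, for illustration only. */
static const unsigned long foo_code = 0x1000;
static const unsigned long foo_toc  = 0x2000;

/* ELFv1 indirect call: dereference the descriptor to find the code
 * address (cf. the "ld r12,0(r27)  deref function descriptor"
 * sequences the patch guards with _CALL_ELF != 2). */
static unsigned long entry_elfv1(const struct opd_entry *fp)
{
	return fp->funcaddr;
}

/* ELFv2 indirect call: the pointer already is the entry point; the
 * callee rebuilds its TOC from r12, so the caller just moves the
 * target into r12 and branches. */
static unsigned long entry_elfv2(unsigned long fp)
{
	return fp;
}

int main(void)
{
	struct opd_entry foo_desc = { foo_code, foo_toc };

	printf("ELFv1 entry via descriptor: %#lx\n", entry_elfv1(&foo_desc));
	printf("ELFv2 entry, direct:        %#lx\n", entry_elfv2(foo_code));
	return 0;
}
```

This is also why copy_thread() below now wraps usp with ppc_function_entry(): ret_from_kernel_thread branches to gpr[14] via blrl, which needs an actual entry address rather than a descriptor pointer, and on ELFv2 it additionally copies r14 into r12 for the callee's TOC setup.
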
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S index cc2d8962e090..4f1393d20079 100644 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S @@ -94,12 +94,12 @@ _GLOBAL(setup_altivec_idle) _GLOBAL(__setup_cpu_e6500) mflr r6 #ifdef CONFIG_PPC64 - bl .setup_altivec_ivors + bl setup_altivec_ivors /* Touch IVOR42 only if the CPU supports E.HV category */ mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_lrat_ivor + bl setup_lrat_ivor 1: #endif bl setup_pw20_idle @@ -164,15 +164,15 @@ _GLOBAL(__setup_cpu_e5500) #ifdef CONFIG_PPC_BOOK3E_64 _GLOBAL(__restore_cpu_e6500) mflr r5 - bl .setup_altivec_ivors + bl setup_altivec_ivors /* Touch IVOR42 only if the CPU supports E.HV category */ mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_lrat_ivor + bl setup_lrat_ivor 1: - bl .setup_pw20_idle - bl .setup_altivec_idle + bl setup_pw20_idle + bl setup_altivec_idle bl __restore_cpu_e5500 mtlr r5 blr @@ -181,9 +181,9 @@ _GLOBAL(__restore_cpu_e5500) mflr r4 bl __e500_icache_setup bl __e500_dcache_setup - bl .__setup_base_ivors - bl .setup_perfmon_ivor - bl .setup_doorbell_ivors + bl __setup_base_ivors + bl setup_perfmon_ivor + bl setup_doorbell_ivors /* * We only want to touch IVOR38-41 if we're running on hardware * that supports category E.HV. The architectural way to determine @@ -192,7 +192,7 @@ _GLOBAL(__restore_cpu_e5500) mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_ehv_ivors + bl setup_ehv_ivors 1: mtlr r4 blr @@ -201,9 +201,9 @@ _GLOBAL(__setup_cpu_e5500) mflr r5 bl __e500_icache_setup bl __e500_dcache_setup - bl .__setup_base_ivors - bl .setup_perfmon_ivor - bl .setup_doorbell_ivors + bl __setup_base_ivors + bl setup_perfmon_ivor + bl setup_doorbell_ivors /* * We only want to touch IVOR38-41 if we're running on hardware * that supports category E.HV. The architectural way to determine @@ -212,7 +212,7 @@ _GLOBAL(__setup_cpu_e5500) mfspr r10,SPRN_MMUCFG rlwinm. r10,r10,0,MMUCFG_LPIDSIZE beq 1f - bl .setup_ehv_ivors + bl setup_ehv_ivors b 2f 1: ld r10,CPU_SPEC_FEATURES(r4) diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 662c6dd98072..9fde8a1bf1e1 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S @@ -39,8 +39,8 @@ * System calls. */ .section ".toc","aw" -.SYS_CALL_TABLE: - .tc .sys_call_table[TC],.sys_call_table +SYS_CALL_TABLE: + .tc sys_call_table[TC],sys_call_table /* This value is used to mark exception frames on the stack. */ exception_marker: @@ -106,7 +106,7 @@ BEGIN_FW_FTR_SECTION LDX_BE r10,0,r10 /* get log write index */ cmpd cr1,r11,r10 beq+ cr1,33f - bl .accumulate_stolen_time + bl accumulate_stolen_time REST_GPR(0,r1) REST_4GPRS(3,r1) REST_2GPRS(7,r1) @@ -143,7 +143,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) std r10,SOFTE(r1) #ifdef SHOW_SYSCALLS - bl .do_show_syscall + bl do_show_syscall REST_GPR(0,r1) REST_4GPRS(3,r1) REST_2GPRS(7,r1) @@ -162,7 +162,7 @@ system_call: /* label this so stack traces look sane */ * Need to vector to 32 Bit or default sys_call_table here, * based on caller's run-mode / personality. */ - ld r11,.SYS_CALL_TABLE@toc(2) + ld r11,SYS_CALL_TABLE@toc(2) andi. 
r10,r10,_TIF_32BIT beq 15f addi r11,r11,8 /* use 32-bit syscall entries */ @@ -174,14 +174,14 @@ system_call: /* label this so stack traces look sane */ clrldi r8,r8,32 15: slwi r0,r0,4 - ldx r10,r11,r0 /* Fetch system call handler [ptr] */ - mtctr r10 + ldx r12,r11,r0 /* Fetch system call handler [ptr] */ + mtctr r12 bctrl /* Call handler */ syscall_exit: std r3,RESULT(r1) #ifdef SHOW_SYSCALLS - bl .do_show_syscall_exit + bl do_show_syscall_exit ld r3,RESULT(r1) #endif CURRENT_THREAD_INFO(r12, r1) @@ -248,9 +248,9 @@ syscall_error: /* Traced system call support */ syscall_dotrace: - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_syscall_trace_enter + bl do_syscall_trace_enter /* * Restore argument registers possibly just changed. * We use the return value of do_syscall_trace_enter @@ -308,7 +308,7 @@ syscall_exit_work: 4: /* Anything else left to do? */ SET_DEFAULT_THREAD_PPR(r3, r10) /* Set thread.ppr = 3 */ andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) - beq .ret_from_except_lite + beq ret_from_except_lite /* Re-enable interrupts */ #ifdef CONFIG_PPC_BOOK3E @@ -319,10 +319,10 @@ syscall_exit_work: mtmsrd r10,1 #endif /* CONFIG_PPC_BOOK3E */ - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_syscall_trace_leave - b .ret_from_except + bl do_syscall_trace_leave + b ret_from_except /* Save non-volatile GPRs, if not already saved. */ _GLOBAL(save_nvgprs) @@ -345,42 +345,44 @@ _GLOBAL(save_nvgprs) */ _GLOBAL(ppc_fork) - bl .save_nvgprs - bl .sys_fork + bl save_nvgprs + bl sys_fork b syscall_exit _GLOBAL(ppc_vfork) - bl .save_nvgprs - bl .sys_vfork + bl save_nvgprs + bl sys_vfork b syscall_exit _GLOBAL(ppc_clone) - bl .save_nvgprs - bl .sys_clone + bl save_nvgprs + bl sys_clone b syscall_exit _GLOBAL(ppc32_swapcontext) - bl .save_nvgprs - bl .compat_sys_swapcontext + bl save_nvgprs + bl compat_sys_swapcontext b syscall_exit _GLOBAL(ppc64_swapcontext) - bl .save_nvgprs - bl .sys_swapcontext + bl save_nvgprs + bl sys_swapcontext b syscall_exit _GLOBAL(ret_from_fork) - bl .schedule_tail + bl schedule_tail REST_NVGPRS(r1) li r3,0 b syscall_exit _GLOBAL(ret_from_kernel_thread) - bl .schedule_tail + bl schedule_tail REST_NVGPRS(r1) - ld r14, 0(r14) mtlr r14 mr r3,r15 +#if defined(_CALL_ELF) && _CALL_ELF == 2 + mr r12,r14 +#endif blrl li r3,0 b syscall_exit @@ -611,7 +613,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_DSCR) _GLOBAL(ret_from_except) ld r11,_TRAP(r1) andi. r0,r11,1 - bne .ret_from_except_lite + bne ret_from_except_lite REST_NVGPRS(r1) _GLOBAL(ret_from_except_lite) @@ -661,23 +663,23 @@ _GLOBAL(ret_from_except_lite) #endif 1: andi. r0,r4,_TIF_NEED_RESCHED beq 2f - bl .restore_interrupts + bl restore_interrupts SCHEDULE_USER - b .ret_from_except_lite + b ret_from_except_lite 2: #ifdef CONFIG_PPC_TRANSACTIONAL_MEM andi. r0,r4,_TIF_USER_WORK_MASK & ~_TIF_RESTORE_TM bne 3f /* only restore TM if nothing else to do */ addi r3,r1,STACK_FRAME_OVERHEAD - bl .restore_tm_state + bl restore_tm_state b restore 3: #endif - bl .save_nvgprs - bl .restore_interrupts + bl save_nvgprs + bl restore_interrupts addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_notify_resume - b .ret_from_except + bl do_notify_resume + b ret_from_except resume_kernel: /* check current_thread_info, _TIF_EMULATE_STACK_STORE */ @@ -730,7 +732,7 @@ resume_kernel: * sure we are soft-disabled first and reconcile irq state. 
*/ RECONCILE_IRQ_STATE(r3,r4) -1: bl .preempt_schedule_irq +1: bl preempt_schedule_irq /* Re-test flags and eventually loop */ CURRENT_THREAD_INFO(r9, r1) @@ -792,7 +794,7 @@ restore_no_replay: */ do_restore: #ifdef CONFIG_PPC_BOOK3E - b .exception_return_book3e + b exception_return_book3e #else /* * Clear the reservation. If we know the CPU tracks the address of @@ -907,7 +909,7 @@ restore_check_irq_replay: * * Still, this might be useful for things like hash_page */ - bl .__check_irq_replay + bl __check_irq_replay cmpwi cr0,r3,0 beq restore_no_replay @@ -928,13 +930,13 @@ restore_check_irq_replay: cmpwi cr0,r3,0x500 bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; - bl .do_IRQ - b .ret_from_except + bl do_IRQ + b ret_from_except 1: cmpwi cr0,r3,0x900 bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; - bl .timer_interrupt - b .ret_from_except + bl timer_interrupt + b ret_from_except #ifdef CONFIG_PPC_DOORBELL 1: #ifdef CONFIG_PPC_BOOK3E @@ -948,14 +950,14 @@ restore_check_irq_replay: #endif /* CONFIG_PPC_BOOK3E */ bne 1f addi r3,r1,STACK_FRAME_OVERHEAD; - bl .doorbell_exception - b .ret_from_except + bl doorbell_exception + b ret_from_except #endif /* CONFIG_PPC_DOORBELL */ -1: b .ret_from_except /* What else to do here ? */ +1: b ret_from_except /* What else to do here ? */ unrecov_restore: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b unrecov_restore #ifdef CONFIG_PPC_RTAS @@ -1021,7 +1023,7 @@ _GLOBAL(enter_rtas) std r6,PACASAVEDMSR(r13) /* Setup our real return addr */ - LOAD_REG_ADDR(r4,.rtas_return_loc) + LOAD_REG_ADDR(r4,rtas_return_loc) clrldi r4,r4,2 /* convert to realmode address */ mtlr r4 @@ -1045,7 +1047,7 @@ _GLOBAL(enter_rtas) rfid b . /* prevent speculative execution */ -_STATIC(rtas_return_loc) +rtas_return_loc: FIXUP_ENDIAN /* relocation is off at this point */ @@ -1054,7 +1056,7 @@ _STATIC(rtas_return_loc) bcl 20,31,$+4 0: mflr r3 - ld r3,(1f-0b)(r3) /* get &.rtas_restore_regs */ + ld r3,(1f-0b)(r3) /* get &rtas_restore_regs */ mfmsr r6 li r0,MSR_RI @@ -1071,9 +1073,9 @@ _STATIC(rtas_return_loc) b . 
/* prevent speculative execution */ .align 3 -1: .llong .rtas_restore_regs +1: .llong rtas_restore_regs -_STATIC(rtas_restore_regs) +rtas_restore_regs: /* relocation is on at this point */ REST_GPR(2, r1) /* Restore the TOC */ REST_GPR(13, r1) /* Restore paca */ @@ -1173,7 +1175,7 @@ _GLOBAL(mcount) _GLOBAL(_mcount) blr -_GLOBAL(ftrace_caller) +_GLOBAL_TOC(ftrace_caller) /* Taken from output of objdump from lib64/glibc */ mflr r3 ld r11, 0(r1) @@ -1197,10 +1199,7 @@ _GLOBAL(ftrace_graph_stub) _GLOBAL(ftrace_stub) blr #else -_GLOBAL(mcount) - blr - -_GLOBAL(_mcount) +_GLOBAL_TOC(_mcount) /* Taken from output of objdump from lib64/glibc */ mflr r3 ld r11, 0(r1) @@ -1238,7 +1237,7 @@ _GLOBAL(ftrace_graph_caller) ld r11, 112(r1) addi r3, r11, 16 - bl .prepare_ftrace_return + bl prepare_ftrace_return nop ld r0, 128(r1) @@ -1254,7 +1253,7 @@ _GLOBAL(return_to_handler) mr r31, r1 stdu r1, -112(r1) - bl .ftrace_return_to_handler + bl ftrace_return_to_handler nop /* return value has real return address */ @@ -1284,7 +1283,7 @@ _GLOBAL(mod_return_to_handler) */ ld r2, PACATOC(r13) - bl .ftrace_return_to_handler + bl ftrace_return_to_handler nop /* return value has real return address */ diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index c1bee3ce9d1f..771b4e92e5d9 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S @@ -499,7 +499,7 @@ exc_##n##_bad_stack: \ CHECK_NAPPING(); \ addi r3,r1,STACK_FRAME_OVERHEAD; \ bl hdlr; \ - b .ret_from_except_lite; + b ret_from_except_lite; /* This value is used to mark exception frames on the stack. */ .section ".toc","aw" @@ -550,11 +550,11 @@ interrupt_end_book3e: CRIT_EXCEPTION_PROLOG(0x100, BOOKE_INTERRUPT_CRITICAL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x100) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception + bl unknown_exception b ret_from_crit_except /* Machine Check Interrupt */ @@ -562,11 +562,11 @@ interrupt_end_book3e: MC_EXCEPTION_PROLOG(0x000, BOOKE_INTERRUPT_MACHINE_CHECK, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_MC(0x000) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_exception + bl machine_check_exception b ret_from_mc_except /* Data Storage Interrupt */ @@ -591,7 +591,7 @@ interrupt_end_book3e: /* External Input Interrupt */ MASKABLE_EXCEPTION(0x500, BOOKE_INTERRUPT_EXTERNAL, - external_input, .do_IRQ, ACK_NONE) + external_input, do_IRQ, ACK_NONE) /* Alignment */ START_EXCEPTION(alignment); @@ -612,9 +612,9 @@ interrupt_end_book3e: std r14,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXGEN+EX_R14(r13) - bl .save_nvgprs - bl .program_check_exception - b .ret_from_except + bl save_nvgprs + bl program_check_exception + b ret_from_except /* Floating Point Unavailable Interrupt */ START_EXCEPTION(fp_unavailable); @@ -625,13 +625,13 @@ interrupt_end_book3e: ld r12,_MSR(r1) andi. r0,r12,MSR_PR; beq- 1f - bl .load_up_fpu + bl load_up_fpu b fast_exception_return 1: INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_fp_unavailable_exception - b .ret_from_except + bl kernel_fp_unavailable_exception + b ret_from_except /* Altivec Unavailable Interrupt */ START_EXCEPTION(altivec_unavailable); @@ -644,16 +644,16 @@ BEGIN_FTR_SECTION ld r12,_MSR(r1) andi. 
r0,r12,MSR_PR; beq- 1f - bl .load_up_altivec + bl load_up_altivec b fast_exception_return 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .altivec_unavailable_exception - b .ret_from_except + bl altivec_unavailable_exception + b ret_from_except /* AltiVec Assist */ START_EXCEPTION(altivec_assist); @@ -662,39 +662,39 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x220) INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_ALTIVEC BEGIN_FTR_SECTION - bl .altivec_assist_exception + bl altivec_assist_exception END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #else - bl .unknown_exception + bl unknown_exception #endif - b .ret_from_except + b ret_from_except /* Decrementer Interrupt */ MASKABLE_EXCEPTION(0x900, BOOKE_INTERRUPT_DECREMENTER, - decrementer, .timer_interrupt, ACK_DEC) + decrementer, timer_interrupt, ACK_DEC) /* Fixed Interval Timer Interrupt */ MASKABLE_EXCEPTION(0x980, BOOKE_INTERRUPT_FIT, - fixed_interval, .unknown_exception, ACK_FIT) + fixed_interval, unknown_exception, ACK_FIT) /* Watchdog Timer Interrupt */ START_EXCEPTION(watchdog); CRIT_EXCEPTION_PROLOG(0x9f0, BOOKE_INTERRUPT_WATCHDOG, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x9f0) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD #ifdef CONFIG_BOOKE_WDT - bl .WatchdogException + bl WatchdogException #else - bl .unknown_exception + bl unknown_exception #endif b ret_from_crit_except @@ -712,10 +712,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0xf20) INTS_DISABLE - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* Debug exception as a critical interrupt*/ START_EXCEPTION(debug_crit); @@ -774,9 +774,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) mr r4,r14 ld r14,PACA_EXCRIT+EX_R14(r13) ld r15,PACA_EXCRIT+EX_R15(r13) - bl .save_nvgprs - bl .DebugException - b .ret_from_except + bl save_nvgprs + bl DebugException + b ret_from_except kernel_dbg_exc: b . 
/* NYI */ @@ -839,9 +839,9 @@ kernel_dbg_exc: mr r4,r14 ld r14,PACA_EXDBG+EX_R14(r13) ld r15,PACA_EXDBG+EX_R15(r13) - bl .save_nvgprs - bl .DebugException - b .ret_from_except + bl save_nvgprs + bl DebugException + b ret_from_except START_EXCEPTION(perfmon); NORMAL_EXCEPTION_PROLOG(0x260, BOOKE_INTERRUPT_PERFORMANCE_MONITOR, @@ -850,23 +850,23 @@ kernel_dbg_exc: INTS_DISABLE CHECK_NAPPING() addi r3,r1,STACK_FRAME_OVERHEAD - bl .performance_monitor_exception - b .ret_from_except_lite + bl performance_monitor_exception + b ret_from_except_lite /* Doorbell interrupt */ MASKABLE_EXCEPTION(0x280, BOOKE_INTERRUPT_DOORBELL, - doorbell, .doorbell_exception, ACK_NONE) + doorbell, doorbell_exception, ACK_NONE) /* Doorbell critical Interrupt */ START_EXCEPTION(doorbell_crit); CRIT_EXCEPTION_PROLOG(0x2a0, BOOKE_INTERRUPT_DOORBELL_CRITICAL, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x2a0) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception + bl unknown_exception b ret_from_crit_except /* @@ -878,21 +878,21 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x2c0) addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* Guest Doorbell critical Interrupt */ START_EXCEPTION(guest_doorbell_crit); CRIT_EXCEPTION_PROLOG(0x2e0, BOOKE_INTERRUPT_GUEST_DBELL_CRIT, PROLOG_ADDITION_NONE) EXCEPTION_COMMON_CRIT(0x2e0) - bl .save_nvgprs + bl save_nvgprs bl special_reg_save CHECK_NAPPING(); addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception + bl unknown_exception b ret_from_crit_except /* Hypervisor call */ @@ -901,10 +901,10 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x310) addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* Embedded Hypervisor priviledged */ START_EXCEPTION(ehpriv); @@ -912,10 +912,10 @@ kernel_dbg_exc: PROLOG_ADDITION_NONE) EXCEPTION_COMMON(0x320) addi r3,r1,STACK_FRAME_OVERHEAD - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except /* LRAT Error interrupt */ START_EXCEPTION(lrat_error); @@ -1014,16 +1014,16 @@ storage_fault_common: mr r5,r15 ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) - bl .do_page_fault + bl do_page_fault cmpdi r3,0 bne- 1f - b .ret_from_except_lite -1: bl .save_nvgprs + b ret_from_except_lite +1: bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD ld r4,_DAR(r1) - bl .bad_page_fault - b .ret_from_except + bl bad_page_fault + b ret_from_except /* * Alignment exception doesn't fit entirely in the 0x100 bytes so it @@ -1035,10 +1035,10 @@ alignment_more: addi r3,r1,STACK_FRAME_OVERHEAD ld r14,PACA_EXGEN+EX_R14(r13) ld r15,PACA_EXGEN+EX_R15(r13) - bl .save_nvgprs + bl save_nvgprs INTS_RESTORE_HARD - bl .alignment_exception - b .ret_from_except + bl alignment_exception + b ret_from_except /* * We branch here from entry_64.S for the last stage of the exception @@ -1172,7 +1172,7 @@ bad_stack_book3e: std r12,0(r11) ld r2,PACATOC(r13) 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_bad_stack + bl kernel_bad_stack b 1b /* @@ -1521,13 +1521,13 @@ _GLOBAL(start_initialization_book3e) * and always use AS 0, so we just set it up to match our link * address and never use 0 based addresses. 
*/ - bl .initial_tlb_book3e + bl initial_tlb_book3e /* Init global core bits */ - bl .init_core_book3e + bl init_core_book3e /* Init per-thread bits */ - bl .init_thread_book3e + bl init_thread_book3e /* Return to common init code */ tovirt(r28,r28) @@ -1548,7 +1548,7 @@ _GLOBAL(start_initialization_book3e) */ _GLOBAL(book3e_secondary_core_init_tlb_set) li r4,1 - b .generic_secondary_smp_init + b generic_secondary_smp_init _GLOBAL(book3e_secondary_core_init) mflr r28 @@ -1558,18 +1558,18 @@ _GLOBAL(book3e_secondary_core_init) bne 2f /* Setup TLB for this core */ - bl .initial_tlb_book3e + bl initial_tlb_book3e /* We can return from the above running at a different * address, so recalculate r2 (TOC) */ - bl .relative_toc + bl relative_toc /* Init global core bits */ -2: bl .init_core_book3e +2: bl init_core_book3e /* Init per-thread bits */ -3: bl .init_thread_book3e +3: bl init_thread_book3e /* Return to common init code at proper virtual address. * @@ -1596,14 +1596,14 @@ _GLOBAL(book3e_secondary_thread_init) mflr r28 b 3b -_STATIC(init_core_book3e) +init_core_book3e: /* Establish the interrupt vector base */ LOAD_REG_IMMEDIATE(r3, interrupt_base_book3e) mtspr SPRN_IVPR,r3 sync blr -_STATIC(init_thread_book3e) +init_thread_book3e: lis r3,(SPRN_EPCR_ICM | SPRN_EPCR_GICM)@h mtspr SPRN_EPCR,r3 diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index 3afd3915921a..20f11eb4dff7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S @@ -132,12 +132,12 @@ BEGIN_FTR_SECTION #endif beq cr1,2f - b .power7_wakeup_noloss -2: b .power7_wakeup_loss + b power7_wakeup_noloss +2: b power7_wakeup_loss /* Fast Sleep wakeup on PowerNV */ 8: GET_PACA(r13) - b .power7_wakeup_tb_loss + b power7_wakeup_tb_loss 9: END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206) @@ -211,16 +211,16 @@ data_access_slb_pSeries: #endif /* __DISABLED__ */ mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else /* - * We can't just use a direct branch to .slb_miss_realmode + * We can't just use a direct branch to slb_miss_realmode * because the distance from here to there depends on where * the kernel ends up being put. 
*/ mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -243,11 +243,11 @@ instruction_access_slb_pSeries: #endif /* __DISABLED__ */ mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -524,7 +524,7 @@ do_stab_bolted_pSeries: std r12,PACA_EXSLB+EX_R12(r13) GET_SCRATCH0(r10) std r10,PACA_EXSLB+EX_R13(r13) - EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) + EXCEPTION_PROLOG_PSERIES_1(do_stab_bolted, EXC_STD) KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300) KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380) @@ -769,38 +769,38 @@ kvmppc_skip_Hinterrupt: /*** Common interrupt handlers ***/ - STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception) + STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception) STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ) - STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt) - STD_EXCEPTION_COMMON(0x980, hdecrementer, .hdec_interrupt) + STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt) + STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt) #ifdef CONFIG_PPC_DOORBELL - STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .doorbell_exception) + STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception) #else - STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, .unknown_exception) + STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception) #endif - STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) - STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) - STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) - STD_EXCEPTION_COMMON(0xe40, emulation_assist, .emulation_assist_interrupt) - STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) + STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception) + STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception) + STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception) + STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt) + STD_EXCEPTION_COMMON(0xe60, hmi_exception, unknown_exception) #ifdef CONFIG_PPC_DOORBELL - STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .doorbell_exception) + STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception) #else - STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, .unknown_exception) + STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception) #endif - STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception) - STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) - STD_EXCEPTION_COMMON(0x1502, denorm, .unknown_exception) + STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception) + STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception) + STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception) #ifdef CONFIG_ALTIVEC - STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) + STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception) #else - STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception) + STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception) #endif #ifdef CONFIG_CBE_RAS - STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception) - STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, 
.cbe_maintenance_exception) - STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception) + STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception) + STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception) + STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception) #endif /* CONFIG_CBE_RAS */ /* @@ -829,16 +829,16 @@ data_access_slb_relon_pSeries: mfspr r3,SPRN_DAR mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else /* - * We can't just use a direct branch to .slb_miss_realmode + * We can't just use a direct branch to slb_miss_realmode * because the distance from here to there depends on where * the kernel ends up being put. */ mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -854,11 +854,11 @@ instruction_access_slb_relon_pSeries: mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ mfspr r12,SPRN_SRR1 #ifndef CONFIG_RELOCATABLE - b .slb_miss_realmode + b slb_miss_realmode #else mfctr r11 ld r10,PACAKBASE(r13) - LOAD_HANDLER(r10, .slb_miss_realmode) + LOAD_HANDLER(r10, slb_miss_realmode) mtctr r10 bctr #endif @@ -966,7 +966,7 @@ system_call_entry: b system_call_common ppc64_runlatch_on_trampoline: - b .__ppc64_runlatch_on + b __ppc64_runlatch_on /* * Here we have detected that the kernel stack pointer is bad. @@ -1025,7 +1025,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_CFAR) std r12,RESULT(r1) std r11,STACK_FRAME_OVERHEAD-16(r1) 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_bad_stack + bl kernel_bad_stack b 1b /* @@ -1046,7 +1046,7 @@ data_access_common: ld r3,PACA_EXGEN+EX_DAR(r13) lwz r4,PACA_EXGEN+EX_DSISR(r13) li r5,0x300 - b .do_hash_page /* Try to handle as hpte fault */ + b do_hash_page /* Try to handle as hpte fault */ .align 7 .globl h_data_storage_common @@ -1056,11 +1056,11 @@ h_data_storage_common: mfspr r10,SPRN_HDSISR stw r10,PACA_EXGEN+EX_DSISR(r13) EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .unknown_exception - b .ret_from_except + bl unknown_exception + b ret_from_except .align 7 .globl instruction_access_common @@ -1071,9 +1071,9 @@ instruction_access_common: ld r3,_NIP(r1) andis. 
r4,r12,0x5820 li r5,0x400 - b .do_hash_page /* Try to handle as hpte fault */ + b do_hash_page /* Try to handle as hpte fault */ - STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) + STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception) /* * Here is the common SLB miss user that is used when going to virtual @@ -1088,7 +1088,7 @@ slb_miss_user_common: stw r9,PACA_EXGEN+EX_CCR(r13) std r10,PACA_EXGEN+EX_LR(r13) std r11,PACA_EXGEN+EX_SRR0(r13) - bl .slb_allocate_user + bl slb_allocate_user ld r10,PACA_EXGEN+EX_LR(r13) ld r3,PACA_EXGEN+EX_R3(r13) @@ -1131,9 +1131,9 @@ slb_miss_fault: unrecov_user_slb: EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN) DISABLE_INTS - bl .save_nvgprs + bl save_nvgprs 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b 1b #endif /* __DISABLED__ */ @@ -1158,10 +1158,10 @@ machine_check_common: lwz r4,PACA_EXGEN+EX_DSISR(r13) std r3,_DAR(r1) std r4,_DSISR(r1) - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_exception - b .ret_from_except + bl machine_check_exception + b ret_from_except .align 7 .globl alignment_common @@ -1175,31 +1175,31 @@ alignment_common: lwz r4,PACA_EXGEN+EX_DSISR(r13) std r3,_DAR(r1) std r4,_DSISR(r1) - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .alignment_exception - b .ret_from_except + bl alignment_exception + b ret_from_except .align 7 .globl program_check_common program_check_common: EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .program_check_exception - b .ret_from_except + bl program_check_exception + b ret_from_except .align 7 .globl fp_unavailable_common fp_unavailable_common: EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) bne 1f /* if from user, just load it up */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .kernel_fp_unavailable_exception + bl kernel_fp_unavailable_exception BUG_OPCODE 1: #ifdef CONFIG_PPC_TRANSACTIONAL_MEM @@ -1211,15 +1211,15 @@ BEGIN_FTR_SECTION bne- 2f END_FTR_SECTION_IFSET(CPU_FTR_TM) #endif - bl .load_up_fpu + bl load_up_fpu b fast_exception_return #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .fp_unavailable_tm - b .ret_from_except + bl fp_unavailable_tm + b ret_from_except #endif .align 7 .globl altivec_unavailable_common @@ -1237,24 +1237,24 @@ BEGIN_FTR_SECTION bne- 2f END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) #endif - bl .load_up_altivec + bl load_up_altivec b fast_exception_return #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .altivec_unavailable_tm - b .ret_from_except + bl altivec_unavailable_tm + b ret_from_except #endif 1: END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) #endif - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .altivec_unavailable_exception - b .ret_from_except + bl altivec_unavailable_exception + b ret_from_except .align 7 .globl vsx_unavailable_common @@ -1272,26 +1272,26 @@ BEGIN_FTR_SECTION bne- 2f END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69) #endif - b .load_up_vsx + b load_up_vsx #ifdef CONFIG_PPC_TRANSACTIONAL_MEM 2: /* User process was in a transaction */ - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl 
.vsx_unavailable_tm - b .ret_from_except + bl vsx_unavailable_tm + b ret_from_except #endif 1: END_FTR_SECTION_IFSET(CPU_FTR_VSX) #endif - bl .save_nvgprs + bl save_nvgprs DISABLE_INTS addi r3,r1,STACK_FRAME_OVERHEAD - bl .vsx_unavailable_exception - b .ret_from_except + bl vsx_unavailable_exception + b ret_from_except - STD_EXCEPTION_COMMON(0xf60, facility_unavailable, .facility_unavailable_exception) - STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, .facility_unavailable_exception) + STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception) + STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception) .align 7 .globl __end_handlers @@ -1386,9 +1386,9 @@ _GLOBAL(opal_mc_secondary_handler) machine_check_handle_early: std r0,GPR0(r1) /* Save r0 */ EXCEPTION_PROLOG_COMMON_3(0x200) - bl .save_nvgprs + bl save_nvgprs addi r3,r1,STACK_FRAME_OVERHEAD - bl .machine_check_early + bl machine_check_early ld r12,_MSR(r1) #ifdef CONFIG_PPC_P7_NAP /* @@ -1408,11 +1408,11 @@ machine_check_handle_early: /* Supervisor state loss */ li r0,1 stb r0,PACA_NAPSTATELOST(r13) -3: bl .machine_check_queue_event +3: bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP GET_PACA(r13) ld r1,PACAR1(r13) - b .power7_enter_nap_mode + b power7_enter_nap_mode 4: #endif /* @@ -1444,7 +1444,7 @@ machine_check_handle_early: andi. r11,r12,MSR_RI bne 2f 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b 1b 2: /* @@ -1452,7 +1452,7 @@ machine_check_handle_early: * Queue up the MCE event so that we can log it later, while * returning from kernel or opal call. */ - bl .machine_check_queue_event + bl machine_check_queue_event MACHINE_CHECK_HANDLER_WINDUP rfid 9: @@ -1468,7 +1468,7 @@ machine_check_handle_early: * r3 is saved in paca->slb_r3 * We assume we aren't going to take any exceptions during this procedure. */ -_GLOBAL(slb_miss_realmode) +slb_miss_realmode: mflr r10 #ifdef CONFIG_RELOCATABLE mtctr r11 @@ -1477,7 +1477,7 @@ _GLOBAL(slb_miss_realmode) stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */ - bl .slb_allocate_realmode + bl slb_allocate_realmode /* All done -- return from exception. 
*/ @@ -1517,9 +1517,9 @@ _GLOBAL(slb_miss_realmode) unrecov_slb: EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB) DISABLE_INTS - bl .save_nvgprs + bl save_nvgprs 1: addi r3,r1,STACK_FRAME_OVERHEAD - bl .unrecoverable_exception + bl unrecoverable_exception b 1b @@ -1536,7 +1536,7 @@ power4_fixup_nap: * Hash table stuff */ .align 7 -_STATIC(do_hash_page) +do_hash_page: std r3,_DAR(r1) std r4,_DSISR(r1) @@ -1573,7 +1573,7 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) * * at return r3 = 0 for success, 1 for page fault, negative for error */ - bl .hash_page /* build HPTE if possible */ + bl hash_page /* build HPTE if possible */ cmpdi r3,0 /* see if hash_page succeeded */ /* Success */ @@ -1587,35 +1587,35 @@ handle_page_fault: 11: ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_page_fault + bl do_page_fault cmpdi r3,0 beq+ 12f - bl .save_nvgprs + bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD lwz r4,_DAR(r1) - bl .bad_page_fault - b .ret_from_except + bl bad_page_fault + b ret_from_except /* We have a data breakpoint exception - handle it */ handle_dabr_fault: - bl .save_nvgprs + bl save_nvgprs ld r4,_DAR(r1) ld r5,_DSISR(r1) addi r3,r1,STACK_FRAME_OVERHEAD - bl .do_break -12: b .ret_from_except_lite + bl do_break +12: b ret_from_except_lite /* We have a page fault that hash_page could handle but HV refused * the PTE insertion */ -13: bl .save_nvgprs +13: bl save_nvgprs mr r5,r3 addi r3,r1,STACK_FRAME_OVERHEAD ld r4,_DAR(r1) - bl .low_hash_fault - b .ret_from_except + bl low_hash_fault + b ret_from_except /* * We come here as a result of a DSI at a point where we don't want @@ -1624,16 +1624,16 @@ handle_dabr_fault: * were soft-disabled. We want to invoke the exception handler for * the access, or panic if there isn't a handler. */ -77: bl .save_nvgprs +77: bl save_nvgprs mr r4,r3 addi r3,r1,STACK_FRAME_OVERHEAD li r5,SIGSEGV - bl .bad_page_fault - b .ret_from_except + bl bad_page_fault + b ret_from_except /* here we have a segment miss */ do_ste_alloc: - bl .ste_allocate /* try to insert stab entry */ + bl ste_allocate /* try to insert stab entry */ cmpdi r3,0 bne- handle_page_fault b fast_exception_return @@ -1646,7 +1646,7 @@ do_ste_alloc: * We assume (DAR >> 60) == 0xc. */ .align 7 -_GLOBAL(do_stab_bolted) +do_stab_bolted: stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ mfspr r11,SPRN_DAR /* ea */ diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index 6a014c763cc7..f202d0731b06 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c @@ -105,11 +105,9 @@ __ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { unsigned int op; - unsigned int jmp[5]; unsigned long ptr; unsigned long ip = rec->ip; - unsigned long tramp; - int offset; + void *tramp; /* read where this goes */ if (probe_kernel_read(&op, (void *)ip, sizeof(int))) @@ -122,96 +120,41 @@ __ftrace_make_nop(struct module *mod, } /* lets find where the pointer goes */ - tramp = find_bl_target(ip, op); - - /* - * On PPC64 the trampoline looks like: - * 0x3d, 0x82, 0x00, 0x00, addis r12,r2, <high> - * 0x39, 0x8c, 0x00, 0x00, addi r12,r12, <low> - * Where the bytes 2,3,6 and 7 make up the 32bit offset - * to the TOC that holds the pointer. - * to jump to. - * 0xf8, 0x41, 0x00, 0x28, std r2,40(r1) - * 0xe9, 0x6c, 0x00, 0x20, ld r11,32(r12) - * The actually address is 32 bytes from the offset - * into the TOC. 
- * 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12) - */ - - pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc); - - /* Find where the trampoline jumps to */ - if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { - printk(KERN_ERR "Failed to read %lx\n", tramp); - return -EFAULT; - } + tramp = (void *)find_bl_target(ip, op); - pr_devel(" %08x %08x", jmp[0], jmp[1]); + pr_devel("ip:%lx jumps to %p", ip, tramp); - /* verify that this is what we expect it to be */ - if (((jmp[0] & 0xffff0000) != 0x3d820000) || - ((jmp[1] & 0xffff0000) != 0x398c0000) || - (jmp[2] != 0xf8410028) || - (jmp[3] != 0xe96c0020) || - (jmp[4] != 0xe84c0028)) { + if (!is_module_trampoline(tramp)) { printk(KERN_ERR "Not a trampoline\n"); return -EINVAL; } - /* The bottom half is signed extended */ - offset = ((unsigned)((unsigned short)jmp[0]) << 16) + - (int)((short)jmp[1]); - - pr_devel(" %x ", offset); - - /* get the address this jumps too */ - tramp = mod->arch.toc + offset + 32; - pr_devel("toc: %lx", tramp); - - if (probe_kernel_read(jmp, (void *)tramp, 8)) { - printk(KERN_ERR "Failed to read %lx\n", tramp); + if (module_trampoline_target(mod, tramp, &ptr)) { + printk(KERN_ERR "Failed to get trampoline target\n"); return -EFAULT; } - pr_devel(" %08x %08x\n", jmp[0], jmp[1]); - -#ifdef __LITTLE_ENDIAN__ - ptr = ((unsigned long)jmp[1] << 32) + jmp[0]; -#else - ptr = ((unsigned long)jmp[0] << 32) + jmp[1]; -#endif + pr_devel("trampoline target %lx", ptr); /* This should match what was called */ if (ptr != ppc_function_entry((void *)addr)) { - printk(KERN_ERR "addr does not match %lx\n", ptr); + printk(KERN_ERR "addr %lx does not match expected %lx\n", + ptr, ppc_function_entry((void *)addr)); return -EINVAL; } /* - * We want to nop the line, but the next line is - * 0xe8, 0x41, 0x00, 0x28 ld r2,40(r1) - * This needs to be turned to a nop too. - */ - if (probe_kernel_read(&op, (void *)(ip+4), MCOUNT_INSN_SIZE)) - return -EFAULT; - - if (op != 0xe8410028) { - printk(KERN_ERR "Next line is not ld! (%08x)\n", op); - return -EINVAL; - } - - /* - * Milton Miller pointed out that we can not blindly do nops. - * If a task was preempted when calling a trace function, - * the nops will remove the way to restore the TOC in r2 - * and the r2 TOC will get corrupted. - */ - - /* - * Replace: - * bl <tramp> <==== will be replaced with "b 1f" - * ld r2,40(r1) - * 1: + * Our original call site looks like: + * + * bl <tramp> + * ld r2,XX(r1) + * + * Milton Miller pointed out that we can not simply nop the branch. + * If a task was preempted when calling a trace function, the nops + * will remove the way to restore the TOC in r2 and the r2 TOC will + * get corrupted. + * + * Use a b +8 to jump over the load. */ op = 0x48000008; /* b +8 */ @@ -349,19 +292,24 @@ static int __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned int op[2]; - unsigned long ip = rec->ip; + void *ip = (void *)rec->ip; /* read where this goes */ - if (probe_kernel_read(op, (void *)ip, MCOUNT_INSN_SIZE * 2)) + if (probe_kernel_read(op, ip, sizeof(op))) return -EFAULT; /* - * It should be pointing to two nops or - * b +8; ld r2,40(r1) + * We expect to see: + * + * b +8 + * ld r2,XX(r1) + * + * The load offset is different depending on the ABI. For simplicity + * just mask it out when doing the compare. 
*/ - if (((op[0] != 0x48000008) || (op[1] != 0xe8410028)) && - ((op[0] != PPC_INST_NOP) || (op[1] != PPC_INST_NOP))) { - printk(KERN_ERR "Expected NOPs but have %x %x\n", op[0], op[1]); + if ((op[0] != 0x48000008) || ((op[1] & 0xffff00000) != 0xe8410000)) { + printk(KERN_ERR "Unexpected call sequence: %x %x\n", + op[0], op[1]); return -EINVAL; } @@ -371,23 +319,16 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) return -EINVAL; } - /* create the branch to the trampoline */ - op[0] = create_branch((unsigned int *)ip, - rec->arch.mod->arch.tramp, BRANCH_SET_LINK); - if (!op[0]) { - printk(KERN_ERR "REL24 out of range!\n"); + /* Ensure branch is within 24 bits */ + if (create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { + printk(KERN_ERR "Branch out of range"); return -EINVAL; } - /* ld r2,40(r1) */ - op[1] = 0xe8410028; - - pr_devel("write to %lx\n", rec->ip); - - if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2)) - return -EPERM; - - flush_icache_range(ip, ip + 8); + if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { + printk(KERN_ERR "REL24 out of range!\n"); + return -EINVAL; + } return 0; } diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index b7363bd42452..a95145d7f61b 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -70,16 +70,15 @@ _GLOBAL(__start) /* NOP this out unconditionally */ BEGIN_FTR_SECTION FIXUP_ENDIAN - b .__start_initialization_multiplatform + b __start_initialization_multiplatform END_FTR_SECTION(0, 1) /* Catch branch to 0 in real mode */ trap - /* Secondary processors spin on this value until it becomes nonzero. - * When it does it contains the real address of the descriptor - * of the function that the cpu should jump to to continue - * initialization. + /* Secondary processors spin on this value until it becomes non-zero. + * When non-zero, it contains the real address of the function the cpu + * should jump to. */ .balign 8 .globl __secondary_hold_spinloop @@ -140,16 +139,15 @@ __secondary_hold: tovirt(r26,r26) #endif /* All secondary cpus wait here until told to start. 
*/ -100: ld r4,__secondary_hold_spinloop-_stext(r26) - cmpdi 0,r4,0 +100: ld r12,__secondary_hold_spinloop-_stext(r26) + cmpdi 0,r12,0 beq 100b #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) #ifdef CONFIG_PPC_BOOK3E - tovirt(r4,r4) + tovirt(r12,r12) #endif - ld r4,0(r4) /* deref function descriptor */ - mtctr r4 + mtctr r12 mr r3,r24 /* * it may be the case that other platforms have r4 right to @@ -186,16 +184,16 @@ _GLOBAL(generic_secondary_thread_init) mr r24,r3 /* turn on 64-bit mode */ - bl .enable_64b_mode + bl enable_64b_mode /* get a valid TOC pointer, wherever we're mapped at */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) #ifdef CONFIG_PPC_BOOK3E /* Book3E initialization */ mr r3,r24 - bl .book3e_secondary_thread_init + bl book3e_secondary_thread_init #endif b generic_secondary_common_init @@ -214,17 +212,17 @@ _GLOBAL(generic_secondary_smp_init) mr r25,r4 /* turn on 64-bit mode */ - bl .enable_64b_mode + bl enable_64b_mode /* get a valid TOC pointer, wherever we're mapped at */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) #ifdef CONFIG_PPC_BOOK3E /* Book3E initialization */ mr r3,r24 mr r4,r25 - bl .book3e_secondary_core_init + bl book3e_secondary_core_init #endif generic_secondary_common_init: @@ -236,7 +234,7 @@ generic_secondary_common_init: ld r13,0(r13) /* Get base vaddr of paca array */ #ifndef CONFIG_SMP addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ - b .kexec_wait /* wait for next kernel if !SMP */ + b kexec_wait /* wait for next kernel if !SMP */ #else LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ lwz r7,0(r7) /* also the max paca allocated */ @@ -250,7 +248,7 @@ generic_secondary_common_init: blt 1b mr r3,r24 /* not found, copy phys to r3 */ - b .kexec_wait /* next kernel might do better */ + b kexec_wait /* next kernel might do better */ 2: SET_PACA(r13) #ifdef CONFIG_PPC_BOOK3E @@ -264,11 +262,13 @@ generic_secondary_common_init: /* See if we need to call a cpu state restore handler */ LOAD_REG_ADDR(r23, cur_cpu_spec) ld r23,0(r23) - ld r23,CPU_SPEC_RESTORE(r23) - cmpdi 0,r23,0 + ld r12,CPU_SPEC_RESTORE(r23) + cmpdi 0,r12,0 beq 3f - ld r23,0(r23) - mtctr r23 +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + ld r12,0(r12) +#endif + mtctr r12 bctrl 3: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */ @@ -299,7 +299,7 @@ generic_secondary_common_init: * Assumes we're mapped EA == RA if the MMU is on. */ #ifdef CONFIG_PPC_BOOK3S -_STATIC(__mmu_off) +__mmu_off: mfmsr r3 andi. 
r0,r3,MSR_IR|MSR_DR beqlr @@ -324,12 +324,12 @@ _STATIC(__mmu_off) * DT block, r4 is a physical pointer to the kernel itself * */ -_GLOBAL(__start_initialization_multiplatform) +__start_initialization_multiplatform: /* Make sure we are running in 64 bits mode */ - bl .enable_64b_mode + bl enable_64b_mode /* Get TOC pointer (current runtime address) */ - bl .relative_toc + bl relative_toc /* find out where we are now */ bcl 20,31,$+4 @@ -342,7 +342,7 @@ _GLOBAL(__start_initialization_multiplatform) */ cmpldi cr0,r5,0 beq 1f - b .__boot_from_prom /* yes -> prom */ + b __boot_from_prom /* yes -> prom */ 1: /* Save parameters */ mr r31,r3 @@ -354,8 +354,8 @@ _GLOBAL(__start_initialization_multiplatform) #endif #ifdef CONFIG_PPC_BOOK3E - bl .start_initialization_book3e - b .__after_prom_start + bl start_initialization_book3e + b __after_prom_start #else /* Setup some critical 970 SPRs before switching MMU off */ mfspr r0,SPRN_PVR @@ -368,15 +368,15 @@ _GLOBAL(__start_initialization_multiplatform) beq 1f cmpwi r0,0x45 /* 970GX */ bne 2f -1: bl .__cpu_preinit_ppc970 +1: bl __cpu_preinit_ppc970 2: /* Switch off MMU if not already off */ - bl .__mmu_off - b .__after_prom_start + bl __mmu_off + b __after_prom_start #endif /* CONFIG_PPC_BOOK3E */ -_INIT_STATIC(__boot_from_prom) +__boot_from_prom: #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE /* Save parameters */ mr r31,r3 @@ -395,7 +395,7 @@ _INIT_STATIC(__boot_from_prom) #ifdef CONFIG_RELOCATABLE /* Relocate code for where we are now */ mr r3,r26 - bl .relocate + bl relocate #endif /* Restore parameters */ @@ -407,14 +407,14 @@ _INIT_STATIC(__boot_from_prom) /* Do all of the interaction with OF client interface */ mr r8,r26 - bl .prom_init + bl prom_init #endif /* #CONFIG_PPC_OF_BOOT_TRAMPOLINE */ /* We never return. We also hit that trap if trying to boot * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ trap -_STATIC(__after_prom_start) +__after_prom_start: #ifdef CONFIG_RELOCATABLE /* process relocations for the final address of the kernel */ lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ @@ -424,7 +424,7 @@ _STATIC(__after_prom_start) bne 1f add r25,r25,r26 1: mr r3,r25 - bl .relocate + bl relocate #endif /* @@ -464,12 +464,12 @@ _STATIC(__after_prom_start) lis r5,(copy_to_here - _stext)@ha addi r5,r5,(copy_to_here - _stext)@l /* # bytes of memory to copy */ - bl .copy_and_flush /* copy the first n bytes */ + bl copy_and_flush /* copy the first n bytes */ /* this includes the code being */ /* executed here. 
*/ addis r8,r3,(4f - _stext)@ha /* Jump to the copy of this code */ - addi r8,r8,(4f - _stext)@l /* that we just made */ - mtctr r8 + addi r12,r8,(4f - _stext)@l /* that we just made */ + mtctr r12 bctr .balign 8 @@ -478,9 +478,9 @@ p_end: .llong _end - _stext 4: /* Now copy the rest of the kernel up to _end */ addis r5,r26,(p_end - _stext)@ha ld r5,(p_end - _stext)@l(r5) /* get _end */ -5: bl .copy_and_flush /* copy the rest */ +5: bl copy_and_flush /* copy the rest */ -9: b .start_here_multiplatform +9: b start_here_multiplatform /* * Copy routine used to copy the kernel to start at physical address 0 @@ -544,7 +544,7 @@ __secondary_start_pmac_0: _GLOBAL(pmac_secondary_start) /* turn on 64-bit mode */ - bl .enable_64b_mode + bl enable_64b_mode li r0,0 mfspr r3,SPRN_HID4 @@ -556,11 +556,11 @@ _GLOBAL(pmac_secondary_start) slbia /* get TOC pointer (real address) */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) /* Copy some CPU settings from CPU 0 */ - bl .__restore_cpu_ppc970 + bl __restore_cpu_ppc970 /* pSeries do that early though I don't think we really need it */ mfmsr r3 @@ -619,7 +619,7 @@ __secondary_start: std r14,PACAKSAVE(r13) /* Do early setup for that CPU (stab, slb, hash table pointer) */ - bl .early_setup_secondary + bl early_setup_secondary /* * setup the new stack pointer, but *don't* use this until @@ -639,7 +639,7 @@ __secondary_start: stb r0,PACAIRQHAPPENED(r13) /* enable MMU and jump to start_secondary */ - LOAD_REG_ADDR(r3, .start_secondary_prolog) + LOAD_REG_ADDR(r3, start_secondary_prolog) LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) mtspr SPRN_SRR0,r3 @@ -652,11 +652,11 @@ __secondary_start: * zero the stack back-chain pointer and get the TOC virtual address * before going into C code. */ -_GLOBAL(start_secondary_prolog) +start_secondary_prolog: ld r2,PACATOC(r13) li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ - bl .start_secondary + bl start_secondary b . /* * Reset stack pointer and call start_secondary @@ -667,14 +667,14 @@ _GLOBAL(start_secondary_resume) ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ li r3,0 std r3,0(r1) /* Zero the stack frame pointer */ - bl .start_secondary + bl start_secondary b . #endif /* * This subroutine clobbers r11 and r12 */ -_GLOBAL(enable_64b_mode) +enable_64b_mode: mfmsr r11 /* grab the current MSR */ #ifdef CONFIG_PPC_BOOK3E oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ @@ -715,9 +715,9 @@ p_toc: .llong __toc_start + 0x8000 - 0b /* * This is where the main kernel code starts. */ -_INIT_STATIC(start_here_multiplatform) +start_here_multiplatform: /* set up the TOC */ - bl .relative_toc + bl relative_toc tovirt(r2,r2) /* Clear out the BSS. It may have been done in prom_init, @@ -776,9 +776,9 @@ _INIT_STATIC(start_here_multiplatform) /* Restore parameters passed from prom_init/kexec */ mr r3,r31 - bl .early_setup /* also sets r13 and SPRG_PACA */ + bl early_setup /* also sets r13 and SPRG_PACA */ - LOAD_REG_ADDR(r3, .start_here_common) + LOAD_REG_ADDR(r3, start_here_common) ld r4,PACAKMSR(r13) mtspr SPRN_SRR0,r3 mtspr SPRN_SRR1,r4 @@ -786,7 +786,8 @@ _INIT_STATIC(start_here_multiplatform) b . 
/* prevent speculative execution */ /* This is where all platforms converge execution */ -_INIT_GLOBAL(start_here_common) + +start_here_common: /* relocation is on at this point */ std r1,PACAKSAVE(r13) @@ -794,7 +795,7 @@ _INIT_GLOBAL(start_here_common) ld r2,PACATOC(r13) /* Do more system initializations in virtual mode */ - bl .setup_system + bl setup_system /* Mark interrupts soft and hard disabled (they might be enabled * in the PACA when doing hotplug) @@ -805,7 +806,7 @@ _INIT_GLOBAL(start_here_common) stb r0,PACAIRQHAPPENED(r13) /* Generic kernel entry */ - bl .start_kernel + bl start_kernel /* Not reached */ BUG_OPCODE diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S index bfb73cc209ce..48c21acef915 100644 --- a/arch/powerpc/kernel/idle_book3e.S +++ b/arch/powerpc/kernel/idle_book3e.S @@ -43,7 +43,7 @@ _GLOBAL(\name) */ #ifdef CONFIG_TRACE_IRQFLAGS stdu r1,-128(r1) - bl .trace_hardirqs_on + bl trace_hardirqs_on addi r1,r1,128 #endif li r0,1 diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index e3edaa189911..f57a19348bdd 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S @@ -46,7 +46,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP) mflr r0 std r0,16(r1) stdu r1,-128(r1) - bl .trace_hardirqs_on + bl trace_hardirqs_on addi r1,r1,128 ld r0,16(r1) mtlr r0 diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S index c3ab86975614..dca6e16c2436 100644 --- a/arch/powerpc/kernel/idle_power7.S +++ b/arch/powerpc/kernel/idle_power7.S @@ -58,7 +58,7 @@ _GLOBAL(power7_powersave_common) /* Make sure FPU, VSX etc... are flushed as we may lose * state when going to nap mode */ - bl .discard_lazy_cpu_state + bl discard_lazy_cpu_state #endif /* CONFIG_SMP */ /* Hard disable interrupts */ @@ -168,7 +168,7 @@ _GLOBAL(power7_wakeup_loss) _GLOBAL(power7_wakeup_noloss) lbz r0,PACA_NAPSTATELOST(r13) cmpwi r0,0 - bne .power7_wakeup_loss + bne power7_wakeup_loss ld r1,PACAR1(r13) ld r4,_MSR(r1) ld r5,_NIP(r1) diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index 3d0249599d52..4e314b90c75d 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S @@ -34,7 +34,7 @@ _GLOBAL(call_do_softirq) std r0,16(r1) stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r3) mr r1,r3 - bl .__do_softirq + bl __do_softirq ld r1,0(r1) ld r0,16(r1) mtlr r0 @@ -45,7 +45,7 @@ _GLOBAL(call_do_irq) std r0,16(r1) stdu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4) mr r1,r4 - bl .__do_irq + bl __do_irq ld r1,0(r1) ld r0,16(r1) mtlr r0 @@ -506,7 +506,7 @@ _GLOBAL(kexec_smp_wait) stb r4,PACAKEXECSTATE(r13) SYNC - b .kexec_wait + b kexec_wait /* * switch to real mode (turn mmu off) @@ -576,7 +576,7 @@ _GLOBAL(kexec_sequence) /* copy dest pages, flush whole dest image */ mr r3,r29 - bl .kexec_copy_flush /* (image) */ + bl kexec_copy_flush /* (image) */ /* turn off mmu */ bl real_mode @@ -586,7 +586,7 @@ _GLOBAL(kexec_sequence) mr r4,r30 /* start, aka phys mem offset */ li r5,0x100 li r6,0 - bl .copy_and_flush /* (dest, src, copy limit, start offset) */ + bl copy_and_flush /* (dest, src, copy limit, start offset) */ 1: /* assume normal blr return */ /* release other cpus to the new kernel secondary start at 0x60 */ @@ -595,8 +595,12 @@ _GLOBAL(kexec_sequence) stw r6,kexec_flag-1b(5) /* clear out hardware hash page table and tlb */ - ld r5,0(r27) /* deref function descriptor */ - mtctr r5 +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + ld r12,0(r27) /* deref function descriptor */ +#else + 
mr r12,r27 +#endif + mtctr r12 bctrl /* ppc_md.hpte_clear_all(void); */ /* @@ -630,3 +634,31 @@ _GLOBAL(kexec_sequence) li r5,0 blr /* image->start(physid, image->start, 0); */ #endif /* CONFIG_KEXEC */ + +#ifdef CONFIG_MODULES +#if defined(_CALL_ELF) && _CALL_ELF == 2 + +#ifdef CONFIG_MODVERSIONS +.weak __crc_TOC. +.section "___kcrctab+TOC.","a" +.globl __kcrctab_TOC. +__kcrctab_TOC.: + .llong __crc_TOC. +#endif + +/* + * Export a fake .TOC. since both modpost and depmod will complain otherwise. + * Both modpost and depmod strip the leading . so we do the same here. + */ +.section "__ksymtab_strings","a" +__kstrtab_TOC.: + .asciz "TOC." + +.section "___ksymtab+TOC.","a" +/* This symbol name is important: it's used by modpost to find exported syms */ +.globl __ksymtab_TOC. +__ksymtab_TOC.: + .llong 0 /* .value */ + .llong __kstrtab_TOC. +#endif /* ELFv2 */ +#endif /* MODULES */ diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 12664c130d73..ef349d077129 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -22,6 +22,7 @@ #include <linux/vmalloc.h> #include <linux/ftrace.h> #include <linux/bug.h> +#include <linux/uaccess.h> #include <asm/module.h> #include <asm/firmware.h> #include <asm/code-patching.h> @@ -41,46 +42,170 @@ #define DEBUGP(fmt , ...) #endif +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define R2_STACK_OFFSET 24 + +/* An address is simply the address of the function. */ +typedef unsigned long func_desc_t; + +static func_desc_t func_desc(unsigned long addr) +{ + return addr; +} +static unsigned long func_addr(unsigned long addr) +{ + return addr; +} +static unsigned long stub_func_addr(func_desc_t func) +{ + return func; +} + +/* PowerPC64 specific values for the Elf64_Sym st_other field. */ +#define STO_PPC64_LOCAL_BIT 5 +#define STO_PPC64_LOCAL_MASK (7 << STO_PPC64_LOCAL_BIT) +#define PPC64_LOCAL_ENTRY_OFFSET(other) \ + (((1 << (((other) & STO_PPC64_LOCAL_MASK) >> STO_PPC64_LOCAL_BIT)) >> 2) << 2) + +static unsigned int local_entry_offset(const Elf64_Sym *sym) +{ + /* sym->st_other indicates offset to local entry point + * (otherwise it will assume r12 is the address of the start + * of function and try to derive r2 from it). */ + return PPC64_LOCAL_ENTRY_OFFSET(sym->st_other); +} +#else +#define R2_STACK_OFFSET 40 + +/* An address is address of the OPD entry, which contains address of fn. */ +typedef struct ppc64_opd_entry func_desc_t; + +static func_desc_t func_desc(unsigned long addr) +{ + return *(struct ppc64_opd_entry *)addr; +} +static unsigned long func_addr(unsigned long addr) +{ + return func_desc(addr).funcaddr; +} +static unsigned long stub_func_addr(func_desc_t func) +{ + return func.funcaddr; +} +static unsigned int local_entry_offset(const Elf64_Sym *sym) +{ + return 0; +} +#endif + /* Like PPC32, we need little trampolines to do > 24-bit jumps (into the kernel itself). But on PPC64, these need to be used for every jump, actually, to reset r2 (TOC+0x8000). */ struct ppc64_stub_entry { - /* 28 byte jump instruction sequence (7 instructions) */ - unsigned char jump[28]; - unsigned char unused[4]; + /* 28 byte jump instruction sequence (7 instructions). We only + * need 6 instructions on ABIv2 but we always allocate 7 so + * so we don't have to modify the trampoline load instruction. 
*/ + u32 jump[7]; + u32 unused; /* Data for the above code */ - struct ppc64_opd_entry opd; + func_desc_t funcdata; }; -/* We use a stub to fix up r2 (TOC ptr) and to jump to the (external) - function which may be more than 24-bits away. We could simply - patch the new r2 value and function pointer into the stub, but it's - significantly shorter to put these values at the end of the stub - code, and patch the stub address (32-bits relative to the TOC ptr, - r2) into the stub. */ -static struct ppc64_stub_entry ppc64_stub = -{ .jump = { -#ifdef __LITTLE_ENDIAN__ - 0x00, 0x00, 0x82, 0x3d, /* addis r12,r2, <high> */ - 0x00, 0x00, 0x8c, 0x39, /* addi r12,r12, <low> */ - /* Save current r2 value in magic place on the stack. */ - 0x28, 0x00, 0x41, 0xf8, /* std r2,40(r1) */ - 0x20, 0x00, 0x6c, 0xe9, /* ld r11,32(r12) */ - 0x28, 0x00, 0x4c, 0xe8, /* ld r2,40(r12) */ - 0xa6, 0x03, 0x69, 0x7d, /* mtctr r11 */ - 0x20, 0x04, 0x80, 0x4e /* bctr */ -#else - 0x3d, 0x82, 0x00, 0x00, /* addis r12,r2, <high> */ - 0x39, 0x8c, 0x00, 0x00, /* addi r12,r12, <low> */ +/* + * PPC64 uses 24 bit jumps, but we need to jump into other modules or + * the kernel which may be further. So we jump to a stub. + * + * For ELFv1 we need to use this to set up the new r2 value (aka TOC + * pointer). For ELFv2 it's the callee's responsibility to set up the + * new r2, but for both we need to save the old r2. + * + * We could simply patch the new r2 value and function pointer into + * the stub, but it's significantly shorter to put these values at the + * end of the stub code, and patch the stub address (32-bits relative + * to the TOC ptr, r2) into the stub. + */ + +static u32 ppc64_stub_insns[] = { + 0x3d620000, /* addis r11,r2, <high> */ + 0x396b0000, /* addi r11,r11, <low> */ /* Save current r2 value in magic place on the stack. */ - 0xf8, 0x41, 0x00, 0x28, /* std r2,40(r1) */ - 0xe9, 0x6c, 0x00, 0x20, /* ld r11,32(r12) */ - 0xe8, 0x4c, 0x00, 0x28, /* ld r2,40(r12) */ - 0x7d, 0x69, 0x03, 0xa6, /* mtctr r11 */ - 0x4e, 0x80, 0x04, 0x20 /* bctr */ + 0xf8410000|R2_STACK_OFFSET, /* std r2,R2_STACK_OFFSET(r1) */ + 0xe98b0020, /* ld r12,32(r11) */ +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + /* Set up new r2 from function descriptor */ + 0xe84b0026, /* ld r2,40(r11) */ +#endif + 0x7d8903a6, /* mtctr r12 */ + 0x4e800420 /* bctr */ +}; + +#ifdef CONFIG_DYNAMIC_FTRACE + +static u32 ppc64_stub_mask[] = { + 0xffff0000, + 0xffff0000, + 0xffffffff, + 0xffffffff, +#if !defined(_CALL_ELF) || _CALL_ELF != 2 + 0xffffffff, +#endif + 0xffffffff, + 0xffffffff +}; + +bool is_module_trampoline(u32 *p) +{ + unsigned int i; + u32 insns[ARRAY_SIZE(ppc64_stub_insns)]; + + BUILD_BUG_ON(sizeof(ppc64_stub_insns) != sizeof(ppc64_stub_mask)); + + if (probe_kernel_read(insns, p, sizeof(insns))) + return -EFAULT; + + for (i = 0; i < ARRAY_SIZE(ppc64_stub_insns); i++) { + u32 insna = insns[i]; + u32 insnb = ppc64_stub_insns[i]; + u32 mask = ppc64_stub_mask[i]; + + if ((insna & mask) != (insnb & mask)) + return false; + } + + return true; +} + +int module_trampoline_target(struct module *mod, u32 *trampoline, + unsigned long *target) +{ + u32 buf[2]; + u16 upper, lower; + long offset; + void *toc_entry; + + if (probe_kernel_read(buf, trampoline, sizeof(buf))) + return -EFAULT; + + upper = buf[0] & 0xffff; + lower = buf[1] & 0xffff; + + /* perform the addis/addi, both signed */ + offset = ((short)upper << 16) + (short)lower; + + /* + * Now get the address this trampoline jumps to. This + * is always 32 bytes into our trampoline stub. 
@@ -183,6 +308,7 @@ static unsigned long get_stubs_size(const Elf64_Ehdr *hdr,
 	return relocs * sizeof(struct ppc64_stub_entry);
 }

+/* Still needed for ELFv2, for .TOC. */
 static void dedotify_versions(struct modversion_info *vers,
 			      unsigned long size)
 {
@@ -193,7 +319,7 @@
 		memmove(vers->name, vers->name+1, strlen(vers->name));
 }

-/* Undefined symbols which refer to .funcname, hack to funcname */
+/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
 static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 {
 	unsigned int i;
@@ -207,6 +333,24 @@ static void dedotify(Elf64_Sym *syms, unsigned int numsyms, char *strtab)
 	}
 }

+static Elf64_Sym *find_dot_toc(Elf64_Shdr *sechdrs,
+			       const char *strtab,
+			       unsigned int symindex)
+{
+	unsigned int i, numsyms;
+	Elf64_Sym *syms;
+
+	syms = (Elf64_Sym *)sechdrs[symindex].sh_addr;
+	numsyms = sechdrs[symindex].sh_size / sizeof(Elf64_Sym);
+
+	for (i = 1; i < numsyms; i++) {
+		if (syms[i].st_shndx == SHN_UNDEF
+		    && strcmp(strtab + syms[i].st_name, "TOC.") == 0)
+			return &syms[i];
+	}
+	return NULL;
+}
+
 int module_frob_arch_sections(Elf64_Ehdr *hdr,
 			      Elf64_Shdr *sechdrs,
 			      char *secstrings,
@@ -271,21 +415,12 @@ static inline unsigned long my_r2(Elf64_Shdr *sechdrs, struct module *me)
 /* Patch stub to reference function and correct r2 value. */
 static inline int create_stub(Elf64_Shdr *sechdrs,
 			      struct ppc64_stub_entry *entry,
-			      struct ppc64_opd_entry *opd,
+			      unsigned long addr,
 			      struct module *me)
 {
-	Elf64_Half *loc1, *loc2;
 	long reladdr;

-	*entry = ppc64_stub;
-
-#ifdef __LITTLE_ENDIAN__
-	loc1 = (Elf64_Half *)&entry->jump[0];
-	loc2 = (Elf64_Half *)&entry->jump[4];
-#else
-	loc1 = (Elf64_Half *)&entry->jump[2];
-	loc2 = (Elf64_Half *)&entry->jump[6];
-#endif
+	memcpy(entry->jump, ppc64_stub_insns, sizeof(ppc64_stub_insns));

 	/* Stub uses address relative to r2. */
 	reladdr = (unsigned long)entry - my_r2(sechdrs, me);
@@ -296,35 +431,33 @@
 	}
 	DEBUGP("Stub %p get data from reladdr %li\n", entry, reladdr);

-	*loc1 = PPC_HA(reladdr);
-	*loc2 = PPC_LO(reladdr);
-	entry->opd.funcaddr = opd->funcaddr;
-	entry->opd.r2 = opd->r2;
+	entry->jump[0] |= PPC_HA(reladdr);
+	entry->jump[1] |= PPC_LO(reladdr);
+	entry->funcdata = func_desc(addr);
 	return 1;
 }

-/* Create stub to jump to function described in this OPD: we need the
+/* Create stub to jump to function described in this OPD/ptr: we need the
    stub to set up the TOC ptr (r2) for the function. */
 static unsigned long stub_for_addr(Elf64_Shdr *sechdrs,
-				   unsigned long opdaddr,
+				   unsigned long addr,
 				   struct module *me)
 {
 	struct ppc64_stub_entry *stubs;
-	struct ppc64_opd_entry *opd = (void *)opdaddr;
 	unsigned int i, num_stubs;

 	num_stubs = sechdrs[me->arch.stubs_section].sh_size / sizeof(*stubs);

 	/* Find this stub, or if that fails, the next avail. entry */
 	stubs = (void *)sechdrs[me->arch.stubs_section].sh_addr;
-	for (i = 0; stubs[i].opd.funcaddr; i++) {
+	for (i = 0; stub_func_addr(stubs[i].funcdata); i++) {
 		BUG_ON(i >= num_stubs);

-		if (stubs[i].opd.funcaddr == opd->funcaddr)
+		if (stub_func_addr(stubs[i].funcdata) == func_addr(addr))
 			return (unsigned long)&stubs[i];
 	}

-	if (!create_stub(sechdrs, &stubs[i], opd, me))
+	if (!create_stub(sechdrs, &stubs[i], addr, me))
 		return 0;

 	return (unsigned long)&stubs[i];
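create_stub() encodes the r2-relative stub offset by ORing PPC_HA()/PPC_LO()
into the addis/addi template, and module_trampoline_target() earlier decodes
it with sign-extended adds. The round trip only works because HA pre-adds
0x8000 to absorb the borrow from the sign-extended low half. Assuming the
usual asm/ppc_asm.h definitions of the helpers, the identity can be checked
in isolation:

    #include <assert.h>

    /* Assumed to match the kernel's asm/ppc_asm.h helpers. */
    #define PPC_LO(v)	((v) & 0xffff)
    #define PPC_HI(v)	(((v) >> 16) & 0xffff)
    #define PPC_HA(v)	PPC_HI((v) + 0x8000)

    int main(void)
    {
    	long reladdr;

    	for (reladdr = -0x7fffffffL; ; reladdr += 0x12345a7L) {
    		unsigned short upper = PPC_HA(reladdr);
    		unsigned short lower = PPC_LO(reladdr);

    		/* the decode step from module_trampoline_target(),
    		   widened to long to keep the shift well-defined */
    		long offset = ((long)(short)upper << 16) + (short)lower;

    		assert(offset == reladdr);
    		if (reladdr > 0x7e000000L)
    			break;
    	}
    	return 0;
    }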
@@ -339,7 +472,8 @@ static int restore_r2(u32 *instruction, struct module *me)
 		       me->name, *instruction);
 		return 0;
 	}
-	*instruction = 0xe8410028;	/* ld r2,40(r1) */
+	/* ld r2,R2_STACK_OFFSET(r1) */
+	*instruction = 0xe8410000 | R2_STACK_OFFSET;
 	return 1;
 }

@@ -357,6 +491,17 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,

 	DEBUGP("Applying ADD relocate section %u to %u\n", relsec,
 	       sechdrs[relsec].sh_info);
+
+	/* First time we're called, we can fix up .TOC. */
+	if (!me->arch.toc_fixed) {
+		sym = find_dot_toc(sechdrs, strtab, symindex);
+		/* It's theoretically possible that a module doesn't want a
+		 * .TOC. so don't fail it just for that. */
+		if (sym)
+			sym->st_value = my_r2(sechdrs, me);
+		me->arch.toc_fixed = true;
+	}
+
 	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
 		/* This is where to make the change */
 		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
@@ -453,7 +598,8 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
 				return -ENOENT;
 			if (!restore_r2((u32 *)location + 1, me))
 				return -ENOEXEC;
-		}
+		} else
+			value += local_entry_offset(sym);

 		/* Convert value to relative */
 		value -= (unsigned long)location;
@@ -474,6 +620,31 @@
 			*location = value - (unsigned long)location;
 			break;

+		case R_PPC64_TOCSAVE:
+			/*
+			 * Marker reloc indicates we don't have to save r2.
+			 * That would only save us one instruction, so ignore
+			 * it.
+			 */
+			break;
+
+		case R_PPC64_REL16_HA:
+			/* Subtract location pointer */
+			value -= (unsigned long)location;
+			value = ((value + 0x8000) >> 16);
+			*((uint16_t *) location)
+				= (*((uint16_t *) location) & ~0xffff)
+				| (value & 0xffff);
+			break;
+
+		case R_PPC64_REL16_LO:
+			/* Subtract location pointer */
+			value -= (unsigned long)location;
+			*((uint16_t *) location)
+				= (*((uint16_t *) location) & ~0xffff)
+				| (value & 0xffff);
+			break;
+
 		default:
 			printk("%s: Unknown ADD relocation: %lu\n",
 			       me->name,
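The hunks that follow switch process.c and setup_64.c from open-coded
function-descriptor dereferences to ppc_function_entry(). That helper is not
part of this diff (it lives in asm/code-patching.h); presumably it mirrors
the func_desc() logic above, along these lines (sketch only, field names
assumed):

    /* Map a C function pointer to the address the CPU should branch to. */
    struct ppc64_opd_entry_sketch {
    	unsigned long funcaddr;	/* entry point */
    	unsigned long r2;	/* TOC value for the callee */
    };

    static unsigned long ppc_function_entry_sketch(void *func)
    {
    #if defined(_CALL_ELF) && _CALL_ELF == 2
    	/* ELFv2: the pointer already is the (global) entry point */
    	return (unsigned long)func;
    #else
    	/* ELFv1: the pointer names a descriptor; branch to its
    	   first doubleword */
    	return ((struct ppc64_opd_entry_sketch *)func)->funcaddr;
    #endif
    }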
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 31d021506d21..2ae1b99166c6 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -54,6 +54,7 @@
 #ifdef CONFIG_PPC64
 #include <asm/firmware.h>
 #endif
+#include <asm/code-patching.h>

 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
@@ -1108,7 +1109,9 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 		struct thread_info *ti = (void *)task_stack_page(p);
 		memset(childregs, 0, sizeof(struct pt_regs));
 		childregs->gpr[1] = sp + sizeof(struct pt_regs);
-		childregs->gpr[14] = usp;	/* function */
+		/* function */
+		if (usp)
+			childregs->gpr[14] = ppc_function_entry((void *)usp);
 #ifdef CONFIG_PPC64
 		clear_tsk_thread_flag(p, TIF_32BIT);
 		childregs->softe = 1;
@@ -1187,17 +1190,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
 	if (cpu_has_feature(CPU_FTR_HAS_PPR))
 		p->thread.ppr = INIT_PPR;
 #endif
-	/*
-	 * The PPC64 ABI makes use of a TOC to contain function
-	 * pointers. The function (ret_from_except) is actually a pointer
-	 * to the TOC entry. The first entry is a pointer to the actual
-	 * function.
-	 */
-#ifdef CONFIG_PPC64
-	kregs->nip = *((unsigned long *)f);
-#else
-	kregs->nip = (unsigned long)f;
-#endif
+	kregs->nip = ppc_function_entry(f);
 	return 0;
 }

diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index b0c263da219a..77aa1e95e904 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -23,7 +23,7 @@ strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
 opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
 boot_command_line __prom_init_toc_start __prom_init_toc_end
-btext_setup_display"
+btext_setup_display TOC."

 NM="$1"
 OBJ="$2"
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index fbe24377eda3..90b532ace0d5 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -341,7 +341,7 @@ void smp_release_cpus(void)
 	ptr  = (unsigned long *)((unsigned long)&__secondary_hold_spinloop
 			- PHYSICAL_START);
-	*ptr = __pa(generic_secondary_smp_init);
+	*ptr = ppc_function_entry(generic_secondary_smp_init);

 	/* And wait a bit for them to catch up */
 	for (i = 0; i < 100000; i++) {
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 93219c34af32..895c50ca943c 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -17,12 +17,12 @@
 #include <asm/ppc_asm.h>

 #ifdef CONFIG_PPC64
-#define SYSCALL(func)		.llong	.sys_##func,.sys_##func
-#define COMPAT_SYS(func)	.llong	.sys_##func,.compat_sys_##func
-#define PPC_SYS(func)		.llong	.ppc_##func,.ppc_##func
-#define OLDSYS(func)		.llong	.sys_ni_syscall,.sys_ni_syscall
-#define SYS32ONLY(func)		.llong	.sys_ni_syscall,.compat_sys_##func
-#define SYSX(f, f3264, f32)	.llong	.f,.f3264
+#define SYSCALL(func)		.llong	DOTSYM(sys_##func),DOTSYM(sys_##func)
+#define COMPAT_SYS(func)	.llong	DOTSYM(sys_##func),DOTSYM(compat_sys_##func)
+#define PPC_SYS(func)		.llong	DOTSYM(ppc_##func),DOTSYM(ppc_##func)
+#define OLDSYS(func)		.llong	DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
+#define SYS32ONLY(func)		.llong	DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
+#define SYSX(f, f3264, f32)	.llong	DOTSYM(f),DOTSYM(f3264)
 #else
 #define SYSCALL(func)		.long	sys_##func
 #define COMPAT_SYS(func)	.long	sys_##func
@@ -36,6 +36,8 @@
 #define PPC_SYS_SPU(func)	PPC_SYS(func)
 #define SYSX_SPU(f, f3264, f32)	SYSX(f, f3264, f32)

+.section .rodata,"a"
+
 #ifdef CONFIG_PPC64
 #define sys_sigpending	sys_ni_syscall
 #define sys_old_getrlimit sys_ni_syscall
@@ -43,5 +45,7 @@
 	.p2align	3
 #endif

-_GLOBAL(sys_call_table)
+.globl sys_call_table
+sys_call_table:
+
 #include <asm/systbl.h>
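DOTSYM() is likewise not defined in these hunks; it comes from asm/ppc_asm.h
elsewhere in this series. The intent is that on ELFv1 the syscall table must
hold dot-symbols (the actual function entry points), while on ELFv2 the
plain symbol already is the entry point, so a plausible definition is
(sketch, assumed to match the real header):

    #if defined(_CALL_ELF) && _CALL_ELF == 2
    #define DOTSYM(a)	a
    #else
    #define GLUE(a, b)	a##b
    #define DOTSYM(a)	GLUE(., a)	/* dot-symbol = entry on ELFv1 */
    #endif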
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 508c54b92fa6..ee061c3715de 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -42,7 +42,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX);					\
 /* Stack frame offsets for local variables. */
 #define TM_FRAME_L0	TM_FRAME_SIZE-16
 #define TM_FRAME_L1	TM_FRAME_SIZE-8
-#define STACK_PARAM(x)	(48+((x)*8))

 /* In order to access the TM SPRs, TM must be enabled.  So, do so: */
@@ -109,12 +108,12 @@ _GLOBAL(tm_reclaim)
 	mflr	r0
 	stw	r6, 8(r1)
 	std	r0, 16(r1)
-	std	r2, 40(r1)
+	std	r2, STK_GOT(r1)
 	stdu	r1, -TM_FRAME_SIZE(r1)

 	/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD]. */

-	std	r3, STACK_PARAM(0)(r1)
+	std	r3, STK_PARAM(R3)(r1)
 	SAVE_NVGPRS(r1)

 	/* We need to set up MSR for VSX register save instructions.  Here we
@@ -210,7 +209,7 @@ dont_backup_fp:
 	/* Now get some more GPRS free */
 	std	r7, GPR7(r1)			/* Temporary stash */
 	std	r12, GPR12(r1)			/* ''   ''   ''   */
-	ld	r12, STACK_PARAM(0)(r1)		/* Param 0, thread_struct * */
+	ld	r12, STK_PARAM(R3)(r1)

 	std	r11, THREAD_TM_PPR(r12)		/* Store PPR and free r11 */

@@ -297,7 +296,7 @@ dont_backup_fp:
 	ld	r0, 16(r1)
 	mtcr	r4
 	mtlr	r0
-	ld	r2, 40(r1)
+	ld	r2, STK_GOT(r1)

 	/* Load system default DSCR */
 	ld	r4, DSCR_DEFAULT@toc(r2)
@@ -320,7 +319,7 @@ _GLOBAL(__tm_recheckpoint)
 	mflr	r0
 	stw	r5, 8(r1)
 	std	r0, 16(r1)
-	std	r2, 40(r1)
+	std	r2, STK_GOT(r1)
 	stdu	r1, -TM_FRAME_SIZE(r1)

 	/* We've a struct pt_regs at [r1+STACK_FRAME_OVERHEAD].
@@ -478,7 +477,7 @@ restore_gprs:
 	ld	r0, 16(r1)
 	mtcr	r4
 	mtlr	r0
-	ld	r2, 40(r1)
+	ld	r2, STK_GOT(r1)

 	/* Load system default DSCR */
 	ld	r4, DSCR_DEFAULT@toc(r2)
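The tm.S hunks above replace the hard-coded 40(r1) TOC save with
STK_GOT(r1) because the two ABIs keep that doubleword at different offsets
in the stack frame header; it is the same 40-versus-24 split that
R2_STACK_OFFSET captures in module_64.c. Assuming STK_GOT expands to those
values, the frame headers compare as follows (layout sketch only):

    /* Minimal sketch of the 64-bit stack frame header, assuming STK_GOT
     * matches the R2_STACK_OFFSET values used in module_64.c. */
    #if defined(_CALL_ELF) && _CALL_ELF == 2
    struct frame_head {			/* ELFv2 */
    	unsigned long back_chain;	/*  0(r1) */
    	unsigned long cr_save;		/*  8(r1) */
    	unsigned long lr_save;		/* 16(r1) */
    	unsigned long toc_save;		/* 24(r1): STK_GOT */
    };
    #else
    struct frame_head {			/* ELFv1 */
    	unsigned long back_chain;	/*  0(r1) */
    	unsigned long cr_save;		/*  8(r1) */
    	unsigned long lr_save;		/* 16(r1) */
    	unsigned long compiler_dw;	/* 24(r1) */
    	unsigned long linker_dw;	/* 32(r1) */
    	unsigned long toc_save;		/* 40(r1): STK_GOT */
    };
    #endif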