Diffstat (limited to 'arch/s390/kernel/entry.S')
-rw-r--r-- | arch/s390/kernel/entry.S | 614
1 files changed, 449 insertions, 165 deletions
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 84062e7a77da..247b7aae4c6d 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -20,6 +20,8 @@
 #include <asm/page.h>
 #include <asm/sigp.h>
 #include <asm/irq.h>
+#include <asm/fpu-internal.h>
+#include <asm/vx-insn.h>
 
 __PT_R0      =	__PT_GPRS
 __PT_R1      =	__PT_GPRS + 8
@@ -46,10 +48,10 @@ _TIF_WORK	= (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
 		   _TIF_UPROBE)
 _TIF_TRACE	= (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SECCOMP | \
 		   _TIF_SYSCALL_TRACEPOINT)
-_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE)
+_CIF_WORK	= (_CIF_MCCK_PENDING | _CIF_ASCE | _CIF_FPU)
 _PIF_WORK	= (_PIF_PER_TRAP)
 
-#define BASED(name) name-system_call(%r13)
+#define BASED(name) name-cleanup_critical(%r13)
 
 	.macro	TRACE_IRQS_ON
 #ifdef CONFIG_TRACE_IRQFLAGS
@@ -73,38 +75,6 @@ _PIF_WORK	= (_PIF_PER_TRAP)
 #endif
 	.endm
 
-	.macro	LPP newpp
-#if IS_ENABLED(CONFIG_KVM)
-	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
-	jz	.+8
-	.insn	s,0xb2800000,\newpp
-#endif
-	.endm
-
-	.macro	HANDLE_SIE_INTERCEPT scratch,reason
-#if IS_ENABLED(CONFIG_KVM)
-	tmhh	%r8,0x0001		# interrupting from user ?
-	jnz	.+62
-	lgr	\scratch,%r9
-	slg	\scratch,BASED(.Lsie_critical)
-	clg	\scratch,BASED(.Lsie_critical_length)
-	.if	\reason==1
-	# Some program interrupts are suppressing (e.g. protection).
-	# We must also check the instruction after SIE in that case.
-	# do_protection_exception will rewind to .Lrewind_pad
-	jh	.+42
-	.else
-	jhe	.+42
-	.endif
-	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
-	LPP	__SF_EMPTY+16(%r15)		# set host id
-	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
-	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-	larl	%r9,sie_exit			# skip forward to sie_exit
-	mvi	__SF_EMPTY+31(%r15),\reason	# set exit reason
-#endif
-	.endm
-
 	.macro	CHECK_STACK stacksize,savearea
 #ifdef CONFIG_CHECK_STACK
 	tml	%r15,\stacksize - CONFIG_STACK_GUARD
@@ -113,7 +83,7 @@ _PIF_WORK	= (_PIF_PER_TRAP)
 #endif
 	.endm
 
-	.macro	SWITCH_ASYNC savearea,stack,shift
+	.macro	SWITCH_ASYNC savearea,timer
 	tmhh	%r8,0x0001		# interrupting from user ?
 	jnz	1f
 	lgr	%r14,%r9
@@ -124,26 +94,28 @@
 	brasl	%r14,cleanup_critical
 	tmhh	%r8,0x0001		# retest problem state after cleanup
 	jnz	1f
-0:	lg	%r14,\stack		# are we already on the target stack?
+0:	lg	%r14,__LC_ASYNC_STACK	# are we already on the async stack?
 	slgr	%r14,%r15
-	srag	%r14,%r14,\shift
-	jnz	1f
-	CHECK_STACK 1<<\shift,\savearea
+	srag	%r14,%r14,STACK_SHIFT
+	jnz	2f
+	CHECK_STACK 1<<STACK_SHIFT,\savearea
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-	j	2f
-1:	lg	%r15,\stack		# load target stack
-2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+	j	3f
+1:	LAST_BREAK %r14
+	UPDATE_VTIME %r14,%r15,\timer
+2:	lg	%r15,__LC_ASYNC_STACK	# load async stack
+3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	.endm
 
-	.macro	UPDATE_VTIME scratch,enter_timer
-	lg	\scratch,__LC_EXIT_TIMER
-	slg	\scratch,\enter_timer
-	alg	\scratch,__LC_USER_TIMER
-	stg	\scratch,__LC_USER_TIMER
-	lg	\scratch,__LC_LAST_UPDATE_TIMER
-	slg	\scratch,__LC_EXIT_TIMER
-	alg	\scratch,__LC_SYSTEM_TIMER
-	stg	\scratch,__LC_SYSTEM_TIMER
+	.macro	UPDATE_VTIME w1,w2,enter_timer
+	lg	\w1,__LC_EXIT_TIMER
+	lg	\w2,__LC_LAST_UPDATE_TIMER
+	slg	\w1,\enter_timer
+	slg	\w2,__LC_EXIT_TIMER
+	alg	\w1,__LC_USER_TIMER
+	alg	\w2,__LC_SYSTEM_TIMER
+	stg	\w1,__LC_USER_TIMER
+	stg	\w2,__LC_SYSTEM_TIMER
 	mvc	__LC_LAST_UPDATE_TIMER(8),\enter_timer
 	.endm
 
@@ -197,6 +169,69 @@ ENTRY(__switch_to)
 	br	%r14
 
 .L__critical_start:
+
+#if IS_ENABLED(CONFIG_KVM)
+/*
+ * sie64a calling convention:
+ * %r2 pointer to sie control block
+ * %r3 guest register save area
+ */
+ENTRY(sie64a)
+	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
+	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
+	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
+	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU	# load guest fp/vx registers ?
+	jno	.Lsie_load_guest_gprs
+	brasl	%r14,load_fpu_regs		# load guest fp/vx regs
+.Lsie_load_guest_gprs:
+	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
+	lg	%r14,__LC_GMAP			# get gmap pointer
+	ltgr	%r14,%r14
+	jz	.Lsie_gmap
+	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
+.Lsie_gmap:
+	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
+	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
+	tm	__SIE_PROG20+3(%r14),3		# last exit...
+	jnz	.Lsie_skip
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	jo	.Lsie_skip			# exit if fp/vx regs changed
+	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
+	jz	.Lsie_enter
+	.insn	s,0xb2800000,__LC_CURRENT_PID	# set guest id to pid
+.Lsie_enter:
+	sie	0(%r14)
+	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
+	jz	.Lsie_skip
+	.insn	s,0xb2800000,__SF_EMPTY+16(%r15)# set host id
+.Lsie_skip:
+	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+.Lsie_done:
+# some program checks are suppressing. C code (e.g. do_protection_exception)
+# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
+# instructions between sie64a and .Lsie_done should not cause program
+# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
+# See also .Lcleanup_sie
+.Lrewind_pad:
+	nop	0
+	.globl sie_exit
+sie_exit:
+	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
+	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
+	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
+	lg	%r2,__SF_EMPTY+24(%r15)		# return exit reason code
+	br	%r14
+.Lsie_fault:
+	lghi	%r14,-EFAULT
+	stg	%r14,__SF_EMPTY+24(%r15)	# set exit reason code
+	j	sie_exit
+
+	EX_TABLE(.Lrewind_pad,.Lsie_fault)
+	EX_TABLE(sie_exit,.Lsie_fault)
+#endif
+
 /*
  * SVC interrupt handler routine. System calls are synchronous events and
  * are executed with interrupts enabled.
@@ -212,9 +247,9 @@ ENTRY(system_call)
 .Lsysc_per:
 	lg	%r15,__LC_KERNEL_STACK
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
-.Lsysc_vtime:
-	UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
 	LAST_BREAK %r13
+.Lsysc_vtime:
+	UPDATE_VTIME %r10,%r13,__LC_SYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(16,%r11),__LC_SVC_OLD_PSW
@@ -244,8 +279,6 @@ ENTRY(system_call)
 .Lsysc_return:
 	LOCKDEP_SYS_EXIT
 .Lsysc_tif:
-	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
-	jno	.Lsysc_restore
 	tm	__PT_FLAGS+7(%r11),_PIF_WORK
 	jnz	.Lsysc_work
 	tm	__TI_flags+7(%r12),_TIF_WORK
@@ -280,6 +313,8 @@ ENTRY(system_call)
 	jo	.Lsysc_sigpending
 	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
 	jo	.Lsysc_notify_resume
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	jo	.Lsysc_vxrs
 	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
 	jo	.Lsysc_uaccess
 	j	.Lsysc_return		# beware of critical section cleanup
@@ -307,6 +342,13 @@ ENTRY(system_call)
 	j	.Lsysc_return
 
 #
+# CIF_FPU is set, restore floating-point controls and floating-point registers.
+#
+.Lsysc_vxrs:
+	larl	%r14,.Lsysc_return
+	jg	load_fpu_regs
+
+#
 # _TIF_SIGPENDING is set, call do_signal
 #
 .Lsysc_sigpending:
@@ -405,28 +447,35 @@ ENTRY(pgm_check_handler)
 	stmg	%r8,%r15,__LC_SAVE_AREA_SYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
-	larl	%r13,system_call
+	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_PGM_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,1
 	tmhh	%r8,0x0001		# test problem state bit
-	jnz	1f			# -> fault in user space
-	tmhh	%r8,0x4000		# PER bit set in old PSW ?
-	jnz	0f			# -> enabled, can't be a double fault
+	jnz	2f			# -> fault in user space
+#if IS_ENABLED(CONFIG_KVM)
+	# cleanup critical section for sie64a
+	lgr	%r14,%r9
+	slg	%r14,BASED(.Lsie_critical_start)
+	clg	%r14,BASED(.Lsie_critical_length)
+	jhe	0f
+	brasl	%r14,.Lcleanup_sie
+#endif
+0:	tmhh	%r8,0x4000		# PER bit set in old PSW ?
+	jnz	1f			# -> enabled, can't be a double fault
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
 	jnz	.Lpgm_svcper		# -> single stepped svc
-0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
+1:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
 	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-	j	2f
-1:	UPDATE_VTIME %r14,__LC_SYNC_ENTER_TIMER
-	LAST_BREAK %r14
+	j	3f
+2:	LAST_BREAK %r14
+	UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER
 	lg	%r15,__LC_KERNEL_STACK
 	lg	%r14,__TI_task(%r12)
 	aghi	%r14,__TASK_thread	# pointer to thread_struct
 	lghi	%r13,__LC_PGM_TDB
 	tm	__LC_PGM_ILC+2,0x02	# check for transaction abort
-	jz	2f
+	jz	3f
 	mvc	__THREAD_trap_tdb(256,%r14),0(%r13)
-2:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
+3:	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
@@ -435,24 +484,28 @@ ENTRY(pgm_check_handler)
 	xc	__PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
 	stg	%r10,__PT_ARGS(%r11)
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
-	jz	0f
+	jz	4f
 	tmhh	%r8,0x0001		# kernel per event ?
 	jz	.Lpgm_kprobe
 	oi	__PT_FLAGS+7(%r11),_PIF_PER_TRAP
 	mvc	__THREAD_per_address(8,%r14),__LC_PER_ADDRESS
 	mvc	__THREAD_per_cause(2,%r14),__LC_PER_CODE
 	mvc	__THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID
-0:	REENABLE_IRQS
+4:	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	larl	%r1,pgm_check_table
 	llgh	%r10,__PT_INT_CODE+2(%r11)
 	nill	%r10,0x007f
 	sll	%r10,2
-	je	.Lsysc_return
+	je	.Lpgm_return
 	lgf	%r1,0(%r10,%r1)		# load address of handler routine
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# branch to interrupt-handler
-	j	.Lsysc_return
+.Lpgm_return:
+	LOCKDEP_SYS_EXIT
+	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
+	jno	.Lsysc_restore
+	j	.Lsysc_tif
 
 #
 # PER event in supervisor state, must be kprobes
@@ -462,7 +515,7 @@
 	xc	__SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
 	lgr	%r2,%r11		# pass pointer to pt_regs
 	brasl	%r14,do_per_trap
-	j	.Lsysc_return
+	j	.Lpgm_return
 
 #
 # single stepped system call
@@ -483,15 +536,9 @@ ENTRY(io_int_handler)
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
-	larl	%r13,system_call
+	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_IO_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,2
-	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
-	tmhh	%r8,0x0001		# interrupting from user?
-	jz	.Lio_skip
-	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
-	LAST_BREAK %r14
-.Lio_skip:
+	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
@@ -587,6 +634,8 @@ ENTRY(io_int_handler)
 	jo	.Lio_sigpending
 	tm	__TI_flags+7(%r12),_TIF_NOTIFY_RESUME
 	jo	.Lio_notify_resume
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	jo	.Lio_vxrs
 	tm	__LC_CPU_FLAGS+7,_CIF_ASCE
 	jo	.Lio_uaccess
 	j	.Lio_return		# beware of critical section cleanup
@@ -609,6 +658,13 @@ ENTRY(io_int_handler)
 	j	.Lio_return
 
 #
+# CIF_FPU is set, restore floating-point controls and floating-point registers.
+#
+.Lio_vxrs:
+	larl	%r14,.Lio_return
+	jg	load_fpu_regs
+
+#
 # _TIF_NEED_RESCHED is set, call schedule
 #
 .Lio_reschedule:
@@ -652,15 +708,9 @@ ENTRY(ext_int_handler)
 	stmg	%r8,%r15,__LC_SAVE_AREA_ASYNC
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
-	larl	%r13,system_call
+	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_EXT_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,3
-	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
-	tmhh	%r8,0x0001		# interrupting from user ?
-	jz	.Lext_skip
-	UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
-	LAST_BREAK %r14
-.Lext_skip:
+	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_ENTER_TIMER
 	stmg	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
 	stmg	%r8,%r9,__PT_PSW(%r11)
@@ -690,6 +740,122 @@ ENTRY(psw_idle)
 	br	%r14
 .Lpsw_idle_end:
 
+/* Store floating-point controls and floating-point or vector extension
+ * registers instead.  A critical section cleanup assures that the registers
+ * are stored even if interrupted for some other work.  The register %r2
+ * designates a struct fpu to store register contents.  If the specified
+ * structure does not contain a register save area, the register store is
+ * omitted (see also comments in arch_dup_task_struct()).
+ *
+ * The CIF_FPU flag is set in any case.  The CIF_FPU triggers a lazy restore
+ * of the register contents at system call or io return.
+ */
+ENTRY(save_fpu_regs)
+	lg	%r2,__LC_CURRENT
+	aghi	%r2,__TASK_thread
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	bor	%r14
+	stfpc	__THREAD_FPU_fpc(%r2)
+.Lsave_fpu_regs_fpc_end:
+	lg	%r3,__THREAD_FPU_regs(%r2)
+	ltgr	%r3,%r3
+	jz	.Lsave_fpu_regs_done		# no save area -> set CIF_FPU
+	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
+	jz	.Lsave_fpu_regs_fp		# no -> store FP regs
+.Lsave_fpu_regs_vx_low:
+	VSTM	%v0,%v15,0,%r3			# vstm 0,15,0(3)
+.Lsave_fpu_regs_vx_high:
+	VSTM	%v16,%v31,256,%r3		# vstm 16,31,256(3)
+	j	.Lsave_fpu_regs_done		# -> set CIF_FPU flag
+.Lsave_fpu_regs_fp:
+	std	0,0(%r3)
+	std	1,8(%r3)
+	std	2,16(%r3)
+	std	3,24(%r3)
+	std	4,32(%r3)
+	std	5,40(%r3)
+	std	6,48(%r3)
+	std	7,56(%r3)
+	std	8,64(%r3)
+	std	9,72(%r3)
+	std	10,80(%r3)
+	std	11,88(%r3)
+	std	12,96(%r3)
+	std	13,104(%r3)
+	std	14,112(%r3)
+	std	15,120(%r3)
+.Lsave_fpu_regs_done:
+	oi	__LC_CPU_FLAGS+7,_CIF_FPU
+	br	%r14
+.Lsave_fpu_regs_end:
+
+/* Load floating-point controls and floating-point or vector extension
+ * registers.  A critical section cleanup assures that the register contents
+ * are loaded even if interrupted for some other work.  Depending on the saved
+ * FP/VX state, the vector-enablement control, CR0.46, is either set or cleared.
+ *
+ * There are special calling conventions to fit into sysc and io return work:
+ *	%r15:	<kernel stack>
+ * The function requires:
+ *	%r4 and __SF_EMPTY+32(%r15)
+ */
+load_fpu_regs:
+	lg	%r4,__LC_CURRENT
+	aghi	%r4,__TASK_thread
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	bnor	%r14
+	lfpc	__THREAD_FPU_fpc(%r4)
+	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
+	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
+	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
+	jz	.Lload_fpu_regs_fp_ctl		# -> no VX, load FP regs
+.Lload_fpu_regs_vx_ctl:
+	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
+	jo	.Lload_fpu_regs_vx
+	oi	__SF_EMPTY+32+5(%r15),2		# set VX control
+	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
+.Lload_fpu_regs_vx:
+	VLM	%v0,%v15,0,%r4
+.Lload_fpu_regs_vx_high:
+	VLM	%v16,%v31,256,%r4
+	j	.Lload_fpu_regs_done
+.Lload_fpu_regs_fp_ctl:
+	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
+	jz	.Lload_fpu_regs_fp
+	ni	__SF_EMPTY+32+5(%r15),253	# clear VX control
+	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
+.Lload_fpu_regs_fp:
+	ld	0,0(%r4)
+	ld	1,8(%r4)
+	ld	2,16(%r4)
+	ld	3,24(%r4)
+	ld	4,32(%r4)
+	ld	5,40(%r4)
+	ld	6,48(%r4)
+	ld	7,56(%r4)
+	ld	8,64(%r4)
+	ld	9,72(%r4)
+	ld	10,80(%r4)
+	ld	11,88(%r4)
+	ld	12,96(%r4)
+	ld	13,104(%r4)
+	ld	14,112(%r4)
+	ld	15,120(%r4)
+.Lload_fpu_regs_done:
+	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
+	br	%r14
+.Lload_fpu_regs_end:
+
+/* Test and set the vector enablement control in CR0.46 */
+ENTRY(__ctl_set_vx)
+	stctg	%c0,%c0,__SF_EMPTY(%r15)
+	tm	__SF_EMPTY+5(%r15),2
+	bor	%r14
+	oi	__SF_EMPTY+5(%r15),2
+	lctlg	%c0,%c0,__SF_EMPTY(%r15)
+	br	%r14
+.L__ctl_set_vx_end:
+
 .L__critical_end:
 
 /*
@@ -702,9 +868,8 @@ ENTRY(mcck_int_handler)
 	lmg	%r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1)# revalidate gprs
 	lg	%r10,__LC_LAST_BREAK
 	lg	%r12,__LC_THREAD_INFO
-	larl	%r13,system_call
+	larl	%r13,cleanup_critical
 	lmg	%r8,%r9,__LC_MCK_OLD_PSW
-	HANDLE_SIE_INTERCEPT %r14,4
 	tm	__LC_MCCK_CODE,0x80	# system damage?
 	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
 	lghi	%r14,__LC_CPU_TIMER_SAVE_AREA
@@ -725,11 +890,7 @@ ENTRY(mcck_int_handler)
 	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
 	jno	.Lmcck_panic		# no -> skip cleanup critical
-	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
-	tm	%r8,0x0001		# interrupting from user ?
-	jz	.Lmcck_skip
-	UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
-	LAST_BREAK %r14
+	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_MCCK_ENTER_TIMER
 .Lmcck_skip:
 	lghi	%r14,__LC_GPREGS_SAVE_AREA+64
 	stmg	%r0,%r7,__PT_R0(%r11)
@@ -764,12 +925,8 @@ ENTRY(mcck_int_handler)
 	lpswe	__LC_RETURN_MCCK_PSW
 
 .Lmcck_panic:
-	lg	%r14,__LC_PANIC_STACK
-	slgr	%r14,%r15
-	srag	%r14,%r14,PAGE_SHIFT
-	jz	0f
 	lg	%r15,__LC_PANIC_STACK
-0:	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
+	aghi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	.Lmcck_skip
 
 #
@@ -819,20 +976,13 @@ stack_overflow:
 	jg	kernel_stack_overflow
 #endif
 
-	.align	8
-.Lcleanup_table:
-	.quad	system_call
-	.quad	.Lsysc_do_svc
-	.quad	.Lsysc_tif
-	.quad	.Lsysc_restore
-	.quad	.Lsysc_done
-	.quad	.Lio_tif
-	.quad	.Lio_restore
-	.quad	.Lio_done
-	.quad	psw_idle
-	.quad	.Lpsw_idle_end
-
 cleanup_critical:
+#if IS_ENABLED(CONFIG_KVM)
+	clg	%r9,BASED(.Lcleanup_table_sie)	# .Lsie_gmap
+	jl	0f
+	clg	%r9,BASED(.Lcleanup_table_sie+8)# .Lsie_done
+	jl	.Lcleanup_sie
+#endif
 	clg	%r9,BASED(.Lcleanup_table)	# system_call
 	jl	0f
 	clg	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_do_svc
@@ -853,8 +1003,54 @@ cleanup_critical:
 	jl	0f
 	clg	%r9,BASED(.Lcleanup_table+72)	# .Lpsw_idle_end
 	jl	.Lcleanup_idle
+	clg	%r9,BASED(.Lcleanup_table+80)	# save_fpu_regs
+	jl	0f
+	clg	%r9,BASED(.Lcleanup_table+88)	# .Lsave_fpu_regs_end
+	jl	.Lcleanup_save_fpu_regs
+	clg	%r9,BASED(.Lcleanup_table+96)	# load_fpu_regs
+	jl	0f
+	clg	%r9,BASED(.Lcleanup_table+104)	# .Lload_fpu_regs_end
+	jl	.Lcleanup_load_fpu_regs
+	clg	%r9,BASED(.Lcleanup_table+112)	# __ctl_set_vx
+	jl	0f
+	clg	%r9,BASED(.Lcleanup_table+120)	# .L__ctl_set_vx_end
+	jl	.Lcleanup___ctl_set_vx
 0:	br	%r14
 
+	.align	8
+.Lcleanup_table:
+	.quad	system_call
+	.quad	.Lsysc_do_svc
+	.quad	.Lsysc_tif
+	.quad	.Lsysc_restore
+	.quad	.Lsysc_done
+	.quad	.Lio_tif
+	.quad	.Lio_restore
+	.quad	.Lio_done
+	.quad	psw_idle
+	.quad	.Lpsw_idle_end
+	.quad	save_fpu_regs
+	.quad	.Lsave_fpu_regs_end
+	.quad	load_fpu_regs
+	.quad	.Lload_fpu_regs_end
+	.quad	__ctl_set_vx
+	.quad	.L__ctl_set_vx_end
+
+#if IS_ENABLED(CONFIG_KVM)
+.Lcleanup_table_sie:
+	.quad	.Lsie_gmap
+	.quad	.Lsie_done
+
+.Lcleanup_sie:
+	lg	%r9,__SF_EMPTY(%r15)		# get control block pointer
+	tm	__LC_MACHINE_FLAGS+6,0x20	# MACHINE_FLAG_LPP
+	jz	0f
+	.insn	s,0xb2800000,__SF_EMPTY+16(%r15)# set host id
+0:	ni	__SIE_PROG0C+3(%r9),0xfe	# no longer in SIE
+	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
+	larl	%r9,sie_exit			# skip forward to sie_exit
+	br	%r14
+#endif
 
 .Lcleanup_system_call:
 	# check if stpt has been executed
@@ -915,7 +1111,7 @@ cleanup_critical:
 	.quad	system_call
 	.quad	.Lsysc_stmg
 	.quad	.Lsysc_per
-	.quad	.Lsysc_vtime+18
+	.quad	.Lsysc_vtime+36
 	.quad	.Lsysc_vtime+42
 
 .Lcleanup_sysc_tif:
@@ -981,6 +1177,145 @@ cleanup_critical:
 .Lcleanup_idle_insn:
 	.quad	.Lpsw_idle_lpsw
 
+.Lcleanup_save_fpu_regs:
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	bor	%r14
+	clg	%r9,BASED(.Lcleanup_save_fpu_regs_done)
+	jhe	5f
+	clg	%r9,BASED(.Lcleanup_save_fpu_regs_fp)
+	jhe	4f
+	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_high)
+	jhe	3f
+	clg	%r9,BASED(.Lcleanup_save_fpu_regs_vx_low)
+	jhe	2f
+	clg	%r9,BASED(.Lcleanup_save_fpu_fpc_end)
+	jhe	1f
+	lg	%r2,__LC_CURRENT
+0:	# Store floating-point controls
+	stfpc	__THREAD_FPU_fpc(%r2)
+1:	# Load register save area and check if VX is active
+	lg	%r3,__THREAD_FPU_regs(%r2)
+	ltgr	%r3,%r3
+	jz	5f			# no save area -> set CIF_FPU
+	tm	__THREAD_FPU_flags+3(%r2),FPU_USE_VX
+	jz	4f			# no VX -> store FP regs
+2:	# Store vector registers (V0-V15)
+	VSTM	%v0,%v15,0,%r3		# vstm 0,15,0(3)
+3:	# Store vector registers (V16-V31)
+	VSTM	%v16,%v31,256,%r3	# vstm 16,31,256(3)
+	j	5f			# -> done, set CIF_FPU flag
+4:	# Store floating-point registers
+	std	0,0(%r3)
+	std	1,8(%r3)
+	std	2,16(%r3)
+	std	3,24(%r3)
+	std	4,32(%r3)
+	std	5,40(%r3)
+	std	6,48(%r3)
+	std	7,56(%r3)
+	std	8,64(%r3)
+	std	9,72(%r3)
+	std	10,80(%r3)
+	std	11,88(%r3)
+	std	12,96(%r3)
+	std	13,104(%r3)
+	std	14,112(%r3)
+	std	15,120(%r3)
+5:	# Set CIF_FPU flag
+	oi	__LC_CPU_FLAGS+7,_CIF_FPU
+	lg	%r9,48(%r11)		# return from save_fpu_regs
+	br	%r14
+.Lcleanup_save_fpu_fpc_end:
+	.quad	.Lsave_fpu_regs_fpc_end
+.Lcleanup_save_fpu_regs_vx_low:
+	.quad	.Lsave_fpu_regs_vx_low
+.Lcleanup_save_fpu_regs_vx_high:
+	.quad	.Lsave_fpu_regs_vx_high
+.Lcleanup_save_fpu_regs_fp:
+	.quad	.Lsave_fpu_regs_fp
+.Lcleanup_save_fpu_regs_done:
+	.quad	.Lsave_fpu_regs_done
+
+.Lcleanup_load_fpu_regs:
+	tm	__LC_CPU_FLAGS+7,_CIF_FPU
+	bnor	%r14
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_done)
+	jhe	1f
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp)
+	jhe	2f
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_fp_ctl)
+	jhe	3f
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_high)
+	jhe	4f
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx)
+	jhe	5f
+	clg	%r9,BASED(.Lcleanup_load_fpu_regs_vx_ctl)
+	jhe	6f
+	lg	%r4,__LC_CURRENT
+	lfpc	__THREAD_FPU_fpc(%r4)
+	tm	__THREAD_FPU_flags+3(%r4),FPU_USE_VX	# VX-enabled task ?
+	lg	%r4,__THREAD_FPU_regs(%r4)	# %r4 <- reg save area
+	jz	3f			# -> no VX, load FP regs
+6:	# Set VX-enablement control
+	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
+	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
+	jo	5f
+	oi	__SF_EMPTY+32+5(%r15),2		# set VX control
+	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
+5:	# Load V0 ..V15 registers
+	VLM	%v0,%v15,0,%r4
+4:	# Load V16..V31 registers
+	VLM	%v16,%v31,256,%r4
+	j	1f
+3:	# Clear VX-enablement control for FP
+	stctg	%c0,%c0,__SF_EMPTY+32(%r15)	# store CR0
+	tm	__SF_EMPTY+32+5(%r15),2		# test VX control
+	jz	2f
+	ni	__SF_EMPTY+32+5(%r15),253	# clear VX control
+	lctlg	%c0,%c0,__SF_EMPTY+32(%r15)
+2:	# Load floating-point registers
+	ld	0,0(%r4)
+	ld	1,8(%r4)
+	ld	2,16(%r4)
+	ld	3,24(%r4)
+	ld	4,32(%r4)
+	ld	5,40(%r4)
+	ld	6,48(%r4)
+	ld	7,56(%r4)
+	ld	8,64(%r4)
+	ld	9,72(%r4)
+	ld	10,80(%r4)
+	ld	11,88(%r4)
+	ld	12,96(%r4)
+	ld	13,104(%r4)
+	ld	14,112(%r4)
+	ld	15,120(%r4)
+1:	# Clear CIF_FPU bit
+	ni	__LC_CPU_FLAGS+7,255-_CIF_FPU
+	lg	%r9,48(%r11)		# return from load_fpu_regs
+	br	%r14
+.Lcleanup_load_fpu_regs_vx_ctl:
+	.quad	.Lload_fpu_regs_vx_ctl
+.Lcleanup_load_fpu_regs_vx:
+	.quad	.Lload_fpu_regs_vx
+.Lcleanup_load_fpu_regs_vx_high:
+	.quad	.Lload_fpu_regs_vx_high
+.Lcleanup_load_fpu_regs_fp_ctl:
+	.quad	.Lload_fpu_regs_fp_ctl
+.Lcleanup_load_fpu_regs_fp:
+	.quad	.Lload_fpu_regs_fp
+.Lcleanup_load_fpu_regs_done:
+	.quad	.Lload_fpu_regs_done
+
+.Lcleanup___ctl_set_vx:
+	stctg	%c0,%c0,__SF_EMPTY(%r15)
+	tm	__SF_EMPTY+5(%r15),2
+	bor	%r14
+	oi	__SF_EMPTY+5(%r15),2
+	lctlg	%c0,%c0,__SF_EMPTY(%r15)
+	lg	%r9,48(%r11)		# return from __ctl_set_vx
+	br	%r14
+
 /*
  * Integer constants
  */
@@ -989,62 +1324,11 @@ cleanup_critical:
 	.quad	.L__critical_start
 .Lcritical_length:
 	.quad	.L__critical_end - .L__critical_start
-
-
 #if IS_ENABLED(CONFIG_KVM)
-/*
- * sie64a calling convention:
- * %r2 pointer to sie control block
- * %r3 guest register save area
- */
-ENTRY(sie64a)
-	stmg	%r6,%r14,__SF_GPRS(%r15)	# save kernel registers
-	stg	%r2,__SF_EMPTY(%r15)		# save control block pointer
-	stg	%r3,__SF_EMPTY+8(%r15)		# save guest register save area
-	xc	__SF_EMPTY+16(16,%r15),__SF_EMPTY+16(%r15) # host id & reason
-	lmg	%r0,%r13,0(%r3)			# load guest gprs 0-13
-	lg	%r14,__LC_GMAP			# get gmap pointer
-	ltgr	%r14,%r14
-	jz	.Lsie_gmap
-	lctlg	%c1,%c1,__GMAP_ASCE(%r14)	# load primary asce
-.Lsie_gmap:
-	lg	%r14,__SF_EMPTY(%r15)		# get control block pointer
-	oi	__SIE_PROG0C+3(%r14),1		# we are going into SIE now
-	tm	__SIE_PROG20+3(%r14),3		# last exit...
-	jnz	.Lsie_done
-	LPP	__SF_EMPTY(%r15)		# set guest id
-	sie	0(%r14)
-.Lsie_done:
-	LPP	__SF_EMPTY+16(%r15)		# set host id
-	ni	__SIE_PROG0C+3(%r14),0xfe	# no longer in SIE
-	lctlg	%c1,%c1,__LC_USER_ASCE		# load primary asce
-# some program checks are suppressing. C code (e.g. do_protection_exception)
-# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
-# instructions between sie64a and .Lsie_done should not cause program
-# interrupts. So lets use a nop (47 00 00 00) as a landing pad.
-# See also HANDLE_SIE_INTERCEPT
-.Lrewind_pad:
-	nop	0
-	.globl sie_exit
-sie_exit:
-	lg	%r14,__SF_EMPTY+8(%r15)		# load guest register save area
-	stmg	%r0,%r13,0(%r14)		# save guest gprs 0-13
-	lmg	%r6,%r14,__SF_GPRS(%r15)	# restore kernel registers
-	lg	%r2,__SF_EMPTY+24(%r15)		# return exit reason code
-	br	%r14
-.Lsie_fault:
-	lghi	%r14,-EFAULT
-	stg	%r14,__SF_EMPTY+24(%r15)	# set exit reason code
-	j	sie_exit
-
-	.align	8
-.Lsie_critical:
+.Lsie_critical_start:
 	.quad	.Lsie_gmap
 .Lsie_critical_length:
 	.quad	.Lsie_done - .Lsie_gmap
-
-	EX_TABLE(.Lrewind_pad,.Lsie_fault)
-	EX_TABLE(sie_exit,.Lsie_fault)
 #endif
 
 	.section .rodata, "a"
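
The heart of the patch is the pair save_fpu_regs/load_fpu_regs together with the new _CIF_FPU work bit: saving the registers marks them as free for reuse (for example by a KVM guest entered through sie64a), while the actual reload is deferred to the system-call or interrupt return path. The following self-contained C sketch models that protocol; it is a reading aid only, and all names (cpu_flags, hw_regs, the struct fpu layout) are illustrative stand-ins, not the kernel's real structures.

/*
 * lazy_fpu_sketch.c - a simplified C model of the lazy FPU save/restore
 * protocol that this patch implements in entry.S.  The register file is
 * reduced to plain arrays; nothing here is the kernel's actual API.
 *
 * Build: cc -Wall -o lazy_fpu_sketch lazy_fpu_sketch.c
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define CIF_FPU 0x04UL		/* "user FPU state needs restoring" flag */

/* Per-task FPU state, a stand-in for the save area that
 * save_fpu_regs/load_fpu_regs operate on. */
struct fpu {
	uint32_t fpc;		/* floating-point control */
	bool	use_vx;		/* task uses the vector extension */
	uint64_t regs[32 * 2];	/* 32 vector registers, 128 bit each */
};

/* Stand-ins for the hardware register file and the lowcore CPU flags. */
static uint64_t hw_regs[32 * 2];
static uint32_t hw_fpc;
static bool cr0_vx;		/* models the CR0.46 vector-enablement bit */
static unsigned long cpu_flags;

/* Mirrors ENTRY(save_fpu_regs): store the live registers into the task
 * and set CIF_FPU so the hardware registers may be clobbered. */
static void save_fpu_regs(struct fpu *fpu)
{
	if (cpu_flags & CIF_FPU)
		return;		/* already saved, hw regs are not live */
	fpu->fpc = hw_fpc;
	/* VX tasks save all 32 vector regs, FP-only tasks save f0-f15. */
	memcpy(fpu->regs, hw_regs, fpu->use_vx ? 32 * 16 : 16 * 8);
	cpu_flags |= CIF_FPU;
}

/* Mirrors load_fpu_regs: runs on the sysc/io return path only when
 * CIF_FPU is set, so a task whose state is still live pays nothing.
 * CR0.46 is set or cleared to match the task, as in the patch. */
static void load_fpu_regs(struct fpu *fpu)
{
	if (!(cpu_flags & CIF_FPU))
		return;		/* hw regs still hold this task's state */
	hw_fpc = fpu->fpc;
	cr0_vx = fpu->use_vx;	/* models the vx_ctl/fp_ctl paths */
	memcpy(hw_regs, fpu->regs, fpu->use_vx ? 32 * 16 : 16 * 8);
	cpu_flags &= ~CIF_FPU;
}

int main(void)
{
	struct fpu task = { .fpc = 0x00080000, .use_vx = true };

	hw_fpc = task.fpc;	/* pretend the task's state is live */
	save_fpu_regs(&task);	/* e.g. before entering SIE */
	load_fpu_regs(&task);	/* lazy restore at return to user space */
	printf("CIF_FPU=%lu CR0.46=%d\n", cpu_flags & CIF_FPU, cr0_vx);
	return 0;
}

Both functions test the flag first and return early, mirroring the bor/bnor %r14 guards in the assembly; that is what lets the return-path work loops call load_fpu_regs unconditionally once _CIF_FPU appears in _CIF_WORK.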
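Because these register moves are multi-instruction sequences that must not be observed half-done, the patch also extends cleanup_critical with start/end label pairs for save_fpu_regs, load_fpu_regs and __ctl_set_vx, plus a separate .Lcleanup_table_sie for the SIE window. The sketch below illustrates only the range-check dispatch idea; the addresses and returned handler names are hypothetical stand-ins for the .quad label pairs and .Lcleanup_* routines.

/*
 * cleanup_critical_sketch.c - models the table-driven dispatch performed
 * by cleanup_critical (the chain of clg/jl instructions), in simplified
 * C with made-up addresses.
 *
 * Build: cc -Wall -o cleanup_sketch cleanup_critical_sketch.c
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* One entry per non-reentrant instruction sequence: if an interrupt
 * lands inside [start, end), the matching fix-up finishes or rolls back
 * the sequence before the handler saves state. */
struct critical_range {
	uintptr_t start, end;
	const char *fixup;	/* stands in for a .Lcleanup_* routine */
};

static const struct critical_range cleanup_table[] = {
	{ 0x1000, 0x1060, ".Lcleanup_save_fpu_regs" },
	{ 0x1060, 0x10c0, ".Lcleanup_load_fpu_regs" },
	{ 0x10c0, 0x10e0, ".Lcleanup___ctl_set_vx" },
};

/* Given the interrupted PSW address, pick the fix-up routine, if any. */
static const char *cleanup_critical(uintptr_t psw_addr)
{
	size_t i;

	for (i = 0; i < sizeof(cleanup_table) / sizeof(cleanup_table[0]); i++)
		if (psw_addr >= cleanup_table[i].start &&
		    psw_addr < cleanup_table[i].end)
			return cleanup_table[i].fixup;
	return "none";	/* not inside a critical section */
}

int main(void)
{
	printf("fixup for 0x1080: %s\n", cleanup_critical(0x1080));
	printf("fixup for 0x2000: %s\n", cleanup_critical(0x2000));
	return 0;
}

In the real code the fix-up routines additionally load %r9 with a safe resume address (for instance, .Lcleanup_sie fast-forwards to sie_exit), which is why BASED() is rebased from system_call to cleanup_critical in this patch.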