author		Heiko Carstens <hca@linux.ibm.com>	2024-02-03 13:45:06 +0300
committer	Heiko Carstens <hca@linux.ibm.com>	2024-02-16 16:30:15 +0300
commit		f4e3de75d0c4ebe9bbbfef19d7845ee70cb017bd (patch)
tree		0a2fde7fc7d1943631784242226ce9ba5019705f
parent		88d8136a0896e32fc39f90788eaa5c7bdccc9fb0 (diff)
download	linux-f4e3de75d0c4ebe9bbbfef19d7845ee70cb017bd.tar.xz
s390/fpu: provide and use lfpc, sfpc, and stfpc inline assemblies
Instead of open-coding lfpc, sfpc, and stfpc inline assemblies at
several locations, provide an fpu_* function for each instruction and
use the function instead.
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
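To make the pattern easier to follow, here is a minimal sketch of the kind of wrapper this patch introduces, reduced to the sfpc/stfpc pair taken from the hunks below. It is not the patch itself: the kernel-only instrument_read()/instrument_write() annotations are omitted, __always_inline is defined locally instead of coming from the kernel's compiler headers, and reset_fpc_example() is a hypothetical caller added purely for illustration. The snippet only builds with GCC or Clang targeting s390.

/* Sketch only: the wrapper pattern from this patch, outside the kernel tree. */
#define __always_inline inline __attribute__((always_inline))

/* Set the floating point control register from a general purpose register. */
static __always_inline void fpu_sfpc(unsigned int fpc)
{
	asm volatile("sfpc %[fpc]"
		     :
		     : [fpc] "d" (fpc)
		     : "memory");
}

/* Store the floating point control register to memory. */
static __always_inline void fpu_stfpc(unsigned int *fpc)
{
	asm volatile("stfpc %[fpc]"
		     : [fpc] "=Q" (*fpc)
		     :
		     : "memory");
}

/* Hypothetical caller: save the current FPC, then clear it. */
static void reset_fpc_example(unsigned int *saved)
{
	fpu_stfpc(saved);
	fpu_sfpc(0);
}

Note that, unlike the open-coded variants removed from fpu.c and process.c below, the new helpers carry a "memory" clobber, so the compiler cannot reorder memory accesses across the control register update.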
-rw-r--r--	arch/s390/include/asm/fpu-insn.h	26
-rw-r--r--	arch/s390/kernel/fpu.c			14
-rw-r--r--	arch/s390/kernel/process.c		2
3 files changed, 32 insertions, 10 deletions
diff --git a/arch/s390/include/asm/fpu-insn.h b/arch/s390/include/asm/fpu-insn.h
index 1ce8e2f9786c..df2cad95b598 100644
--- a/arch/s390/include/asm/fpu-insn.h
+++ b/arch/s390/include/asm/fpu-insn.h
@@ -45,6 +45,15 @@ static __always_inline void fpu_ld(unsigned short fpr, freg_t *reg)
 		     : "memory");
 }
 
+static __always_inline void fpu_lfpc(unsigned int *fpc)
+{
+	instrument_read(fpc, sizeof(*fpc));
+	asm volatile("lfpc %[fpc]"
+		     :
+		     : [fpc] "Q" (*fpc)
+		     : "memory");
+}
+
 /**
  * fpu_lfpc_safe - Load floating point control register safely.
  * @fpc: new value for floating point control register
@@ -82,5 +91,22 @@ static __always_inline void fpu_std(unsigned short fpr, freg_t *reg)
 		     : "memory");
 }
 
+static __always_inline void fpu_sfpc(unsigned int fpc)
+{
+	asm volatile("sfpc %[fpc]"
+		     :
+		     : [fpc] "d" (fpc)
+		     : "memory");
+}
+
+static __always_inline void fpu_stfpc(unsigned int *fpc)
+{
+	instrument_write(fpc, sizeof(*fpc));
+	asm volatile("stfpc %[fpc]"
+		     : [fpc] "=Q" (*fpc)
+		     :
+		     : "memory");
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_S390_FPU_INSN_H */
diff --git a/arch/s390/kernel/fpu.c b/arch/s390/kernel/fpu.c
index f25c54caf32b..6bfd4d0f33e1 100644
--- a/arch/s390/kernel/fpu.c
+++ b/arch/s390/kernel/fpu.c
@@ -17,10 +17,8 @@ void __kernel_fpu_begin(struct kernel_fpu *state, u32 flags)
 	 * in use by the previous context.
 	 */
 	flags &= state->mask;
-	if (flags & KERNEL_FPC) {
-		/* Save floating point control */
-		asm volatile("stfpc %0" : "=Q" (state->fpc));
-	}
+	if (flags & KERNEL_FPC)
+		fpu_stfpc(&state->fpc);
 	if (!cpu_has_vx()) {
 		if (flags & KERNEL_VXR_LOW)
 			save_fp_regs(state->fprs);
@@ -80,10 +78,8 @@ void __kernel_fpu_end(struct kernel_fpu *state, u32 flags)
 	 * current context.
 	 */
 	flags &= state->mask;
-	if (flags & KERNEL_FPC) {
-		/* Restore floating-point controls */
-		asm volatile("lfpc %0" : : "Q" (state->fpc));
-	}
+	if (flags & KERNEL_FPC)
+		fpu_lfpc(&state->fpc);
 	if (!cpu_has_vx()) {
 		if (flags & KERNEL_VXR_LOW)
 			load_fp_regs(state->fprs);
@@ -176,7 +172,7 @@ void save_fpu_regs(void)
 
 	state = &current->thread.fpu;
 	regs = current->thread.fpu.regs;
-	asm volatile("stfpc %0" : "=Q" (state->fpc));
+	fpu_stfpc(&state->fpc);
 	if (likely(cpu_has_vx())) {
 		asm volatile("lgr 1,%0\n"
 			     "VSTM 0,15,0,1\n"
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index e502192da5f7..b0578ea230e7 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -191,7 +191,7 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
 void execve_tail(void)
 {
 	current->thread.fpu.fpc = 0;
-	asm volatile("sfpc %0" : : "d" (0));
+	fpu_sfpc(0);
 }
 
 struct task_struct *__switch_to(struct task_struct *prev, struct task_struct *next)