Diffstat (limited to 'arch/arm64/kernel/fpsimd.c')
-rw-r--r--	arch/arm64/kernel/fpsimd.c	105
1 file changed, 53 insertions(+), 52 deletions(-)
diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c
index e7226c4c7493..87a35364e750 100644
--- a/arch/arm64/kernel/fpsimd.c
+++ b/arch/arm64/kernel/fpsimd.c
@@ -39,7 +39,9 @@
 #include <linux/slab.h>
 #include <linux/sysctl.h>
 
+#include <asm/esr.h>
 #include <asm/fpsimd.h>
+#include <asm/cpufeature.h>
 #include <asm/cputype.h>
 #include <asm/simd.h>
 #include <asm/sigcontext.h>
@@ -64,7 +66,7 @@
  * been loaded into its FPSIMD registers most recently, or whether it has
  * been used to perform kernel mode NEON in the meantime.
  *
- * For (a), we add a 'cpu' field to struct fpsimd_state, which gets updated to
+ * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
  * the id of the current CPU every time the state is loaded onto a CPU. For (b),
  * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
  * address of the userland FPSIMD state of the task that was loaded onto the CPU
@@ -73,7 +75,7 @@
  * With this in place, we no longer have to restore the next FPSIMD state right
  * when switching between tasks. Instead, we can defer this check to userland
  * resume, at which time we verify whether the CPU's fpsimd_last_state and the
- * task's fpsimd_state.cpu are still mutually in sync. If this is the case, we
+ * task's fpsimd_cpu are still mutually in sync. If this is the case, we
  * can omit the FPSIMD restore.
  *
  * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
@@ -90,14 +92,14 @@
  * flag with local_bh_disable() unless softirqs are already masked.
  *
  * For a certain task, the sequence may look something like this:
- * - the task gets scheduled in; if both the task's fpsimd_state.cpu field
+ * - the task gets scheduled in; if both the task's fpsimd_cpu field
  *   contains the id of the current CPU, and the CPU's fpsimd_last_state per-cpu
  *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
  *   cleared, otherwise it is set;
  *
  * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
  *   userland FPSIMD state is copied from memory to the registers, the task's
- *   fpsimd_state.cpu field is set to the id of the current CPU, the current
+ *   fpsimd_cpu field is set to the id of the current CPU, the current
  *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
  *   TIF_FOREIGN_FPSTATE flag is cleared;
  *
@@ -115,7 +117,7 @@
  * whatever is in the FPSIMD registers is not saved to memory, but discarded.
  */
 struct fpsimd_last_state_struct {
-        struct fpsimd_state *st;
+        struct user_fpsimd_state *st;
         bool sve_in_use;
 };
 
@@ -222,7 +224,7 @@ static void sve_user_enable(void)
  *    sets TIF_SVE.
  *
  *    When stored, FPSIMD registers V0-V31 are encoded in
- *    task->fpsimd_state; bits [max : 128] for each of Z0-Z31 are
+ *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
  *    logically zero but not stored anywhere; P0-P15 and FFR are not
  *    stored and have unspecified values from userspace's point of
  *    view. For hygiene purposes, the kernel zeroes them on next use,
@@ -231,9 +233,9 @@ static void sve_user_enable(void)
  *    task->thread.sve_state does not need to be non-NULL, valid or any
  *    particular size: it must not be dereferenced.
  *
- *  * FPSR and FPCR are always stored in task->fpsimd_state irrespctive of
- *    whether TIF_SVE is clear or set, since these are not vector length
- *    dependent.
+ *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
+ *    irrespective of whether TIF_SVE is clear or set, since these are
+ *    not vector length dependent.
  */
 
 /*
@@ -251,10 +253,10 @@ static void task_fpsimd_load(void)
 
         if (system_supports_sve() && test_thread_flag(TIF_SVE))
                 sve_load_state(sve_pffr(current),
-                               &current->thread.fpsimd_state.fpsr,
+                               &current->thread.uw.fpsimd_state.fpsr,
                                sve_vq_from_vl(current->thread.sve_vl) - 1);
         else
-                fpsimd_load_state(&current->thread.fpsimd_state);
+                fpsimd_load_state(&current->thread.uw.fpsimd_state);
 
         if (system_supports_sve()) {
                 /* Toggle SVE trapping for userspace if needed */
@@ -285,15 +287,14 @@ static void task_fpsimd_save(void)
                          * re-enter user with corrupt state.
                          * There's no way to recover, so kill it:
                          */
-                        force_signal_inject(
-                                SIGKILL, 0, current_pt_regs(), 0);
+                        force_signal_inject(SIGKILL, SI_KERNEL, 0);
                         return;
                 }
 
                 sve_save_state(sve_pffr(current),
-                               &current->thread.fpsimd_state.fpsr);
+                               &current->thread.uw.fpsimd_state.fpsr);
         } else
-                fpsimd_save_state(&current->thread.fpsimd_state);
+                fpsimd_save_state(&current->thread.uw.fpsimd_state);
 }
@@ -404,20 +405,21 @@ static int __init sve_sysctl_init(void) { return 0; }
         (SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
 
 /*
- * Transfer the FPSIMD state in task->thread.fpsimd_state to
+ * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
  * task->thread.sve_state.
  *
  * Task can be a non-runnable task, or current. In the latter case,
  * softirqs (and preemption) must be disabled.
  * task->thread.sve_state must point to at least sve_state_size(task)
  * bytes of allocated kernel memory.
- * task->thread.fpsimd_state must be up to date before calling this function.
+ * task->thread.uw.fpsimd_state must be up to date before calling this
+ * function.
  */
 static void fpsimd_to_sve(struct task_struct *task)
 {
         unsigned int vq;
         void *sst = task->thread.sve_state;
-        struct fpsimd_state const *fst = &task->thread.fpsimd_state;
+        struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
         unsigned int i;
 
         if (!system_supports_sve())
@@ -431,7 +433,7 @@ static void fpsimd_to_sve(struct task_struct *task)
 
 /*
  * Transfer the SVE state in task->thread.sve_state to
- * task->thread.fpsimd_state.
+ * task->thread.uw.fpsimd_state.
  *
  * Task can be a non-runnable task, or current. In the latter case,
  * softirqs (and preemption) must be disabled.
@@ -443,7 +445,7 @@ static void sve_to_fpsimd(struct task_struct *task)
 {
         unsigned int vq;
         void const *sst = task->thread.sve_state;
-        struct fpsimd_state *fst = &task->thread.fpsimd_state;
+        struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
         unsigned int i;
 
         if (!system_supports_sve())
@@ -510,7 +512,7 @@ void fpsimd_sync_to_sve(struct task_struct *task)
 }
 
 /*
- * Ensure that task->thread.fpsimd_state is up to date with respect to
+ * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
  * the user task, irrespective of whether SVE is in use or not.
  *
  * This should only be called by ptrace. task must be non-runnable.
@@ -525,21 +527,21 @@ void sve_sync_to_fpsimd(struct task_struct *task)
 
 /*
  * Ensure that task->thread.sve_state is up to date with respect to
- * the task->thread.fpsimd_state.
+ * the task->thread.uw.fpsimd_state.
  *
  * This should only be called by ptrace to merge new FPSIMD register
  * values into a task for which SVE is currently active.
  * task must be non-runnable.
  * task->thread.sve_state must point to at least sve_state_size(task)
  * bytes of allocated kernel memory.
- * task->thread.fpsimd_state must already have been initialised with
+ * task->thread.uw.fpsimd_state must already have been initialised with
  * the new FPSIMD register values to be merged in.
  */
 void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
 {
         unsigned int vq;
         void *sst = task->thread.sve_state;
-        struct fpsimd_state const *fst = &task->thread.fpsimd_state;
+        struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;
         unsigned int i;
 
         if (!test_tsk_thread_flag(task, TIF_SVE))
@@ -757,12 +759,10 @@ fail:
  * Enable SVE for EL1.
  * Intended for use by the cpufeatures code during CPU boot.
  */
-int sve_kernel_enable(void *__always_unused p)
+void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
 {
         write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
         isb();
-
-        return 0;
 }
 
 void __init sve_setup(void)
@@ -831,7 +831,7 @@ asmlinkage void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 {
         /* Even if we chose not to use SVE, the hardware could still trap: */
         if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
-                force_signal_inject(SIGILL, ILL_ILLOPC, regs, 0);
+                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
                 return;
         }
@@ -867,18 +867,20 @@ asmlinkage void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
 asmlinkage void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
 {
         siginfo_t info;
-        unsigned int si_code = FPE_FIXME;
-
-        if (esr & FPEXC_IOF)
-                si_code = FPE_FLTINV;
-        else if (esr & FPEXC_DZF)
-                si_code = FPE_FLTDIV;
-        else if (esr & FPEXC_OFF)
-                si_code = FPE_FLTOVF;
-        else if (esr & FPEXC_UFF)
-                si_code = FPE_FLTUND;
-        else if (esr & FPEXC_IXF)
-                si_code = FPE_FLTRES;
+        unsigned int si_code = FPE_FLTUNK;
+
+        if (esr & ESR_ELx_FP_EXC_TFV) {
+                if (esr & FPEXC_IOF)
+                        si_code = FPE_FLTINV;
+                else if (esr & FPEXC_DZF)
+                        si_code = FPE_FLTDIV;
+                else if (esr & FPEXC_OFF)
+                        si_code = FPE_FLTOVF;
+                else if (esr & FPEXC_UFF)
+                        si_code = FPE_FLTUND;
+                else if (esr & FPEXC_IXF)
+                        si_code = FPE_FLTRES;
+        }
 
         memset(&info, 0, sizeof(info));
         info.si_signo = SIGFPE;
@@ -908,10 +910,9 @@ void fpsimd_thread_switch(struct task_struct *next)
                  * the TIF_FOREIGN_FPSTATE flag so the state will be loaded
                  * upon the next return to userland.
                  */
-                struct fpsimd_state *st = &next->thread.fpsimd_state;
-
-                if (__this_cpu_read(fpsimd_last_state.st) == st
-                    && st->cpu == smp_processor_id())
+                if (__this_cpu_read(fpsimd_last_state.st) ==
+                        &next->thread.uw.fpsimd_state
+                    && next->thread.fpsimd_cpu == smp_processor_id())
                         clear_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
                 else
                         set_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE);
@@ -927,7 +928,8 @@ void fpsimd_flush_thread(void)
 
         local_bh_disable();
 
-        memset(&current->thread.fpsimd_state, 0, sizeof(struct fpsimd_state));
+        memset(&current->thread.uw.fpsimd_state, 0,
+               sizeof(current->thread.uw.fpsimd_state));
         fpsimd_flush_task_state(current);
 
         if (system_supports_sve()) {
@@ -986,7 +988,7 @@ void fpsimd_preserve_current_state(void)
 
 /*
  * Like fpsimd_preserve_current_state(), but ensure that
- * current->thread.fpsimd_state is updated so that it can be copied to
+ * current->thread.uw.fpsimd_state is updated so that it can be copied to
  * the signal frame.
  */
 void fpsimd_signal_preserve_current_state(void)
@@ -1004,11 +1006,10 @@ static void fpsimd_bind_to_cpu(void)
 {
         struct fpsimd_last_state_struct *last =
                 this_cpu_ptr(&fpsimd_last_state);
-        struct fpsimd_state *st = &current->thread.fpsimd_state;
 
-        last->st = st;
+        last->st = &current->thread.uw.fpsimd_state;
         last->sve_in_use = test_thread_flag(TIF_SVE);
-        st->cpu = smp_processor_id();
+        current->thread.fpsimd_cpu = smp_processor_id();
 }
 
 /*
@@ -1043,7 +1044,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
 
         local_bh_disable();
 
-        current->thread.fpsimd_state.user_fpsimd = *state;
+        current->thread.uw.fpsimd_state = *state;
         if (system_supports_sve() && test_thread_flag(TIF_SVE))
                 fpsimd_to_sve(current);
@@ -1060,7 +1061,7 @@ void fpsimd_update_current_state(struct user_fpsimd_state const *state)
  */
 void fpsimd_flush_task_state(struct task_struct *t)
 {
-        t->thread.fpsimd_state.cpu = NR_CPUS;
+        t->thread.fpsimd_cpu = NR_CPUS;
 }
 
 static inline void fpsimd_flush_cpu_state(void)
@@ -1159,7 +1160,7 @@ EXPORT_SYMBOL(kernel_neon_end);
 
 #ifdef CONFIG_EFI
 
-static DEFINE_PER_CPU(struct fpsimd_state, efi_fpsimd_state);
+static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
 static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
 static DEFINE_PER_CPU(bool, efi_sve_state_used);
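
The do_fpsimd_exc() hunk above carries the one behavioural change that is easy to miss among the renames: the FPEXC_* cause bits in the ESR are only meaningful when the TFV ("trapped fault valid") bit is set, so the decode is now gated on ESR_ELx_FP_EXC_TFV and otherwise falls back to FPE_FLTUNK rather than the placeholder FPE_FIXME. What follows is a minimal userspace sketch of that decode, not kernel code: the bit positions are assumptions mirrored from the kernel's asm/esr.h and asm/fpsimd.h at the time of this patch, and the fp_si_code enum is a local stand-in for the kernel's FPE_* siginfo constants, with different numeric values.

#include <stdio.h>

/* Assumed mirrors of asm/esr.h and asm/fpsimd.h definitions: */
#define ESR_ELx_FP_EXC_TFV      (1UL << 23)     /* cause bits below are valid */
#define FPEXC_IOF               (1 << 0)        /* invalid operation */
#define FPEXC_DZF               (1 << 1)        /* divide by zero */
#define FPEXC_OFF               (1 << 2)        /* overflow */
#define FPEXC_UFF               (1 << 3)        /* underflow */
#define FPEXC_IXF               (1 << 4)        /* inexact */

/* Local stand-ins for the kernel's FPE_* si_code values: */
enum fp_si_code {
        FPE_FLTUNK,     /* cause unknown or untrusted */
        FPE_FLTINV,
        FPE_FLTDIV,
        FPE_FLTOVF,
        FPE_FLTUND,
        FPE_FLTRES,
};

/* Same shape as the post-patch decode in do_fpsimd_exc(). */
static enum fp_si_code fp_exc_si_code(unsigned long esr)
{
        enum fp_si_code si_code = FPE_FLTUNK;

        if (esr & ESR_ELx_FP_EXC_TFV) {         /* only trust the bits if TFV is set */
                if (esr & FPEXC_IOF)
                        si_code = FPE_FLTINV;
                else if (esr & FPEXC_DZF)
                        si_code = FPE_FLTDIV;
                else if (esr & FPEXC_OFF)
                        si_code = FPE_FLTOVF;
                else if (esr & FPEXC_UFF)
                        si_code = FPE_FLTUND;
                else if (esr & FPEXC_IXF)
                        si_code = FPE_FLTRES;
        }

        return si_code;
}

int main(void)
{
        /* DZF set but TFV clear: the cause bits are ignored, decode is FLTUNK */
        printf("TFV clear: %s\n",
               fp_exc_si_code(FPEXC_DZF) == FPE_FLTUNK ? "FLTUNK" : "?");
        /* TFV and DZF both set: decode reports a divide-by-zero */
        printf("TFV set:   %s\n",
               fp_exc_si_code(ESR_ELx_FP_EXC_TFV | FPEXC_DZF) == FPE_FLTDIV ?
               "FLTDIV" : "?");
        return 0;
}

The TFV gate matters because an implementation may take a trapped FP exception without populating the individual cause bits; decoding them unconditionally, as the old code did, could hand userspace a misleading si_code in the resulting SIGFPE.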