author     Anton Blanchard <anton@samba.org>       2015-10-29 03:44:07 +0300
committer  Michael Ellerman <mpe@ellerman.id.au>   2015-12-01 05:52:26 +0300
commit     1f2e25b2d552cade43eacb2edc4e7f01c1cfecb3 (patch)
tree       c5176a84fe7cfbab1c964015a210b627cf0d879e /arch/powerpc/kernel
parent     3eb5d5888dc68c9b187998ca4249b8b9fa481eeb (diff)
powerpc: Remove fp_enable() and vec_enable(), use msr_check_and_{set, clear}()
More consolidation of our MSR available bit handling.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
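
For context, below is a minimal user-space model of the calling pattern restore_tm_state() moves to: set the facility bit only for the duration of the register load, then clear it again, with VSX riding along with FP on VSX-capable CPUs. This is only a sketch under the assumptions named in the comments; fake_msr, have_vsx and the stub load_fp_state() are stand-ins, not the kernel helpers, which live in arch/powerpc/kernel/process.c and read/write the real MSR.

/*
 * Minimal user-space model of the msr_check_and_{set,clear}() pattern
 * (a sketch, not the kernel implementation: fake_msr, have_vsx and the
 * stub load_fp_state() are stand-ins; the real helpers read and write
 * the Machine State Register).
 */
#include <stdio.h>

#define MSR_FP  (1UL << 13)	/* facility bits, values illustrative only */
#define MSR_VEC (1UL << 25)
#define MSR_VSX (1UL << 23)

static unsigned long fake_msr;	/* stands in for the Machine State Register */
static int have_vsx = 1;	/* pretend CPU_FTR_VSX is set */

static void msr_check_and_set(unsigned long bits)
{
	unsigned long newmsr = fake_msr | bits;

	/* FP and VSX are turned on together when the CPU has VSX */
	if (have_vsx && (bits & MSR_FP))
		newmsr |= MSR_VSX;

	if (newmsr != fake_msr)
		fake_msr = newmsr;	/* kernel: write the MSR, isync */
}

static void msr_check_and_clear(unsigned long bits)
{
	unsigned long newmsr = fake_msr & ~bits;

	if (have_vsx && (bits & MSR_FP))
		newmsr &= ~MSR_VSX;

	if (newmsr != fake_msr)
		fake_msr = newmsr;	/* kernel: write the MSR, isync */
}

static void load_fp_state(void)
{
	/* stand-in for load_fp_state(&current->thread.fp_state) */
	printf("loading FP registers with MSR=%#lx\n", fake_msr);
}

int main(void)
{
	msr_check_and_set(MSR_FP);	/* facility on (FP, plus VSX if present) */
	load_fp_state();		/* restore registers while FP is live */
	msr_check_and_clear(MSR_FP);	/* facility back off */
	printf("final MSR=%#lx\n", fake_msr);
	return 0;
}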
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/fpu.S     | 16
-rw-r--r--  arch/powerpc/kernel/process.c |  6
-rw-r--r--  arch/powerpc/kernel/vector.S  | 10
3 files changed, 4 insertions(+), 28 deletions(-)
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 431ab571ed1b..2117eaca3d28 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -77,22 +77,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
 
 /*
- * Enable use of the FPU, and VSX if possible, for the caller.
- */
-_GLOBAL(fp_enable)
-	mfmsr	r3
-	ori	r3,r3,MSR_FP
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
-	oris	r3,r3,MSR_VSX@h
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
-	SYNC
-	MTMSRD(r3)
-	isync			/* (not necessary for arch 2.02 and later) */
-	blr
-
-/*
  * Load state from memory into FP registers including FPSCR.
  * Assumes the caller has enabled FP in the MSR.
  */
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 1eafceefeac9..9f8444b84dde 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -732,13 +732,15 @@ void restore_tm_state(struct pt_regs *regs)
 	msr_diff = current->thread.ckpt_regs.msr & ~regs->msr;
 	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
 	if (msr_diff & MSR_FP) {
-		fp_enable();
+		msr_check_and_set(MSR_FP);
 		load_fp_state(&current->thread.fp_state);
+		msr_check_and_clear(MSR_FP);
 		regs->msr |= current->thread.fpexc_mode;
 	}
 	if (msr_diff & MSR_VEC) {
-		vec_enable();
+		msr_check_and_set(MSR_VEC);
 		load_vr_state(&current->thread.vr_state);
+		msr_check_and_clear(MSR_VEC);
 	}
 	regs->msr |= msr_diff;
 }
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 98675b08efe2..162d0f714941 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -33,16 +33,6 @@ _GLOBAL(do_load_up_transact_altivec)
 #endif
 
 /*
- * Enable use of VMX/Altivec for the caller.
- */
-_GLOBAL(vec_enable)
-	mfmsr	r3
-	oris	r3,r3,MSR_VEC@h
-	MTMSRD(r3)
-	isync
-	blr
-
-/*
  * Load state from memory into VMX registers including VSCR.
  * Assumes the caller has enabled VMX in the MSR.
  */