| author | Anton Blanchard <anton@samba.org> | 2015-10-29 03:44:05 +0300 |
|---|---|---|
| committer | Michael Ellerman <mpe@ellerman.id.au> | 2015-12-01 05:52:25 +0300 |
| commit | dc4fbba11e4661a6a77a1f89ba32f9082e6395ff (patch) | |
| tree | 567037dc8e93063615e558eda1c80f8a82154749 /arch/powerpc/kvm | |
| parent | a0e72cf12b1a1f159b6822ed2e1e41893d996fc7 (diff) | |
| download | linux-dc4fbba11e4661a6a77a1f89ba32f9082e6395ff.tar.xz | |
powerpc: Create disable_kernel_{fp,altivec,vsx,spe}()
The enable_kernel_*() functions leave the relevant MSR bits enabled
until we exit the kernel sometime later. Create disable versions
that wrap the kernel use of FP, Altivec, VSX or SPE.

While we don't want to disable these in the normal case for performance
reasons (MSR writes are slow), the disable calls will be used by a debug
boot option that does disable the bits and thereby catches bad uses in
other areas of the kernel.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
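
The pattern being introduced is visible in every hunk below: a kernel user of FP/Altivec/SPE state brackets the unit's use between enable_kernel_*() and the new disable_kernel_*() call, all inside a preempt-disabled section. A minimal sketch of that shape (illustrative only; the function name and surrounding KVM context are stand-ins, not code from this commit):

	#include <linux/kvm_host.h>	/* struct kvm_vcpu */
	#include <linux/preempt.h>	/* preempt_disable()/preempt_enable() */
	#include <asm/switch_to.h>	/* enable/disable_kernel_fp() */

	/* Bracket kernel FP use so a debug option can catch stray FP access. */
	static void kvm_fp_section_sketch(struct kvm_vcpu *vcpu)
	{
		preempt_disable();		/* FP state must not migrate mid-use */
		enable_kernel_fp();		/* grant the kernel use of the FPU */
		load_fp_state(&vcpu->arch.fp);	/* the actual FP work */
		disable_kernel_fp();		/* new: mark the FP section finished */
		preempt_enable();
	}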
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r-- | arch/powerpc/kvm/book3s_paired_singles.c | 1 +
-rw-r--r-- | arch/powerpc/kvm/book3s_pr.c | 4 ++++
-rw-r--r-- | arch/powerpc/kvm/booke.c | 4 ++++

3 files changed, 9 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index a759d9adb0b6..eab96cfe82fa 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -1265,6 +1265,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
 	if (rcomp)
 		kvmppc_set_cr(vcpu, cr);
 
+	disable_kernel_fp();
 	preempt_enable();
 
 	return emulated;
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 64891b081ad5..49f5dad1bd45 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -751,6 +751,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 		preempt_disable();
 		enable_kernel_fp();
 		load_fp_state(&vcpu->arch.fp);
+		disable_kernel_fp();
 		t->fp_save_area = &vcpu->arch.fp;
 		preempt_enable();
 	}
@@ -760,6 +761,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
 		preempt_disable();
 		enable_kernel_altivec();
 		load_vr_state(&vcpu->arch.vr);
+		disable_kernel_altivec();
 		t->vr_save_area = &vcpu->arch.vr;
 		preempt_enable();
 #endif
@@ -788,6 +790,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
 		preempt_disable();
 		enable_kernel_fp();
 		load_fp_state(&vcpu->arch.fp);
+		disable_kernel_fp();
 		preempt_enable();
 	}
 #ifdef CONFIG_ALTIVEC
@@ -795,6 +798,7 @@ static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
 		preempt_disable();
 		enable_kernel_altivec();
 		load_vr_state(&vcpu->arch.vr);
+		disable_kernel_altivec();
 		preempt_enable();
 	}
 #endif
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index fd5875179e5c..778ef86e187e 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -98,6 +98,7 @@ void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
 	preempt_disable();
 	enable_kernel_spe();
 	kvmppc_save_guest_spe(vcpu);
+	disable_kernel_spe();
 	vcpu->arch.shadow_msr &= ~MSR_SPE;
 	preempt_enable();
 }
@@ -107,6 +108,7 @@ static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
 	preempt_disable();
 	enable_kernel_spe();
 	kvmppc_load_guest_spe(vcpu);
+	disable_kernel_spe();
 	vcpu->arch.shadow_msr |= MSR_SPE;
 	preempt_enable();
 }
@@ -141,6 +143,7 @@ static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
 	if (!(current->thread.regs->msr & MSR_FP)) {
 		enable_kernel_fp();
 		load_fp_state(&vcpu->arch.fp);
+		disable_kernel_fp();
 		current->thread.fp_save_area = &vcpu->arch.fp;
 		current->thread.regs->msr |= MSR_FP;
 	}
@@ -182,6 +185,7 @@ static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
 	if (!(current->thread.regs->msr & MSR_VEC)) {
 		enable_kernel_altivec();
 		load_vr_state(&vcpu->arch.vr);
+		disable_kernel_altivec();
 		current->thread.vr_save_area = &vcpu->arch.vr;
 		current->thread.regs->msr |= MSR_VEC;
 	}
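
Note that the diffstat above is limited to arch/powerpc/kvm, so the definitions of the new helpers themselves are not shown here. Going by the commit message alone, a minimal sketch of their initial shape, assuming they start out as no-ops that the planned debug boot option later extends to actually clear the MSR bits (the real definitions live outside this directory, likely in arch/powerpc/include/asm/switch_to.h):

	/* Sketch: cheap by default, since unconditional MSR writes are slow;
	 * a debug boot option can turn these into real MSR-clearing checks. */
	static inline void disable_kernel_fp(void) { }
	static inline void disable_kernel_altivec(void) { }
	static inline void disable_kernel_vsx(void) { }
	static inline void disable_kernel_spe(void) { }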