author     | Simon Guo <wei.guo.simon@gmail.com> | 2018-05-23 10:01:59 +0300
committer  | Paul Mackerras <paulus@ozlabs.org>  | 2018-06-01 03:30:00 +0300
commit     | 13989b65ebb74c05c577dbbcc111e1fdd7da763a (patch)
tree       | 5430a76d199a1bde726f9de6849a1000e88474b9 /arch/powerpc/kvm/book3s_pr.c
parent     | 8d2e2fc5e082a7b3f858cefb6e65700f28d2955e (diff)
download   | linux-13989b65ebb74c05c577dbbcc111e1fdd7da763a.tar.xz
KVM: PPC: Book3S PR: Add math support for PR KVM HTM
The math registers are saved into vcpu->arch.fp/vr and the corresponding
vcpu->arch.fp_tm/vr_tm areas.

We flush or give up the math registers into vcpu->arch.fp/vr before saving
the transaction state. After the transaction state is restored, the math
registers are loaded back into the hardware registers.

If an FP/VEC/VSX unavailable exception occurs while a transaction is
active, the checkpointed math register content might be incorrect, and we
would need a treclaim. / load-the-correct-checkpoint-values / trechkpt.
sequence to retry the transaction, which would complicate the solution.
To avoid this, whenever the guest MSR has TM enabled we keep the hardware
guest MSR math bits (shadow_msr) consistent with the MSR value the guest
sees (kvmppc_get_msr()). All FP/VEC/VSX unavailable exceptions can then be
delivered to the guest, which handles them itself.
Signed-off-by: Simon Guo <wei.guo.simon@gmail.com>
Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Diffstat (limited to 'arch/powerpc/kvm/book3s_pr.c')
-rw-r--r-- | arch/powerpc/kvm/book3s_pr.c | 35
1 file changed, 35 insertions, 0 deletions
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index a14721f034fb..dcb577fde9cd 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -308,6 +308,28 @@ static inline void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
 	tm_disable();
 }
 
+/* loadup math bits which is enabled at kvmppc_get_msr() but not enabled at
+ * hardware.
+ */
+static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
+{
+	ulong exit_nr;
+	ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
+		(MSR_FP | MSR_VEC | MSR_VSX);
+
+	if (!ext_diff)
+		return;
+
+	if (ext_diff == MSR_FP)
+		exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
+	else if (ext_diff == MSR_VEC)
+		exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
+	else
+		exit_nr = BOOK3S_INTERRUPT_VSX;
+
+	kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
+}
+
 void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
 {
 	if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
@@ -315,6 +337,8 @@ void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
 		return;
 	}
 
+	kvmppc_giveup_ext(vcpu, MSR_VSX);
+
 	preempt_disable();
 	_kvmppc_save_tm_pr(vcpu, mfmsr());
 	preempt_enable();
@@ -324,12 +348,18 @@ void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
 {
 	if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
 		kvmppc_restore_tm_sprs(vcpu);
+		if (kvmppc_get_msr(vcpu) & MSR_TM)
+			kvmppc_handle_lost_math_exts(vcpu);
 		return;
 	}
 
 	preempt_disable();
 	_kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
 	preempt_enable();
+
+	if (kvmppc_get_msr(vcpu) & MSR_TM)
+		kvmppc_handle_lost_math_exts(vcpu);
+
 }
 #endif
 
@@ -468,6 +498,11 @@ static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
 	/* Preload FPU if it's enabled */
 	if (kvmppc_get_msr(vcpu) & MSR_FP)
 		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
+
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+	if (kvmppc_get_msr(vcpu) & MSR_TM)
+		kvmppc_handle_lost_math_exts(vcpu);
+#endif
 }
 
 void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
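The hunks above reload the "lost" math extensions through
kvmppc_handle_lost_math_exts(); the shadow-MSR consistency mentioned in the
commit message is not part of this diff. What follows is a minimal, hedged
sketch of that idea only, assuming a hypothetical helper name and standalone
logic rather than the actual book3s_pr.c code: while the guest MSR has TM
enabled, the shadow MSR math bits simply mirror the guest's.

/*
 * Illustrative sketch only, not code from this commit: keep the hardware
 * (shadow) MSR math bits consistent with the MSR the guest sees whenever
 * the guest MSR has TM enabled.  The helper name is hypothetical.
 */
static ulong sketch_sync_shadow_math_bits(ulong guest_msr, ulong shadow_msr)
{
	ulong math_bits = MSR_FP | MSR_VEC | MSR_VSX;

	if (guest_msr & MSR_TM) {
		/* Mirror the guest's FP/VEC/VSX enable bits into the shadow MSR. */
		shadow_msr &= ~math_bits;
		shadow_msr |= guest_msr & math_bits;
	}

	return shadow_msr;
}

With the math bits mirrored this way, an FP/VEC/VSX unavailable exception
taken while TM is enabled can be delivered straight to the guest, as the
commit message describes.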