author | Vitaly Kuznetsov <vkuznets@redhat.com> | 2019-08-13 16:53:35 +0300
---|---|---
committer | Paolo Bonzini <pbonzini@redhat.com> | 2019-08-22 11:09:22 +0300
commit | c8e16b78c6142afea428dc316c900644951cf7f3 (patch) |
tree | 8f394f9f0619860d30a3954580420f80c2e899e9 /arch |
parent | e7134c1bb5af1ca6dcbd983e036e67b82e6a2de4 (diff) |
download | linux-c8e16b78c6142afea428dc316c900644951cf7f3.tar.xz |
x86: KVM: svm: eliminate hardcoded RIP advancement from vmrun_interception()
Just like we do with other intercepts, in vmrun_interception() we should be
using kvm_skip_emulated_instruction() and not a hardcoded RIP += 3. It is also
wrong to advance RIP before nested_svm_vmrun(): that function can fail and
call kvm_inject_gp(), and the injected #GP would then find RIP already
pointing past the faulting VMRUN instead of at it.
We can't call kvm_skip_emulated_instruction() after nested_svm_vmrun()
returns, so move the call inside it.
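For illustration only (this is not code from the patch), here is a minimal sketch of the intercept-handler pattern the message describes: fault paths run while RIP still points at the intercepted instruction, and RIP is advanced via kvm_skip_emulated_instruction() only once emulation is going to complete. example_intercept() and emulation_would_fault() are hypothetical stand-ins; kvm_inject_gp() and kvm_skip_emulated_instruction() are the real KVM helpers the patch relies on.

```c
/*
 * Illustrative sketch only -- not taken from the patch.
 * example_intercept() and emulation_would_fault() are hypothetical
 * stand-ins; kvm_inject_gp() and kvm_skip_emulated_instruction()
 * are the real KVM helpers.
 */
static int example_intercept(struct vcpu_svm *svm)
{
	int ret;

	if (emulation_would_fault(svm)) {
		/* Fault first: RIP still points at the intercepted instruction. */
		kvm_inject_gp(&svm->vcpu, 0);
		return 1;
	}

	/*
	 * Only now advance RIP past the instruction, and keep the helper's
	 * return value so single-step and userspace-exit handling work.
	 */
	ret = kvm_skip_emulated_instruction(&svm->vcpu);

	/* ... emulate the instruction proper ... */

	return ret;
}
```

The diff below applies this ordering inside nested_svm_vmrun(), so vmrun_interception() can simply propagate its return value.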
Suggested-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/svm.c | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 248ea13d6853..6708bb82d8a8 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3585,7 +3585,7 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 
 static int nested_svm_vmrun(struct vcpu_svm *svm)
 {
-	int rc;
+	int ret;
 	struct vmcb *nested_vmcb;
 	struct vmcb *hsave = svm->nested.hsave;
 	struct vmcb *vmcb = svm->vmcb;
@@ -3594,13 +3594,16 @@ static int nested_svm_vmrun(struct vcpu_svm *svm)
 
 	vmcb_gpa = svm->vmcb->save.rax;
 
-	rc = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
-	if (rc) {
-		if (rc == -EINVAL)
-			kvm_inject_gp(&svm->vcpu, 0);
+	ret = kvm_vcpu_map(&svm->vcpu, gpa_to_gfn(vmcb_gpa), &map);
+	if (ret == -EINVAL) {
+		kvm_inject_gp(&svm->vcpu, 0);
 		return 1;
+	} else if (ret) {
+		return kvm_skip_emulated_instruction(&svm->vcpu);
 	}
 
+	ret = kvm_skip_emulated_instruction(&svm->vcpu);
+
 	nested_vmcb = map.hva;
 
 	if (!nested_vmcb_checks(nested_vmcb)) {
@@ -3611,7 +3614,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm)
 
 		kvm_vcpu_unmap(&svm->vcpu, &map, true);
 
-		return 1;
+		return ret;
 	}
 
 	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
@@ -3664,7 +3667,7 @@ static int nested_svm_vmrun(struct vcpu_svm *svm)
 		nested_svm_vmexit(svm);
 	}
 
-	return 1;
+	return ret;
 }
 
 static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
@@ -3740,9 +3743,6 @@ static int vmrun_interception(struct vcpu_svm *svm)
 	if (nested_svm_check_permissions(svm))
 		return 1;
 
-	/* Save rip after vmrun instruction */
-	kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
-
 	return nested_svm_vmrun(svm);
 }