author     Avi Kivity <avi@qumranet.com>  2007-10-09 14:12:19 +0400
committer  Avi Kivity <avi@qumranet.com>  2007-10-22 14:03:28 +0400
commit     1b6269db3f83396c2fd2c8d0f3e0f37ac0e6ba05 (patch)
tree       a8e99f6f6690e2024b9a08096a61d884b57dc8bc /drivers/kvm
parent     7f2145ad6f3e7060147a2a4c4db35c641ff61b5c (diff)
download   linux-1b6269db3f83396c2fd2c8d0f3e0f37ac0e6ba05.tar.xz
KVM: VMX: Handle NMIs before enabling interrupts and preemption
This makes sure we handle NMI on the current cpu, and that we don't service
maskable interrupts before non-maskable ones.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm')
-rw-r--r--  drivers/kvm/vmx.c | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
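
A note on the magic number before the diff: the VM-exit interruption information field packs the event vector into bits 7:0 and the interruption type into bits 10:8, and type 2 is NMI, so (intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200 asks "was this exit caused by an NMI?". The sketch below is a minimal user-space illustration of that decode; INTR_INFO_INTR_TYPE_MASK mirrors the 0x700 mask used by vmx.c, while INTR_TYPE_NMI, exit_caused_by_nmi() and the sample values are illustrative only.

#include <stdint.h>
#include <stdio.h>

/* VM-exit interruption information field (Intel VMX):
 *   bits  7:0   vector
 *   bits 10:8   interruption type (2 == NMI)
 *   bit  11     error code valid
 *   bit  31     valid
 */
#define INTR_INFO_INTR_TYPE_MASK 0x700u    /* bits 10:8, as in vmx.c */
#define INTR_TYPE_NMI            (2u << 8) /* == 0x200 */

static int exit_caused_by_nmi(uint32_t intr_info)
{
	return (intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI;
}

int main(void)
{
	/* valid | type NMI | vector 2 */
	printf("%d\n", exit_caused_by_nmi(0x80000202u));  /* prints 1 */
	/* valid | error code | type hw exception | vector 14 (#PF) */
	printf("%d\n", exit_caused_by_nmi(0x80000b0eu));  /* prints 0 */
	return 0;
}
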
diff --git a/drivers/kvm/vmx.c b/drivers/kvm/vmx.c
index 4f115a8e45ef..bcc1e398a976 100644
--- a/drivers/kvm/vmx.c
+++ b/drivers/kvm/vmx.c
@@ -1760,10 +1760,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		set_bit(irq / BITS_PER_LONG, &vcpu->irq_summary);
 	}
 
-	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */
-		asm ("int $2");
-		return 1;
-	}
+	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+		return 1;  /* already handled by vmx_vcpu_run() */
 
 	if (is_no_device(intr_info)) {
 		vmx_fpu_activate(vcpu);
@@ -2196,6 +2194,7 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu)
 static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	u32 intr_info;
 
 	/*
 	 * Loading guest fpu may have cleared host cr0.ts
@@ -2322,6 +2321,12 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	asm ("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
 	vmx->launched = 1;
+
+	intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+	/* We need to handle NMIs before interrupts are enabled */
+	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */
+		asm("int $2");
 }
 
 static void vmx_inject_page_fault(struct kvm_vcpu *vcpu,
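
Taken together, the patch moves the NMI forwarding into vmx_vcpu_run() itself, while host interrupts are still disabled after the VM exit, so the NMI is serviced on the CPU that took the exit and before any maskable interrupt or preemption point; handle_exception() then merely acknowledges the exit. Below is a self-contained sketch of that ordering, with stub functions standing in for the real VMX plumbing; only the mask/0x200 check and the "int $2" idea come from the patch, everything else is illustrative.

#include <stdint.h>
#include <stdio.h>

#define INTR_INFO_INTR_TYPE_MASK 0x700u   /* same mask as vmx.c */

/* Stubs standing in for the real VMX plumbing. */
static void local_irq_disable_stub(void) { puts("host irqs off"); }
static void local_irq_enable_stub(void)  { puts("host irqs on"); }
static void run_guest_stub(void)         { puts("VMLAUNCH/VMRESUME ... VM exit"); }
static uint32_t read_exit_intr_info_stub(void) { return 0x80000202u; /* NMI exit */ }
static void self_nmi_stub(void)          { puts("int $2 -> NMI serviced on this cpu"); }

/* Ordering the patch establishes: forward the NMI while interrupts are
 * still disabled, so no maskable interrupt (and no preemption, hence no
 * migration to another cpu) can be serviced first. */
static void vcpu_run_sketch(void)
{
	local_irq_disable_stub();
	run_guest_stub();                    /* returns with host irqs still off */

	uint32_t intr_info = read_exit_intr_info_stub();
	if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200)   /* nmi */
		self_nmi_stub();

	local_irq_enable_stub();             /* maskable interrupts only after this */
}

int main(void)
{
	vcpu_run_sketch();
	return 0;
}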