Diffstat (limited to 'arch/x86/kvm/vmx/vmx.c')
 arch/x86/kvm/vmx/vmx.c | 576 +-
 1 file changed, 380 insertions(+), 196 deletions(-)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 89c766fad889..36c771728c8c 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -437,6 +437,11 @@ static const struct kvm_vmx_segment_field {
VMX_SEGMENT_FIELD(LDTR),
};
+static inline void vmx_segment_cache_clear(struct vcpu_vmx *vmx)
+{
+ vmx->segment_cache.bitmask = 0;
+}
+
static unsigned long host_idt_base;
/*
@@ -1306,10 +1311,12 @@ after_clear_sn:
pi_set_on(pi_desc);
}
-void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
+void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
+ struct loaded_vmcs *buddy)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
+ struct vmcs *prev;
if (!already_loaded) {
loaded_vmcs_clear(vmx->loaded_vmcs);
@@ -1328,16 +1335,28 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
local_irq_enable();
}
- if (per_cpu(current_vmcs, cpu) != vmx->loaded_vmcs->vmcs) {
+ prev = per_cpu(current_vmcs, cpu);
+ if (prev != vmx->loaded_vmcs->vmcs) {
per_cpu(current_vmcs, cpu) = vmx->loaded_vmcs->vmcs;
vmcs_load(vmx->loaded_vmcs->vmcs);
- indirect_branch_prediction_barrier();
+
+ /*
+ * No indirect branch prediction barrier needed when switching
+ * the active VMCS within a guest, e.g. on nested VM-Enter.
+ * The L1 VMM can protect itself with retpolines, IBPB or IBRS.
+ */
+ if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
+ indirect_branch_prediction_barrier();
}
if (!already_loaded) {
void *gdt = get_current_gdt_ro();
unsigned long sysenter_esp;
+ /*
+ * Flush all EPTP/VPID contexts, the new pCPU may have stale
+ * TLB entries from its previous association with the vCPU.
+ */
kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
/*
@@ -1364,11 +1383,11 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu)
* Switches to specified vcpu, until a matching vcpu_put(), but assumes
* vcpu mutex is already taken.
*/
-void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
- vmx_vcpu_load_vmcs(vcpu, cpu);
+ vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
vmx_vcpu_pi_load(vcpu, cpu);
@@ -1546,7 +1565,7 @@ static int vmx_rtit_ctl_check(struct kvm_vcpu *vcpu, u64 data)
static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
- unsigned long rip;
+ unsigned long rip, orig_rip;
/*
* Using VMCS.VM_EXIT_INSTRUCTION_LEN on EPT misconfig depends on
@@ -1558,8 +1577,17 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
*/
if (!static_cpu_has(X86_FEATURE_HYPERVISOR) ||
to_vmx(vcpu)->exit_reason != EXIT_REASON_EPT_MISCONFIG) {
- rip = kvm_rip_read(vcpu);
- rip += vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+ orig_rip = kvm_rip_read(vcpu);
+ rip = orig_rip + vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
+#ifdef CONFIG_X86_64
+ /*
+ * We need to mask out the high 32 bits of RIP if not in 64-bit
+ * mode, but just finding out that we are in 64-bit mode is
+ * quite expensive. Only do it if there was a carry.
+ */
+ if (unlikely(((rip ^ orig_rip) >> 31) == 3) && !is_64_bit_mode(vcpu))
+ rip = (u32)rip;
+#endif
kvm_rip_write(vcpu, rip);
} else {
if (!kvm_emulate_instruction(vcpu, EMULTYPE_SKIP))
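The carry check added above relies on the fact that a carry out of bit 31 flips both bit 31 and bit 32 of RIP, so XORing the old and new values and shifting right by 31 yields exactly 3. A minimal standalone sketch of that arithmetic, in ordinary userspace C rather than kernel code:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t orig_rip = 0x00000000fffffffeULL;	/* just below the 4 GiB boundary */
	uint64_t rip = orig_rip + 3;			/* skip a 3-byte instruction */

	/* A carry out of bit 31 flips bits 31 and 32, so the shifted XOR is 3. */
	if (((rip ^ orig_rip) >> 31) == 3)
		rip = (uint32_t)rip;			/* truncate as a 32-bit guest would */

	printf("rip = 0x%llx\n", (unsigned long long)rip);	/* prints 0x1 */
	return 0;
}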
@@ -1572,6 +1600,32 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
return 1;
}
+/*
+ * Handles kvm_read/write_guest_virt*() result and either injects #PF or returns
+ * KVM_EXIT_INTERNAL_ERROR for cases not currently handled by KVM. Return value
+ * indicates whether exit to userspace is needed.
+ */
+int vmx_handle_memory_failure(struct kvm_vcpu *vcpu, int r,
+ struct x86_exception *e)
+{
+ if (r == X86EMUL_PROPAGATE_FAULT) {
+ kvm_inject_emulated_page_fault(vcpu, e);
+ return 1;
+ }
+
+ /*
+ * In case kvm_read/write_guest_virt*() failed with X86EMUL_IO_NEEDED
+ * while handling a VMX instruction KVM could've handled the request
+ * correctly by exiting to userspace and performing I/O but there
+ * doesn't seem to be a real use-case behind such requests, just return
+ * KVM_EXIT_INTERNAL_ERROR for now.
+ */
+ vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+ vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
+ vcpu->run->internal.ndata = 0;
+
+ return 0;
+}
/*
* Recognizes a pending MTF VM-exit and records the nested state for later
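vmx_handle_memory_failure() factors out error handling that callers previously open-coded; the handle_invpcid() hunk later in this diff shows the intended caller pattern, reproduced here as a fragment (local names match that hunk):

	r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
	if (r != X86EMUL_CONTINUE)
		return vmx_handle_memory_failure(vcpu, r, &e);

A return of 1 keeps the vCPU running (e.g. after injecting #PF), while 0 propagates KVM_EXIT_INTERNAL_ERROR to userspace.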
@@ -1712,17 +1766,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
vmx_update_msr_bitmap(&vmx->vcpu);
}
-static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
-{
- struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
- if (is_guest_mode(vcpu) &&
- (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
- return vcpu->arch.tsc_offset - vmcs12->tsc_offset;
-
- return vcpu->arch.tsc_offset;
-}
-
static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
@@ -1771,6 +1814,9 @@ static int vmx_get_msr_feature(struct kvm_msr_entry *msr)
if (!nested)
return 1;
return vmx_get_vmx_msr(&vmcs_config.nested, msr->index, &msr->data);
+ case MSR_IA32_PERF_CAPABILITIES:
+ msr->data = vmx_get_perf_capabilities();
+ return 0;
default:
return 1;
}
@@ -1926,6 +1972,16 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
return 0;
}
+static u64 nested_vmx_truncate_sysenter_addr(struct kvm_vcpu *vcpu,
+ u64 data)
+{
+#ifdef CONFIG_X86_64
+ if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+ return (u32)data;
+#endif
+ return (unsigned long)data;
+}
+
/*
* Writes msr value into the appropriate "register".
* Returns 0 on success, non-0 otherwise.
@@ -1963,13 +2019,17 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
vmcs_write32(GUEST_SYSENTER_CS, data);
break;
case MSR_IA32_SYSENTER_EIP:
- if (is_guest_mode(vcpu))
+ if (is_guest_mode(vcpu)) {
+ data = nested_vmx_truncate_sysenter_addr(vcpu, data);
get_vmcs12(vcpu)->guest_sysenter_eip = data;
+ }
vmcs_writel(GUEST_SYSENTER_EIP, data);
break;
case MSR_IA32_SYSENTER_ESP:
- if (is_guest_mode(vcpu))
+ if (is_guest_mode(vcpu)) {
+ data = nested_vmx_truncate_sysenter_addr(vcpu, data);
get_vmcs12(vcpu)->guest_sysenter_esp = data;
+ }
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_DEBUGCTLMSR:
@@ -2187,6 +2247,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
{
+ unsigned long guest_owned_bits;
+
kvm_register_mark_available(vcpu, reg);
switch (reg) {
@@ -2200,10 +2262,22 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
if (enable_ept)
ept_save_pdptrs(vcpu);
break;
+ case VCPU_EXREG_CR0:
+ guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
+
+ vcpu->arch.cr0 &= ~guest_owned_bits;
+ vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & guest_owned_bits;
+ break;
case VCPU_EXREG_CR3:
if (enable_unrestricted_guest || (enable_ept && is_paging(vcpu)))
vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
break;
+ case VCPU_EXREG_CR4:
+ guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+
+ vcpu->arch.cr4 &= ~guest_owned_bits;
+ vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & guest_owned_bits;
+ break;
default:
WARN_ON_ONCE(1);
break;
@@ -2837,34 +2911,64 @@ static void exit_lmode(struct kvm_vcpu *vcpu)
#endif
-static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
+static void vmx_flush_tlb_all(struct kvm_vcpu *vcpu)
{
- int vpid = to_vmx(vcpu)->vpid;
-
- if (!vpid_sync_vcpu_addr(vpid, addr))
- vpid_sync_context(vpid);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
/*
- * If VPIDs are not supported or enabled, then the above is a no-op.
- * But we don't really need a TLB flush in that case anyway, because
- * each VM entry/exit includes an implicit flush when VPID is 0.
+ * INVEPT must be issued when EPT is enabled, irrespective of VPID, as
+ * the CPU is not required to invalidate guest-physical mappings on
+ * VM-Entry, even if VPID is disabled. Guest-physical mappings are
+ * associated with the root EPT structure and not any particular VPID
+ * (INVVPID also isn't required to invalidate guest-physical mappings).
*/
+ if (enable_ept) {
+ ept_sync_global();
+ } else if (enable_vpid) {
+ if (cpu_has_vmx_invvpid_global()) {
+ vpid_sync_vcpu_global();
+ } else {
+ vpid_sync_vcpu_single(vmx->vpid);
+ vpid_sync_vcpu_single(vmx->nested.vpid02);
+ }
+ }
}
-static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+static void vmx_flush_tlb_current(struct kvm_vcpu *vcpu)
{
- ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits;
+ u64 root_hpa = vcpu->arch.mmu->root_hpa;
- vcpu->arch.cr0 &= ~cr0_guest_owned_bits;
- vcpu->arch.cr0 |= vmcs_readl(GUEST_CR0) & cr0_guest_owned_bits;
+ /* No flush required if the current context is invalid. */
+ if (!VALID_PAGE(root_hpa))
+ return;
+
+ if (enable_ept)
+ ept_sync_context(construct_eptp(vcpu, root_hpa));
+ else if (!is_guest_mode(vcpu))
+ vpid_sync_context(to_vmx(vcpu)->vpid);
+ else
+ vpid_sync_context(nested_get_vpid02(vcpu));
}
-static void vmx_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
+static void vmx_flush_tlb_gva(struct kvm_vcpu *vcpu, gva_t addr)
{
- ulong cr4_guest_owned_bits = vcpu->arch.cr4_guest_owned_bits;
+ /*
+ * vpid_sync_vcpu_addr() is a nop if vmx->vpid==0, see the comment in
+ * vmx_flush_tlb_guest() for an explanation of why this is ok.
+ */
+ vpid_sync_vcpu_addr(to_vmx(vcpu)->vpid, addr);
+}
- vcpu->arch.cr4 &= ~cr4_guest_owned_bits;
- vcpu->arch.cr4 |= vmcs_readl(GUEST_CR4) & cr4_guest_owned_bits;
+static void vmx_flush_tlb_guest(struct kvm_vcpu *vcpu)
+{
+ /*
+ * vpid_sync_context() is a nop if vmx->vpid==0, e.g. if enable_vpid==0
+ * or a vpid couldn't be allocated for this vCPU. VM-Enter and VM-Exit
+ * are required to flush GVA->{G,H}PA mappings from the TLB if vpid is
+ * disabled (VM-Enter with vpid enabled and vpid==0 is disallowed),
+ * i.e. no explicit INVVPID is necessary.
+ */
+ vpid_sync_context(to_vmx(vcpu)->vpid);
}
static void ept_load_pdptrs(struct kvm_vcpu *vcpu)
@@ -2886,12 +2990,13 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu)
{
struct kvm_mmu *mmu = vcpu->arch.walk_mmu;
- if (is_pae_paging(vcpu)) {
- mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
- mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
- mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
- mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
- }
+ if (WARN_ON_ONCE(!is_pae_paging(vcpu)))
+ return;
+
+ mmu->pdptrs[0] = vmcs_read64(GUEST_PDPTR0);
+ mmu->pdptrs[1] = vmcs_read64(GUEST_PDPTR1);
+ mmu->pdptrs[2] = vmcs_read64(GUEST_PDPTR2);
+ mmu->pdptrs[3] = vmcs_read64(GUEST_PDPTR3);
kvm_register_mark_dirty(vcpu, VCPU_EXREG_PDPTR);
}
@@ -2955,20 +3060,27 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
vmcs_writel(CR0_READ_SHADOW, cr0);
vmcs_writel(GUEST_CR0, hw_cr0);
vcpu->arch.cr0 = cr0;
+ kvm_register_mark_available(vcpu, VCPU_EXREG_CR0);
/* depends on vcpu->arch.cr0 to be set to a new value */
vmx->emulation_required = emulation_required(vcpu);
}
-static int get_ept_level(struct kvm_vcpu *vcpu)
+static int vmx_get_tdp_level(struct kvm_vcpu *vcpu)
{
- if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
- return vmx_eptp_page_walk_level(nested_ept_get_eptp(vcpu));
if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
return 5;
return 4;
}
+static int get_ept_level(struct kvm_vcpu *vcpu)
+{
+ if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
+ return vmx_eptp_page_walk_level(nested_ept_get_eptp(vcpu));
+
+ return vmx_get_tdp_level(vcpu);
+}
+
u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
{
u64 eptp = VMX_EPTP_MT_WB;
@@ -2983,16 +3095,15 @@ u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
return eptp;
}
-void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long cr3)
+void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long pgd)
{
struct kvm *kvm = vcpu->kvm;
bool update_guest_cr3 = true;
unsigned long guest_cr3;
u64 eptp;
- guest_cr3 = cr3;
if (enable_ept) {
- eptp = construct_eptp(vcpu, cr3);
+ eptp = construct_eptp(vcpu, pgd);
vmcs_write64(EPT_POINTER, eptp);
if (kvm_x86_ops.tlb_remote_flush) {
@@ -3003,16 +3114,15 @@ void vmx_load_mmu_pgd(struct kvm_vcpu *vcpu, unsigned long cr3)
spin_unlock(&to_kvm_vmx(kvm)->ept_pointer_lock);
}
- /* Loading vmcs02.GUEST_CR3 is handled by nested VM-Enter. */
- if (is_guest_mode(vcpu))
- update_guest_cr3 = false;
- else if (!enable_unrestricted_guest && !is_paging(vcpu))
+ if (!enable_unrestricted_guest && !is_paging(vcpu))
guest_cr3 = to_kvm_vmx(kvm)->ept_identity_map_addr;
else if (test_bit(VCPU_EXREG_CR3, (ulong *)&vcpu->arch.regs_avail))
guest_cr3 = vcpu->arch.cr3;
else /* vmcs01.GUEST_CR3 is already up-to-date. */
update_guest_cr3 = false;
ept_load_pdptrs(vcpu);
+ } else {
+ guest_cr3 = pgd;
}
if (update_guest_cr3)
@@ -3063,6 +3173,7 @@ int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
return 1;
vcpu->arch.cr4 = cr4;
+ kvm_register_mark_available(vcpu, VCPU_EXREG_CR4);
if (!enable_unrestricted_guest) {
if (enable_ept) {
@@ -3851,7 +3962,8 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
if (pi_test_and_set_on(&vmx->pi_desc))
return 0;
- if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
+ if (vcpu != kvm_get_running_vcpu() &&
+ !kvm_vcpu_trigger_posted_interrupt(vcpu, false))
kvm_vcpu_kick(vcpu);
return 0;
@@ -4147,8 +4259,7 @@ static void ept_set_mmio_spte_mask(void)
* EPT Misconfigurations can be generated if the value of bits 2:0
* of an EPT paging-structure entry is 110b (write/execute).
*/
- kvm_mmu_set_mmio_spte_mask(VMX_EPT_RWX_MASK,
- VMX_EPT_MISCONFIG_WX_VALUE, 0);
+ kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE, 0);
}
#define VMX_XSS_EXIT_BITMAP 0
@@ -4453,31 +4564,54 @@ void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
}
}
-static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
+bool vmx_nmi_blocked(struct kvm_vcpu *vcpu)
{
- if (to_vmx(vcpu)->nested.nested_run_pending)
- return 0;
+ if (is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
+ return false;
- if (!enable_vnmi &&
- to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
- return 0;
+ if (!enable_vnmi && to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
+ return true;
- return !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
- (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI
- | GUEST_INTR_STATE_NMI));
+ return (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+ (GUEST_INTR_STATE_MOV_SS | GUEST_INTR_STATE_STI |
+ GUEST_INTR_STATE_NMI));
}
-static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
+static int vmx_nmi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
if (to_vmx(vcpu)->nested.nested_run_pending)
- return false;
+ return -EBUSY;
+
+ /* An NMI must not be injected into L2 if it's supposed to VM-Exit. */
+ if (for_injection && is_guest_mode(vcpu) && nested_exit_on_nmi(vcpu))
+ return -EBUSY;
+ return !vmx_nmi_blocked(vcpu);
+}
+
+bool vmx_interrupt_blocked(struct kvm_vcpu *vcpu)
+{
if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
- return true;
+ return false;
- return (vmcs_readl(GUEST_RFLAGS) & X86_EFLAGS_IF) &&
- !(vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
- (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
+ return !(vmx_get_rflags(vcpu) & X86_EFLAGS_IF) ||
+ (vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) &
+ (GUEST_INTR_STATE_STI | GUEST_INTR_STATE_MOV_SS));
+}
+
+static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection)
+{
+ if (to_vmx(vcpu)->nested.nested_run_pending)
+ return -EBUSY;
+
+ /*
+ * An IRQ must not be injected into L2 if it's supposed to VM-Exit,
+ * e.g. if the IRQ arrived asynchronously after checking nested events.
+ */
+ if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
+ return -EBUSY;
+
+ return !vmx_interrupt_blocked(vcpu);
}
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
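The *_allowed() hooks now return a tri-state value rather than a bool: -EBUSY tells the caller to retry later (a nested VM-Enter is pending, or the event must first cause a VM-Exit from L2), 0 means the event is blocked, and a positive value means it can be delivered. A hedged sketch of how a caller might consume that shape; inject_pending_interrupt() is a hypothetical helper, not part of this patch:

	int r = vmx_interrupt_allowed(vcpu, /*for_injection=*/true);

	if (r < 0)
		return;				/* -EBUSY: try again after the pending VM-Enter */
	if (r)
		inject_pending_interrupt(vcpu);	/* hypothetical helper */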
@@ -4518,10 +4652,8 @@ static bool rmode_exception(struct kvm_vcpu *vcpu, int vec)
return false;
/* fall through */
case DB_VECTOR:
- if (vcpu->guest_debug &
- (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
- return false;
- /* fall through */
+ return !(vcpu->guest_debug &
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP));
case DE_VECTOR:
case OF_VECTOR:
case BR_VECTOR:
@@ -4577,7 +4709,7 @@ static void kvm_machine_check(void)
.flags = X86_EFLAGS_IF,
};
- do_machine_check(&regs, 0);
+ do_machine_check(&regs);
#endif
}
@@ -4616,7 +4748,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
u32 vect_info;
vect_info = vmx->idt_vectoring_info;
- intr_info = vmx->exit_intr_info;
+ intr_info = vmx_get_intr_info(vcpu);
if (is_machine_check(intr_info) || is_nmi(intr_info))
return 1; /* handled by handle_exception_nmi_irqoff() */
@@ -4660,9 +4792,9 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
}
if (is_page_fault(intr_info)) {
- cr2 = vmcs_readl(EXIT_QUALIFICATION);
+ cr2 = vmx_get_exit_qual(vcpu);
/* EPT won't cause page fault directly */
- WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
+ WARN_ON_ONCE(!vcpu->arch.apf.host_apf_flags && enable_ept);
return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
}
@@ -4673,7 +4805,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu)
switch (ex_no) {
case DB_VECTOR:
- dr6 = vmcs_readl(EXIT_QUALIFICATION);
+ dr6 = vmx_get_exit_qual(vcpu);
if (!(vcpu->guest_debug &
(KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
if (is_icebp(intr_info))
@@ -4740,7 +4872,7 @@ static int handle_io(struct kvm_vcpu *vcpu)
int size, in, string;
unsigned port;
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ exit_qualification = vmx_get_exit_qual(vcpu);
string = (exit_qualification & 16) != 0;
++vcpu->stat.io_exits;
@@ -4831,7 +4963,7 @@ static int handle_cr(struct kvm_vcpu *vcpu)
int err;
int ret;
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ exit_qualification = vmx_get_exit_qual(vcpu);
cr = exit_qualification & 15;
reg = (exit_qualification >> 8) & 15;
switch ((exit_qualification >> 4) & 3) {
@@ -4908,7 +5040,7 @@ static int handle_dr(struct kvm_vcpu *vcpu)
unsigned long exit_qualification;
int dr, dr7, reg;
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ exit_qualification = vmx_get_exit_qual(vcpu);
dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
/* First, if DR does not exist, trigger UD */
@@ -5010,7 +5142,7 @@ static int handle_invd(struct kvm_vcpu *vcpu)
static int handle_invlpg(struct kvm_vcpu *vcpu)
{
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
kvm_mmu_invlpg(vcpu, exit_qualification);
return kvm_skip_emulated_instruction(vcpu);
@@ -5042,7 +5174,7 @@ static int handle_xsetbv(struct kvm_vcpu *vcpu)
static int handle_apic_access(struct kvm_vcpu *vcpu)
{
if (likely(fasteoi)) {
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
int access_type, offset;
access_type = exit_qualification & APIC_ACCESS_TYPE;
@@ -5063,7 +5195,7 @@ static int handle_apic_access(struct kvm_vcpu *vcpu)
static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
{
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
int vector = exit_qualification & 0xff;
/* EOI-induced VM exit is trap-like and thus no need to adjust IP */
@@ -5073,7 +5205,7 @@ static int handle_apic_eoi_induced(struct kvm_vcpu *vcpu)
static int handle_apic_write(struct kvm_vcpu *vcpu)
{
- unsigned long exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
u32 offset = exit_qualification & 0xfff;
/* APIC-write VM exit is trap-like and thus no need to adjust IP */
@@ -5094,7 +5226,7 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ exit_qualification = vmx_get_exit_qual(vcpu);
reason = (u32)exit_qualification >> 30;
if (reason == TASK_SWITCH_GATE && idt_v) {
@@ -5144,7 +5276,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
gpa_t gpa;
u64 error_code;
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ exit_qualification = vmx_get_exit_qual(vcpu);
/*
* EPT violation happened while executing iret from NMI,
@@ -5216,18 +5348,11 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
bool intr_window_requested;
unsigned count = 130;
- /*
- * We should never reach the point where we are emulating L2
- * due to invalid guest state as that means we incorrectly
- * allowed a nested VMEntry with an invalid vmcs12.
- */
- WARN_ON_ONCE(vmx->emulation_required && vmx->nested.nested_run_pending);
-
intr_window_requested = exec_controls_get(vmx) &
CPU_BASED_INTR_WINDOW_EXITING;
while (vmx->emulation_required && count-- != 0) {
- if (intr_window_requested && vmx_interrupt_allowed(vcpu))
+ if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
return handle_interrupt_window(&vmx->vcpu);
if (kvm_test_request(KVM_REQ_EVENT, vcpu))
@@ -5387,6 +5512,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
u64 pcid;
u64 gla;
} operand;
+ int r;
if (!guest_cpuid_has(vcpu, X86_FEATURE_INVPCID)) {
kvm_queue_exception(vcpu, UD_VECTOR);
@@ -5404,15 +5530,14 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
/* According to the Intel instruction reference, the memory operand
* is read even if it isn't needed (e.g., for type==all)
*/
- if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+ if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
vmx_instruction_info, false,
sizeof(operand), &gva))
return 1;
- if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
- kvm_inject_page_fault(vcpu, &e);
- return 1;
- }
+ r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
+ if (r != X86EMUL_CONTINUE)
+ return vmx_handle_memory_failure(vcpu, r, &e);
if (operand.pcid >> 12 != 0) {
kvm_inject_gp(vcpu, 0);
@@ -5439,11 +5564,11 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
if (kvm_get_active_pcid(vcpu) == operand.pcid) {
kvm_mmu_sync_roots(vcpu);
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++)
- if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].cr3)
+ if (kvm_get_pcid(vcpu, vcpu->arch.mmu->prev_roots[i].pgd)
== operand.pcid)
roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
@@ -5480,7 +5605,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
trace_kvm_pml_full(vcpu->vcpu_id);
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+ exit_qualification = vmx_get_exit_qual(vcpu);
/*
* PML buffer FULL happened while executing iret from NMI,
@@ -5499,14 +5624,22 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
return 1;
}
-static int handle_preemption_timer(struct kvm_vcpu *vcpu)
+static fastpath_t handle_fastpath_preemption_timer(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
if (!vmx->req_immediate_exit &&
- !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled))
+ !unlikely(vmx->loaded_vmcs->hv_timer_soft_disabled)) {
kvm_lapic_expired_hv_timer(vcpu);
+ return EXIT_FASTPATH_REENTER_GUEST;
+ }
+ return EXIT_FASTPATH_NONE;
+}
+
+static int handle_preemption_timer(struct kvm_vcpu *vcpu)
+{
+ handle_fastpath_preemption_timer(vcpu);
return 1;
}
@@ -5594,8 +5727,8 @@ static const int kvm_vmx_max_exit_handlers =
static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
- *info1 = vmcs_readl(EXIT_QUALIFICATION);
- *info2 = vmcs_read32(VM_EXIT_INTR_INFO);
+ *info1 = vmx_get_exit_qual(vcpu);
+ *info2 = vmx_get_intr_info(vcpu);
}
static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
@@ -5677,7 +5810,6 @@ void dump_vmcs(void)
u32 cpu_based_exec_ctrl, pin_based_exec_ctrl, secondary_exec_control;
unsigned long cr4;
u64 efer;
- int i, n;
if (!dump_invalid_vmcs) {
pr_warn_ratelimited("set kvm_intel.dump_invalid_vmcs=1 to dump internal KVM state.\n");
@@ -5814,14 +5946,6 @@ void dump_vmcs(void)
pr_err("PostedIntrVec = 0x%02x\n", vmcs_read16(POSTED_INTR_NV));
if ((secondary_exec_control & SECONDARY_EXEC_ENABLE_EPT))
pr_err("EPT pointer = 0x%016llx\n", vmcs_read64(EPT_POINTER));
- n = vmcs_read32(CR3_TARGET_COUNT);
- for (i = 0; i + 1 < n; i += 4)
- pr_err("CR3 target%u=%016lx target%u=%016lx\n",
- i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2),
- i + 1, vmcs_readl(CR3_TARGET_VALUE0 + i * 2 + 2));
- if (i < n)
- pr_err("CR3 target%u=%016lx\n",
- i, vmcs_readl(CR3_TARGET_VALUE0 + i * 2));
if (secondary_exec_control & SECONDARY_EXEC_PAUSE_LOOP_EXITING)
pr_err("PLE Gap=%08x Window=%08x\n",
vmcs_read32(PLE_GAP), vmcs_read32(PLE_WINDOW));
@@ -5834,15 +5958,12 @@ void dump_vmcs(void)
* The guest has exited. See if we can fix it or if we need userspace
* assistance.
*/
-static int vmx_handle_exit(struct kvm_vcpu *vcpu,
- enum exit_fastpath_completion exit_fastpath)
+static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
u32 exit_reason = vmx->exit_reason;
u32 vectoring_info = vmx->idt_vectoring_info;
- trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
-
/*
* Flush logged GPAs PML buffer, this will make dirty_bitmap more
* updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -5853,6 +5974,14 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu,
if (enable_pml)
vmx_flush_pml_buffer(vcpu);
+ /*
+ * We should never reach this point with a pending nested VM-Enter, and
+ * more specifically emulation of L2 due to invalid guest state (see
+ * below) should never happen as that means we incorrectly allowed a
+ * nested VM-Enter with an invalid vmcs12.
+ */
+ WARN_ON_ONCE(vmx->nested.nested_run_pending);
+
/* If guest state is invalid, start emulating */
if (vmx->emulation_required)
return handle_invalid_guest_state(vcpu);
@@ -5871,8 +6000,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu,
*/
nested_mark_vmcs12_pages_dirty(vcpu);
- if (nested_vmx_exit_reflected(vcpu, exit_reason))
- return nested_vmx_reflect_vmexit(vcpu, exit_reason);
+ if (nested_vmx_reflect_vmexit(vcpu))
+ return 1;
}
if (exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY) {
@@ -5919,7 +6048,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu,
if (unlikely(!enable_vnmi &&
vmx->loaded_vmcs->soft_vnmi_blocked)) {
- if (vmx_interrupt_allowed(vcpu)) {
+ if (!vmx_interrupt_blocked(vcpu)) {
vmx->loaded_vmcs->soft_vnmi_blocked = 0;
} else if (vmx->loaded_vmcs->vnmi_blocked_time > 1000000000LL &&
vcpu->arch.nmi_pending) {
@@ -5936,10 +6065,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu,
}
}
- if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
- kvm_skip_emulated_instruction(vcpu);
+ if (exit_fastpath != EXIT_FASTPATH_NONE)
return 1;
- }
if (exit_reason >= kvm_vmx_max_exit_handlers)
goto unexpected_vmexit;
@@ -6093,7 +6220,15 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
if (flexpriority_enabled) {
sec_exec_control |=
SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
- vmx_flush_tlb(vcpu, true);
+ kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
+
+ /*
+ * Flush the TLB, reloading the APIC access page will
+ * only do so if its physical address has changed, but
+ * the guest may have inserted a non-APIC mapping into
+ * the TLB while the APIC access page was disabled.
+ */
+ kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
}
break;
case LAPIC_MODE_X2APIC:
@@ -6107,12 +6242,32 @@ void vmx_set_virtual_apic_mode(struct kvm_vcpu *vcpu)
vmx_update_msr_bitmap(vcpu);
}
-static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa)
+static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu)
{
- if (!is_guest_mode(vcpu)) {
- vmcs_write64(APIC_ACCESS_ADDR, hpa);
- vmx_flush_tlb(vcpu, true);
+ struct page *page;
+
+ /* Defer reload until vmcs01 is the current VMCS. */
+ if (is_guest_mode(vcpu)) {
+ to_vmx(vcpu)->nested.reload_vmcs01_apic_access_page = true;
+ return;
}
+
+ if (!(secondary_exec_controls_get(to_vmx(vcpu)) &
+ SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES))
+ return;
+
+ page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+ if (is_error_page(page))
+ return;
+
+ vmcs_write64(APIC_ACCESS_ADDR, page_to_phys(page));
+ vmx_flush_tlb_current(vcpu);
+
+ /*
+ * Do not pin apic access page in memory, the MMU notifier
+ * will call us again if it is migrated or swapped out.
+ */
+ put_page(page);
}
static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
@@ -6230,16 +6385,16 @@ static void vmx_apicv_post_state_restore(struct kvm_vcpu *vcpu)
static void handle_exception_nmi_irqoff(struct vcpu_vmx *vmx)
{
- vmx->exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+ u32 intr_info = vmx_get_intr_info(&vmx->vcpu);
/* if exit due to PF check for async PF */
- if (is_page_fault(vmx->exit_intr_info)) {
- vmx->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
+ if (is_page_fault(intr_info)) {
+ vmx->vcpu.arch.apf.host_apf_flags = kvm_read_and_reset_apf_flags();
/* Handle machine checks before interrupts are enabled */
- } else if (is_machine_check(vmx->exit_intr_info)) {
+ } else if (is_machine_check(intr_info)) {
kvm_machine_check();
/* We need to handle NMIs before interrupts are enabled */
- } else if (is_nmi(vmx->exit_intr_info)) {
+ } else if (is_nmi(intr_info)) {
kvm_before_interrupt(&vmx->vcpu);
asm("int $2");
kvm_after_interrupt(&vmx->vcpu);
@@ -6254,9 +6409,8 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
unsigned long tmp;
#endif
gate_desc *desc;
- u32 intr_info;
+ u32 intr_info = vmx_get_intr_info(vcpu);
- intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
if (WARN_ONCE(!is_external_intr(intr_info),
"KVM: unexpected VM-Exit interrupt info: 0x%x", intr_info))
return;
@@ -6269,13 +6423,13 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
asm volatile(
#ifdef CONFIG_X86_64
- "mov %%" _ASM_SP ", %[sp]\n\t"
- "and $0xfffffffffffffff0, %%" _ASM_SP "\n\t"
- "push $%c[ss]\n\t"
+ "mov %%rsp, %[sp]\n\t"
+ "and $-16, %%rsp\n\t"
+ "push %[ss]\n\t"
"push %[sp]\n\t"
#endif
"pushf\n\t"
- __ASM_SIZE(push) " $%c[cs]\n\t"
+ "push %[cs]\n\t"
CALL_NOSPEC
:
#ifdef CONFIG_X86_64
@@ -6284,7 +6438,9 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
ASM_CALL_CONSTRAINT
:
[thunk_target]"r"(entry),
+#ifdef CONFIG_X86_64
[ss]"i"(__KERNEL_DS),
+#endif
[cs]"i"(__KERNEL_CS)
);
@@ -6292,8 +6448,7 @@ static void handle_external_interrupt_irqoff(struct kvm_vcpu *vcpu)
}
STACK_FRAME_NON_STANDARD(handle_external_interrupt_irqoff);
-static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu,
- enum exit_fastpath_completion *exit_fastpath)
+static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6301,12 +6456,9 @@ static void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu,
handle_external_interrupt_irqoff(vcpu);
else if (vmx->exit_reason == EXIT_REASON_EXCEPTION_NMI)
handle_exception_nmi_irqoff(vmx);
- else if (!is_guest_mode(vcpu) &&
- vmx->exit_reason == EXIT_REASON_MSR_WRITE)
- *exit_fastpath = handle_fastpath_set_msr_irqoff(vcpu);
}
-static bool vmx_has_emulated_msr(int index)
+static bool vmx_has_emulated_msr(u32 index)
{
switch (index) {
case MSR_IA32_SMBASE:
@@ -6337,11 +6489,8 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
if (enable_vnmi) {
if (vmx->loaded_vmcs->nmi_known_unmasked)
return;
- /*
- * Can't use vmx->exit_intr_info since we're not sure what
- * the exit reason is.
- */
- exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO);
+
+ exit_intr_info = vmx_get_intr_info(&vmx->vcpu);
unblock_nmi = (exit_intr_info & INTR_INFO_UNBLOCK_NMI) != 0;
vector = exit_intr_info & INTR_INFO_VECTOR_MASK;
/*
@@ -6508,13 +6657,27 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
}
}
+static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+{
+ switch (to_vmx(vcpu)->exit_reason) {
+ case EXIT_REASON_MSR_WRITE:
+ return handle_fastpath_set_msr_irqoff(vcpu);
+ case EXIT_REASON_PREEMPTION_TIMER:
+ return handle_fastpath_preemption_timer(vcpu);
+ default:
+ return EXIT_FASTPATH_NONE;
+ }
+}
+
bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
-static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
{
+ fastpath_t exit_fastpath;
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long cr3, cr4;
+reenter_guest:
/* Record the guest's net vcpu time for enforced NMI injections. */
if (unlikely(!enable_vnmi &&
vmx->loaded_vmcs->soft_vnmi_blocked))
@@ -6523,7 +6686,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
/* Don't enter VMX if guest state is invalid, let the exit handler
start emulation until we arrive back to a valid state */
if (vmx->emulation_required)
- return;
+ return EXIT_FASTPATH_NONE;
if (vmx->ple_window_dirty) {
vmx->ple_window_dirty = false;
@@ -6643,12 +6806,7 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
loadsegment(es, __USER_DS);
#endif
- vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
- | (1 << VCPU_EXREG_RFLAGS)
- | (1 << VCPU_EXREG_PDPTR)
- | (1 << VCPU_EXREG_SEGMENTS)
- | (1 << VCPU_EXREG_CR3));
- vcpu->arch.regs_dirty = 0;
+ vmx_register_cache_reset(vcpu);
pt_guest_exit(vmx);
@@ -6657,18 +6815,45 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmx->nested.nested_run_pending = 0;
vmx->idt_vectoring_info = 0;
- vmx->exit_reason = vmx->fail ? 0xdead : vmcs_read32(VM_EXIT_REASON);
- if ((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY)
+ if (unlikely(vmx->fail)) {
+ vmx->exit_reason = 0xdead;
+ return EXIT_FASTPATH_NONE;
+ }
+
+ vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+ if (unlikely((u16)vmx->exit_reason == EXIT_REASON_MCE_DURING_VMENTRY))
kvm_machine_check();
- if (vmx->fail || (vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
- return;
+ trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
+
+ if (unlikely(vmx->exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY))
+ return EXIT_FASTPATH_NONE;
vmx->loaded_vmcs->launched = 1;
vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
vmx_recover_nmi_blocking(vmx);
vmx_complete_interrupts(vmx);
+
+ if (is_guest_mode(vcpu))
+ return EXIT_FASTPATH_NONE;
+
+ exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
+ if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
+ if (!kvm_vcpu_exit_request(vcpu)) {
+ /*
+ * FIXME: this goto should be a loop in vcpu_enter_guest,
+ * but it would incur the cost of a retpoline for now.
+ * Revisit once static calls are available.
+ */
+ if (vcpu->arch.apicv_active)
+ vmx_sync_pir_to_irr(vcpu);
+ goto reenter_guest;
+ }
+ exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+ }
+
+ return exit_fastpath;
}
static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
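The tail of vmx_vcpu_run() now loops straight back into the guest while a fastpath handler returns EXIT_FASTPATH_REENTER_GUEST and no exit request is pending, per the FIXME above. A minimal standalone model of that control flow, with its own names; the two helpers are assumptions, not KVM functions:

#include <stdbool.h>

enum fastpath { FASTPATH_NONE, FASTPATH_REENTER_GUEST, FASTPATH_EXIT_HANDLED };

enum fastpath enter_guest_and_dispatch(void);	/* assumed: run guest, return fastpath verdict */
bool exit_request_pending(void);		/* assumed: signal/work pending for this vCPU */

static enum fastpath vcpu_run_once(void)
{
	enum fastpath fp;

	do {
		fp = enter_guest_and_dispatch();
	} while (fp == FASTPATH_REENTER_GUEST && !exit_request_pending());

	/* A re-enter verdict that could not be honored is reported as handled. */
	return fp == FASTPATH_REENTER_GUEST ? FASTPATH_EXIT_HANDLED : fp;
}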
@@ -7123,10 +7308,6 @@ static __init void vmx_set_cpu_caps(void)
if (vmx_pt_mode_is_host_guest())
kvm_cpu_cap_check_and_set(X86_FEATURE_INTEL_PT);
- /* PKU is not yet implemented for shadow paging. */
- if (enable_ept && boot_cpu_has(X86_FEATURE_OSPKE))
- kvm_cpu_cap_check_and_set(X86_FEATURE_PKU);
-
if (vmx_umip_emulated())
kvm_cpu_cap_set(X86_FEATURE_UMIP);
@@ -7138,6 +7319,9 @@ static __init void vmx_set_cpu_caps(void)
/* CPUID 0x80000001 */
if (!cpu_has_vmx_rdtscp())
kvm_cpu_cap_clear(X86_FEATURE_RDTSCP);
+
+ if (vmx_waitpkg_supported())
+ kvm_cpu_cap_check_and_set(X86_FEATURE_WAITPKG);
}
static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
@@ -7253,10 +7437,6 @@ static int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
u64 tscl, guest_tscl, delta_tsc, lapic_timer_advance_cycles;
struct kvm_timer *ktimer = &vcpu->arch.apic->lapic_timer;
- if (kvm_mwait_in_guest(vcpu->kvm) ||
- kvm_can_post_timer_interrupt(vcpu))
- return -EOPNOTSUPP;
-
vmx = to_vmx(vcpu);
tscl = rdtsc();
guest_tscl = kvm_read_l1_tsc(vcpu, tscl);
@@ -7599,12 +7779,12 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
~FEAT_CTL_LMCE_ENABLED;
}
-static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+static int vmx_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
{
/* we need a nested vmexit to enter SMM, postpone if run is pending */
if (to_vmx(vcpu)->nested.nested_run_pending)
- return 0;
- return 1;
+ return -EBUSY;
+ return !is_smm(vcpu);
}
static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
@@ -7641,9 +7821,9 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
return 0;
}
-static int enable_smi_window(struct kvm_vcpu *vcpu)
+static void enable_smi_window(struct kvm_vcpu *vcpu)
{
- return 0;
+ /* RSM will cause a vmexit anyway. */
}
static bool vmx_need_emulation_on_page_fault(struct kvm_vcpu *vcpu)
@@ -7656,6 +7836,16 @@ static bool vmx_apic_init_signal_blocked(struct kvm_vcpu *vcpu)
return to_vmx(vcpu)->nested.vmxon;
}
+static void vmx_migrate_timers(struct kvm_vcpu *vcpu)
+{
+ if (is_guest_mode(vcpu)) {
+ struct hrtimer *timer = &to_vmx(vcpu)->nested.preemption_timer;
+
+ if (hrtimer_try_to_cancel(timer) == 1)
+ hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
+ }
+}
+
static void hardware_unsetup(void)
{
if (nested)
@@ -7700,8 +7890,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.set_segment = vmx_set_segment,
.get_cpl = vmx_get_cpl,
.get_cs_db_l_bits = vmx_get_cs_db_l_bits,
- .decache_cr0_guest_bits = vmx_decache_cr0_guest_bits,
- .decache_cr4_guest_bits = vmx_decache_cr4_guest_bits,
.set_cr0 = vmx_set_cr0,
.set_cr4 = vmx_set_cr4,
.set_efer = vmx_set_efer,
@@ -7715,8 +7903,10 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.get_rflags = vmx_get_rflags,
.set_rflags = vmx_set_rflags,
- .tlb_flush = vmx_flush_tlb,
+ .tlb_flush_all = vmx_flush_tlb_all,
+ .tlb_flush_current = vmx_flush_tlb_current,
.tlb_flush_gva = vmx_flush_tlb_gva,
+ .tlb_flush_guest = vmx_flush_tlb_guest,
.run = vmx_vcpu_run,
.handle_exit = vmx_handle_exit,
@@ -7751,7 +7941,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.set_tss_addr = vmx_set_tss_addr,
.set_identity_map_addr = vmx_set_identity_map_addr,
- .get_tdp_level = get_ept_level,
+ .get_tdp_level = vmx_get_tdp_level,
.get_mt_mask = vmx_get_mt_mask,
.get_exit_info = vmx_get_exit_info,
@@ -7760,7 +7950,6 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
- .read_l1_tsc_offset = vmx_read_l1_tsc_offset,
.write_l1_tsc_offset = vmx_write_l1_tsc_offset,
.load_mmu_pgd = vmx_load_mmu_pgd,
@@ -7782,6 +7971,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.post_block = vmx_post_block,
.pmu_ops = &intel_pmu_ops,
+ .nested_ops = &vmx_nested_ops,
.update_pi_irte = vmx_update_pi_irte,
@@ -7797,14 +7987,9 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
.pre_leave_smm = vmx_pre_leave_smm,
.enable_smi_window = enable_smi_window,
- .check_nested_events = NULL,
- .get_nested_state = NULL,
- .set_nested_state = NULL,
- .get_vmcs12_pages = NULL,
- .nested_enable_evmcs = NULL,
- .nested_get_evmcs_version = NULL,
.need_emulation_on_page_fault = vmx_need_emulation_on_page_fault,
.apic_init_signal_blocked = vmx_apic_init_signal_blocked,
+ .migrate_timers = vmx_migrate_timers,
};
static __init int hardware_setup(void)
@@ -7903,11 +8088,11 @@ static __init int hardware_setup(void)
if (!enable_ept)
ept_lpage_level = 0;
else if (cpu_has_vmx_ept_1g_page())
- ept_lpage_level = PT_PDPE_LEVEL;
+ ept_lpage_level = PG_LEVEL_1G;
else if (cpu_has_vmx_ept_2m_page())
- ept_lpage_level = PT_DIRECTORY_LEVEL;
+ ept_lpage_level = PG_LEVEL_2M;
else
- ept_lpage_level = PT_PAGE_TABLE_LEVEL;
+ ept_lpage_level = PG_LEVEL_4K;
kvm_configure_mmu(enable_ept, ept_lpage_level);
/*
@@ -7967,8 +8152,7 @@ static __init int hardware_setup(void)
nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
vmx_capability.ept);
- r = nested_vmx_hardware_setup(&vmx_x86_ops,
- kvm_vmx_exit_handlers);
+ r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
if (r)
return r;
}