Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r-- | arch/x86/kvm/svm.c | 68
1 file changed, 53 insertions, 15 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 101f53ccf571..307e5bddb6d9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -675,11 +675,6 @@ struct svm_cpu_data {
 
 static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
 
-struct svm_init_data {
-	int cpu;
-	int r;
-};
-
 static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};
 
 #define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
@@ -711,17 +706,17 @@ static u32 svm_msrpm_offset(u32 msr)
 
 static inline void clgi(void)
 {
-	asm volatile (__ex(SVM_CLGI));
+	asm volatile (__ex("clgi"));
 }
 
 static inline void stgi(void)
 {
-	asm volatile (__ex(SVM_STGI));
+	asm volatile (__ex("stgi"));
 }
 
 static inline void invlpga(unsigned long addr, u32 asid)
 {
-	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
+	asm volatile (__ex("invlpga %1, %0") : : "c"(asid), "a"(addr));
 }
 
 static int get_npt_level(struct kvm_vcpu *vcpu)
@@ -1456,10 +1451,11 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 		g_tsc_offset = svm->vmcb->control.tsc_offset -
 			       svm->nested.hsave->control.tsc_offset;
 		svm->nested.hsave->control.tsc_offset = offset;
-	} else
-		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-					   svm->vmcb->control.tsc_offset,
-					   offset);
+	}
+
+	trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+				   svm->vmcb->control.tsc_offset - g_tsc_offset,
+				   offset);
 
 	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
 
@@ -2129,6 +2125,13 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 		goto out;
 	}
 
+	svm->vcpu.arch.guest_fpu = kmem_cache_zalloc(x86_fpu_cache, GFP_KERNEL);
+	if (!svm->vcpu.arch.guest_fpu) {
+		printk(KERN_ERR "kvm: failed to allocate vcpu's fpu\n");
+		err = -ENOMEM;
+		goto free_partial_svm;
+	}
+
 	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
 	if (err)
 		goto free_svm;
@@ -2188,6 +2191,8 @@ free_page1:
 uninit:
 	kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
+	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
+free_partial_svm:
 	kmem_cache_free(kvm_vcpu_cache, svm);
 out:
 	return ERR_PTR(err);
@@ -2217,6 +2222,7 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 	__free_page(virt_to_page(svm->nested.hsave));
 	__free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
+	kmem_cache_free(x86_fpu_cache, svm->vcpu.arch.guest_fpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
 }
 
@@ -3278,6 +3284,8 @@ static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *fr
 	dst->event_inj_err = from->event_inj_err;
 	dst->nested_cr3 = from->nested_cr3;
 	dst->virt_ext = from->virt_ext;
+	dst->pause_filter_count = from->pause_filter_count;
+	dst->pause_filter_thresh = from->pause_filter_thresh;
 }
 
 static int nested_svm_vmexit(struct vcpu_svm *svm)
@@ -3356,6 +3364,11 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	nested_vmcb->control.event_inj = 0;
 	nested_vmcb->control.event_inj_err = 0;
 
+	nested_vmcb->control.pause_filter_count =
+		svm->vmcb->control.pause_filter_count;
+	nested_vmcb->control.pause_filter_thresh =
+		svm->vmcb->control.pause_filter_thresh;
+
 	/* We always set V_INTR_MASKING and remember the old value in hflags */
 	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
 		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
@@ -3532,6 +3545,11 @@ static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
 	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
 	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
 
+	svm->vmcb->control.pause_filter_count =
+		nested_vmcb->control.pause_filter_count;
+	svm->vmcb->control.pause_filter_thresh =
+		nested_vmcb->control.pause_filter_thresh;
+
 	nested_svm_unmap(page);
 
 	/* Enter Guest-Mode */
@@ -5636,9 +5654,9 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Enter guest mode */
 		"push %%" _ASM_AX " \n\t"
 		"mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
-		__ex(SVM_VMLOAD) "\n\t"
-		__ex(SVM_VMRUN) "\n\t"
-		__ex(SVM_VMSAVE) "\n\t"
+		__ex("vmload %%" _ASM_AX) "\n\t"
+		__ex("vmrun %%" _ASM_AX) "\n\t"
+		__ex("vmsave %%" _ASM_AX) "\n\t"
 		"pop %%" _ASM_AX " \n\t"
 
 		/* Save guest registers, load host registers */
@@ -5836,6 +5854,13 @@ static bool svm_cpu_has_accelerated_tpr(void)
 
 static bool svm_has_emulated_msr(int index)
 {
+	switch (index) {
+	case MSR_IA32_MCG_EXT_CTL:
+		return false;
+	default:
+		break;
+	}
+
 	return true;
 }
 
@@ -5924,6 +5949,11 @@ static bool svm_umip_emulated(void)
 	return false;
 }
 
+static bool svm_pt_supported(void)
+{
+	return false;
+}
+
 static bool svm_has_wbinvd_exit(void)
 {
 	return true;
@@ -7053,6 +7083,12 @@ failed:
 	return ret;
 }
 
+static uint16_t nested_get_evmcs_version(struct kvm_vcpu *vcpu)
+{
+	/* Not supported */
+	return 0;
+}
+
 static int nested_enable_evmcs(struct kvm_vcpu *vcpu,
 			       uint16_t *vmcs_version)
 {
@@ -7159,6 +7195,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.mpx_supported = svm_mpx_supported,
 	.xsaves_supported = svm_xsaves_supported,
 	.umip_emulated = svm_umip_emulated,
+	.pt_supported = svm_pt_supported,
 
 	.set_supported_cpuid = svm_set_supported_cpuid,
 
@@ -7191,6 +7228,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.mem_enc_unreg_region = svm_unregister_enc_region,
 
 	.nested_enable_evmcs = nested_enable_evmcs,
+	.nested_get_evmcs_version = nested_get_evmcs_version,
 };
 
 static int __init svm_init(void)
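The svm_create_vcpu()/svm_free_vcpu() hunks above follow the usual kernel error-unwind idiom: the new guest_fpu object is allocated before kvm_vcpu_init(), a dedicated free_partial_svm label is added, and the goto labels run in reverse allocation order so a failure at any step releases exactly what was set up before it; the free path mirrors this by releasing guest_fpu before the containing vcpu. A minimal userspace sketch of that pattern (hypothetical struct ctx/create_ctx names, calloc/free standing in for kmem_cache_zalloc/kmem_cache_free):

#include <stdlib.h>

/* Hypothetical stand-ins for the vcpu object and its guest FPU state. */
struct fpu { unsigned char state[512]; };
struct ctx { struct fpu *guest_fpu; int id; };

/* Allocate the outer object first, then its FPU state; unwind in reverse. */
static struct ctx *create_ctx(int id)
{
	struct ctx *c = calloc(1, sizeof(*c));

	if (!c)
		goto out;

	c->guest_fpu = calloc(1, sizeof(*c->guest_fpu));
	if (!c->guest_fpu)
		goto free_partial_ctx;	/* only the outer object exists yet */

	c->id = id;
	return c;

free_partial_ctx:
	free(c);
out:
	return NULL;	/* kernel code would return ERR_PTR(-ENOMEM) instead */
}

/* Teardown mirrors allocation in reverse, as svm_free_vcpu() now does. */
static void free_ctx(struct ctx *c)
{
	free(c->guest_fpu);
	free(c);
}

int main(void)
{
	struct ctx *c = create_ctx(0);

	if (!c)
		return 1;
	free_ctx(c);
	return 0;
}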