From f2bc14b69c38b60f201fdf90c08cb2dc8966f331 Mon Sep 17 00:00:00 2001
From: Vitaly Kuznetsov <vkuznets@redhat.com>
Date: Tue, 26 Jan 2021 14:48:12 +0100
Subject: KVM: x86: hyper-v: Prepare to meet unallocated Hyper-V context

Currently, Hyper-V context is part of 'struct kvm_vcpu_arch' and is
always available. As a preparation to allocating it dynamically, check
that it is not NULL at call sites which can normally proceed without it,
i.e. the behavior is identical to the situation when Hyper-V emulation
is not being used by the guest.

When Hyper-V context for a particular vCPU is not allocated, we may
still need to get 'vp_index' from there. E.g. in a hypothetical
situation when Hyper-V emulation was enabled on one vCPU and wasn't on
another, a Hyper-V style send-IPI hypercall may still be used. Luckily,
vp_index is always initialized to kvm_vcpu_get_idx() and can only be
changed when Hyper-V context is present. Introduce kvm_hv_get_vpindex()
helper for simplification.

No functional change intended.

Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Message-Id: <20210126134816.1880136-12-vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
---
 arch/x86/kvm/hyperv.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 1482cad2770b..10e7ed2aa8fe 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -142,10 +142,10 @@ static struct kvm_vcpu *get_vcpu_by_vpidx(struct kvm *kvm, u32 vpidx)
 		return NULL;
 
 	vcpu = kvm_get_vcpu(kvm, vpidx);
-	if (vcpu && to_hv_vcpu(vcpu)->vp_index == vpidx)
+	if (vcpu && kvm_hv_get_vpindex(vcpu) == vpidx)
 		return vcpu;
 	kvm_for_each_vcpu(i, vcpu, kvm)
-		if (to_hv_vcpu(vcpu)->vp_index == vpidx)
+		if (kvm_hv_get_vpindex(vcpu) == vpidx)
 			return vcpu;
 	return NULL;
 }
@@ -377,9 +377,7 @@ static int syndbg_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata, bool host)
 		break;
 	}
 
-	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id,
-				    to_hv_vcpu(vcpu)->vp_index, msr,
-				    *pdata);
+	trace_kvm_hv_syndbg_get_msr(vcpu->vcpu_id, kvm_hv_get_vpindex(vcpu), msr, *pdata);
 
 	return 0;
 }
@@ -806,6 +804,9 @@ void kvm_hv_process_stimers(struct kvm_vcpu *vcpu)
 	u64 time_now, exp_time;
 	int i;
 
+	if (!hv_vcpu)
+		return;
+
 	for (i = 0; i < ARRAY_SIZE(hv_vcpu->stimer); i++)
 		if (test_and_clear_bit(i, hv_vcpu->stimer_pending_bitmap)) {
 			stimer = &hv_vcpu->stimer[i];
@@ -842,6 +843,9 @@ bool kvm_hv_assist_page_enabled(struct kvm_vcpu *vcpu)
 {
 	struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);
 
+	if (!hv_vcpu)
+		return false;
+
 	if (!(hv_vcpu->hv_vapic & HV_X64_MSR_VP_ASSIST_PAGE_ENABLE))
 		return false;
 	return vcpu->arch.pv_eoi.msr_val & KVM_MSR_ENABLED;
@@ -1504,8 +1508,7 @@ static __always_inline unsigned long *sparse_set_to_vcpu_mask(
 	bitmap_zero(vcpu_bitmap, KVM_MAX_VCPUS);
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (test_bit(to_hv_vcpu(vcpu)->vp_index,
-			     (unsigned long *)vp_bitmap))
+		if (test_bit(kvm_hv_get_vpindex(vcpu), (unsigned long *)vp_bitmap))
 			__set_bit(i, vcpu_bitmap);
 	}
 	return vcpu_bitmap;
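
A note on the new helper: kvm_hv_get_vpindex() is defined outside
arch/x86/kvm/hyperv.c, so its definition does not appear in the diff
above. Going by the commit message (vp_index is initialized to
kvm_vcpu_get_idx() and can only diverge once a Hyper-V context exists),
a minimal sketch of the helper could look like the following; treat it
as an illustration of the intended semantics, not the exact hunk:

	static inline u32 kvm_hv_get_vpindex(struct kvm_vcpu *vcpu)
	{
		struct kvm_vcpu_hv *hv_vcpu = to_hv_vcpu(vcpu);

		/*
		 * Without an allocated Hyper-V context, vp_index still
		 * holds its default value, the vCPU index.
		 */
		return hv_vcpu ? hv_vcpu->vp_index : kvm_vcpu_get_idx(vcpu);
	}

This shape keeps the converted call sites NULL-safe: get_vcpu_by_vpidx()
and sparse_set_to_vcpu_mask() no longer dereference to_hv_vcpu(vcpu)
directly and behave as if the guest never enabled Hyper-V emulation.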