author     Sean Christopherson <sean.j.christopherson@intel.com>   2020-03-21 23:26:00 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>                     2020-03-31 17:48:08 +0300
commit     afaf0b2f9b801c6eb2278b52d49e6a7d7b659cf1 (patch)
tree       5d2d8a4ba51297ef0c1a421327da285fb32d72d5 /arch/x86/include
parent     69c6f69aa3064ab6cc8426661f125ea75ffe899c (diff)
download   linux-afaf0b2f9b801c6eb2278b52d49e6a7d7b659cf1.tar.xz
KVM: x86: Copy kvm_x86_ops by value to eliminate layer of indirection
Replace the kvm_x86_ops pointer in common x86 with an instance of the
struct to save one pointer dereference when invoking functions. Copy the
struct by value to set the ops during kvm_init().

Arbitrarily use kvm_x86_ops.hardware_enable to track whether or not the
ops have been initialized, i.e. a vendor KVM module has been loaded.

Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200321202603.19355-7-sean.j.christopherson@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
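The pattern the commit message describes can be illustrated outside the kernel: common code holds the ops struct by value, the vendor-specific instance is copied in once at init time, and one member (here, the hardware_enable callback) doubles as the "a vendor module has been loaded" flag. The sketch below is a minimal, standalone C program written for this page, not kernel code; the names kvm_x86_ops_like, vendor_ops and init_ops are hypothetical and do not exist in the kernel.

    #include <stdio.h>
    #include <stddef.h>

    /* Stand-in for the ops table: function pointers plus plain data. */
    struct kvm_x86_ops_like {
            int (*hardware_enable)(void);
            unsigned int vm_size;
    };

    /* Common code: one global *instance*, not a pointer to one. */
    static struct kvm_x86_ops_like ops_by_value;

    /* Vendor module: its own ops instance. */
    static int vendor_hardware_enable(void)
    {
            return 0;
    }

    static const struct kvm_x86_ops_like vendor_ops = {
            .hardware_enable = vendor_hardware_enable,
            .vm_size         = 4096,
    };

    /* One struct copy at load time; afterwards callers access members
     * directly (ops_by_value.vm_size) with no extra pointer hop. */
    static void init_ops(const struct kvm_x86_ops_like *ops)
    {
            ops_by_value = *ops;
    }

    int main(void)
    {
            /* Before init: the callback is NULL, i.e. "no module loaded". */
            printf("vendor loaded: %d\n", ops_by_value.hardware_enable != NULL);

            init_ops(&vendor_ops);

            /* After init: the same NULL check now reports "loaded". */
            printf("vendor loaded: %d, vm_size: %u\n",
                   ops_by_value.hardware_enable != NULL,
                   ops_by_value.vm_size);
            return 0;
    }

The trade-off is a one-time struct copy and a small amount of static storage in exchange for removing a pointer dereference on every indirect call, and the NULL test on a single member replaces testing the (now gone) ops pointer itself.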
Diffstat (limited to 'arch/x86/include')
-rw-r--r--  arch/x86/include/asm/kvm_host.h  18
1 file changed, 9 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f4c5b49299ff..54f991244fae 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1274,13 +1274,13 @@ struct kvm_arch_async_pf {
extern u64 __read_mostly host_efer;
-extern struct kvm_x86_ops *kvm_x86_ops;
+extern struct kvm_x86_ops kvm_x86_ops;
extern struct kmem_cache *x86_fpu_cache;
#define __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
- return __vmalloc(kvm_x86_ops->vm_size,
+ return __vmalloc(kvm_x86_ops.vm_size,
GFP_KERNEL_ACCOUNT | __GFP_ZERO, PAGE_KERNEL);
}
void kvm_arch_free_vm(struct kvm *kvm);
@@ -1288,8 +1288,8 @@ void kvm_arch_free_vm(struct kvm *kvm);
#define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
- if (kvm_x86_ops->tlb_remote_flush &&
- !kvm_x86_ops->tlb_remote_flush(kvm))
+ if (kvm_x86_ops.tlb_remote_flush &&
+ !kvm_x86_ops.tlb_remote_flush(kvm))
return 0;
else
return -ENOTSUPP;
@@ -1375,7 +1375,7 @@ extern u64 kvm_mce_cap_supported;
*
* EMULTYPE_SKIP - Set when emulating solely to skip an instruction, i.e. to
* decode the instruction length. For use *only* by
- * kvm_x86_ops->skip_emulated_instruction() implementations.
+ * kvm_x86_ops.skip_emulated_instruction() implementations.
*
* EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
* retry native execution under certain conditions,
@@ -1669,14 +1669,14 @@ static inline bool kvm_irq_is_postable(struct kvm_lapic_irq *irq)
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
- if (kvm_x86_ops->vcpu_blocking)
- kvm_x86_ops->vcpu_blocking(vcpu);
+ if (kvm_x86_ops.vcpu_blocking)
+ kvm_x86_ops.vcpu_blocking(vcpu);
}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{
- if (kvm_x86_ops->vcpu_unblocking)
- kvm_x86_ops->vcpu_unblocking(vcpu);
+ if (kvm_x86_ops.vcpu_unblocking)
+ kvm_x86_ops.vcpu_unblocking(vcpu);
}
static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}