author    Boris Ostrovsky <boris.ostrovsky@oracle.com>    2019-12-05 04:30:51 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2020-02-11 15:35:40 +0300
commit    f7c1a6c67ff36532f1b0b339e3aae7701a2c0b1e (patch)
tree      00a88bed00532791eff4d842f3bc7919172fdd65 /arch
parent    d71eef9fcc0b81fd56e59afd305a215d81239894 (diff)
x86/kvm: Cache gfn to pfn translation
commit 917248144db5d7320655dbb41d3af0b8a0f3d589 upstream.

__kvm_map_gfn()'s call to gfn_to_pfn_memslot() is
* relatively expensive
* in certain cases (such as when done from atomic context) cannot be called

Stashing gfn-to-pfn mapping should help with both cases.

This is part of CVE-2019-3016.

Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reviewed-by: Joao Martins <joao.m.martins@oracle.com>
Cc: stable@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
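The gist of the change: each vCPU stashes the pfn that a gfn last translated to, tagged with the memslots generation current at translation time, so a cache hit skips the expensive gfn_to_pfn_memslot() walk and a generation bump invalidates the entry. A minimal, self-contained user-space sketch of that pattern follows; the names and the fake translator are illustrative only, not kernel API:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;
typedef uint64_t pfn_t;

/* Mirrors the shape of the kernel's cache: the last translation plus
 * the memslots generation it was made under. */
struct gfn_pfn_cache {
	uint64_t generation;
	gfn_t gfn;
	pfn_t pfn;
	bool valid;
};

/* Stand-in for the expensive gfn_to_pfn_memslot() walk. */
static pfn_t slow_translate(gfn_t gfn)
{
	return gfn + 0x1000; /* fake mapping, for demonstration only */
}

/* Return the pfn for gfn, reusing the cache while both the gfn and the
 * memslots generation still match. */
static pfn_t cached_translate(struct gfn_pfn_cache *c, gfn_t gfn,
			      uint64_t cur_generation)
{
	if (!c->valid || c->gfn != gfn || c->generation != cur_generation) {
		c->pfn = slow_translate(gfn);
		c->gfn = gfn;
		c->generation = cur_generation;
		c->valid = true;
	}
	return c->pfn;
}

int main(void)
{
	struct gfn_pfn_cache cache = { 0 };
	uint64_t gen = 1;

	printf("pfn=%#lx (miss)\n", (unsigned long)cached_translate(&cache, 0x42, gen));
	printf("pfn=%#lx (hit)\n",  (unsigned long)cached_translate(&cache, 0x42, gen));
	gen++; /* memslots updated: the cached translation is now stale */
	printf("pfn=%#lx (miss after generation bump)\n",
	       (unsigned long)cached_translate(&cache, 0x42, gen));
	return 0;
}

The generation tag is what makes the vCPU kick in the second hunk below sufficient: once the memslots generation moves, any stashed translation compares unequal and is redone on next use.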
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  1 +
-rw-r--r--  arch/x86/kvm/x86.c              | 10 ++++++++++
2 files changed, 11 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 17b93f54ee43..6d6473c170a4 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -671,6 +671,7 @@ struct kvm_vcpu_arch {
 		u64 last_steal;
 		struct gfn_to_hva_cache stime;
 		struct kvm_steal_time steal;
+		struct gfn_to_pfn_cache cache;
 	} st;
 
 	u64 tsc_offset;
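Aside: the Diffstat above is limited to arch/, so the definition of struct gfn_to_pfn_cache itself is not shown here; the same upstream commit adds it in include/linux/kvm_types.h. As best I recall the upstream patch, it is a plain four-field struct; a compilable stand-alone rendering with stand-in typedefs:

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;        /* stand-ins for the kernel's fixed-width types */
typedef u64 gfn_t;
typedef u64 kvm_pfn_t;

/* Believed shape of the cache added upstream in include/linux/kvm_types.h
 * (not visible in this arch-limited diff). */
struct gfn_to_pfn_cache {
	u64 generation;      /* memslots generation when pfn was translated */
	gfn_t gfn;           /* guest frame number this entry maps */
	kvm_pfn_t pfn;       /* cached host page frame number */
	bool dirty;          /* was the page written through this mapping? */
};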
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b55bf17153ab..072ea5eb466c 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9081,6 +9081,9 @@ static void fx_init(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 {
 	void *wbinvd_dirty_mask = vcpu->arch.wbinvd_dirty_mask;
+	struct gfn_to_pfn_cache *cache = &vcpu->arch.st.cache;
+
+	kvm_release_pfn(cache->pfn, cache->dirty, cache);
 
 	kvmclock_reset(vcpu);
@@ -9745,11 +9748,18 @@ out_free:
 
 void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
 {
+	struct kvm_vcpu *vcpu;
+	int i;
+
 	/*
 	 * memslots->generation has been incremented.
 	 * mmio generation may have reached its maximum value.
 	 */
 	kvm_mmu_invalidate_mmio_sptes(kvm, gen);
+
+	/* Force re-initialization of steal_time cache */
+	kvm_for_each_vcpu(i, vcpu, kvm)
+		kvm_vcpu_kick(vcpu);
 }
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
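Why a kick is enough: kvm_arch_memslots_updated() runs after memslots->generation has been bumped, and the steal-time code reaches the cache only through the generic mapping helpers, which compare the cached generation against the current one. Kicking each vCPU forces an exit, and the next steal-time update then misses the cache and re-translates. The consumer side lives in virt/kvm/kvm_main.c and is outside this arch-limited view; paraphrased from the upstream commit (a sketch from memory, not the verbatim source), the check in __kvm_map_gfn() looks roughly like this, where gfn, gen, slot, atomic and pfn are locals of that function:

	if (cache) {
		/* Reuse the stashed pfn only while gfn and generation match. */
		if (!cache->pfn || cache->gfn != gfn ||
		    cache->generation != gen) {
			if (atomic)
				return -EAGAIN;	/* no re-translation from atomic context */
			kvm_release_pfn(cache->pfn, cache->dirty, cache);
			cache->pfn = gfn_to_pfn_memslot(slot, gfn);
			cache->gfn = gfn;
			cache->dirty = false;
			cache->generation = gen;
		}
		pfn = cache->pfn;
	}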