author		David Matlack <dmatlack@google.com>	2021-08-17 15:46:45 +0300
committer	Paolo Bonzini <pbonzini@redhat.com>	2021-10-01 10:44:56 +0300
commit		53597858dbf8daab8db99c7e448558fb0f970dbd (patch)
tree		f312c610ccd84dc594c360e8c704e19216f313ac /arch/x86/kvm/mmu/mmu_internal.h
parent		8a9f566ae4a4156343afb5cbfa79401c07647b1d (diff)
download	linux-53597858dbf8daab8db99c7e448558fb0f970dbd.tar.xz
KVM: x86/mmu: Avoid memslot lookup in make_spte and mmu_try_to_unsync_pages
mmu_try_to_unsync_pages checks if page tracking is active for the given
gfn, which requires knowing the memslot. We can pass down the memslot
via make_spte to avoid this lookup.

The memslot is also handy for make_spte's marking of the gfn as dirty:
we can test whether dirty page tracking is enabled, and if so ensure that
pages are mapped as writable with 4K granularity.

Apart from the warning, no functional change is intended.

Signed-off-by: David Matlack <dmatlack@google.com>
Message-Id: <20210813203504.2742757-7-dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
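To illustrate the refactoring pattern outside the kernel, below is a minimal
standalone C sketch: the caller resolves the slot once and threads it through,
instead of each callee repeating the gfn-to-memslot lookup. All names here
(memslot, lookup_slot, try_to_unsync_*) are hypothetical stand-ins for the KVM
internals, not the actual kernel API.

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct memslot {
		unsigned long base_gfn;
		bool write_tracked;
	};

	static struct memslot slots[] = {
		{ .base_gfn = 0x100, .write_tracked = true  },
		{ .base_gfn = 0x200, .write_tracked = false },
	};

	/* Hypothetical stand-in for kvm_vcpu_gfn_to_memslot(): look up
	 * the slot covering a given gfn. */
	static struct memslot *lookup_slot(unsigned long gfn)
	{
		for (size_t i = 0; i < sizeof(slots) / sizeof(slots[0]); i++)
			if (slots[i].base_gfn == (gfn & ~0xffUL))
				return &slots[i];
		return NULL;
	}

	/* Before: the callee re-derives the memslot from the gfn itself. */
	static int try_to_unsync_before(unsigned long gfn)
	{
		struct memslot *slot = lookup_slot(gfn);  /* redundant lookup */

		if (!slot || slot->write_tracked)
			return -1;  /* page is write-tracked: cannot unsync */
		return 0;
	}

	/* After: the caller passes down the memslot it already resolved. */
	static int try_to_unsync_after(struct memslot *slot, unsigned long gfn)
	{
		(void)gfn;  /* still available for per-gfn checks */
		if (!slot || slot->write_tracked)
			return -1;
		return 0;
	}

	int main(void)
	{
		unsigned long gfn = 0x205;
		struct memslot *slot = lookup_slot(gfn);  /* resolved once */

		printf("before: %d, after: %d\n",
		       try_to_unsync_before(gfn), try_to_unsync_after(slot, gfn));
		return 0;
	}

In the kernel, the same idea also lets make_spte consult the slot's dirty page
tracking state directly rather than repeating the lookup.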
Diffstat (limited to 'arch/x86/kvm/mmu/mmu_internal.h')
 arch/x86/kvm/mmu/mmu_internal.h | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 94f4e754facb..585146a712d2 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -118,8 +118,8 @@ static inline bool kvm_vcpu_ad_need_write_protect(struct kvm_vcpu *vcpu)
 	       kvm_x86_ops.cpu_dirty_log_size;
 }
 
-int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn, bool can_unsync,
-			    bool speculative);
+int mmu_try_to_unsync_pages(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
+			    gfn_t gfn, bool can_unsync, bool speculative);
 
 void kvm_mmu_gfn_disallow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);
 void kvm_mmu_gfn_allow_lpage(const struct kvm_memory_slot *slot, gfn_t gfn);