Diffstat (limited to 'arch/x86/kvm/mmu.h')
-rw-r--r-- | arch/x86/kvm/mmu.h | 30
1 file changed, 19 insertions, 11 deletions
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 88d0ed5225a4..83e6c6965f1e 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -44,6 +44,12 @@
 #define PT32_ROOT_LEVEL 2
 #define PT32E_ROOT_LEVEL 3
 
+#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | \
+                               X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE | \
+                               X86_CR4_LA57)
+
+#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
+
 static __always_inline u64 rsvd_bits(int s, int e)
 {
         BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);
@@ -62,12 +68,9 @@ static __always_inline u64 rsvd_bits(int s, int e)
 void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
 void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);
 
-void
-reset_shadow_zero_bits_mask(struct kvm_vcpu *vcpu, struct kvm_mmu *context);
-
-void kvm_init_mmu(struct kvm_vcpu *vcpu, bool reset_roots);
-void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, u32 cr0, u32 cr4, u32 efer,
-                             gpa_t nested_cr3);
+void kvm_init_mmu(struct kvm_vcpu *vcpu);
+void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
+                             unsigned long cr4, u64 efer, gpa_t nested_cr3);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
                              bool accessed_dirty, gpa_t new_eptp);
 bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
@@ -162,11 +165,6 @@ static inline bool is_writable_pte(unsigned long pte)
         return pte & PT_WRITABLE_MASK;
 }
 
-static inline bool is_write_protection(struct kvm_vcpu *vcpu)
-{
-        return kvm_read_cr0_bits(vcpu, X86_CR0_WP);
-}
-
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE
@@ -232,4 +230,14 @@ int kvm_arch_write_log_dirty(struct kvm_vcpu *vcpu);
 int kvm_mmu_post_init_vm(struct kvm *kvm);
 void kvm_mmu_pre_destroy_vm(struct kvm *kvm);
 
+static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
+{
+        /*
+         * Read memslot_have_rmaps before rmap pointers. Hence, threads reading
+         * memslots_have_rmaps in any lock context are guaranteed to see the
+         * pointers. Pairs with smp_store_release in alloc_all_memslots_rmaps.
+         */
+        return smp_load_acquire(&kvm->arch.memslots_have_rmaps);
+}
+
 #endif
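
For readers unfamiliar with the barrier pairing that the comment in the new kvm_memslots_have_rmaps() helper describes, below is a minimal user-space sketch of the same release/acquire publication pattern, written with C11 atomics instead of the kernel's smp_store_release()/smp_load_acquire(). The names struct memslot_analogue, publish_rmaps() and have_rmaps() are illustrative only and do not exist in KVM; the real release side is alloc_all_memslots_rmaps(), referenced in the comment above.

/*
 * Illustrative user-space analogue (not kernel code) of the pairing in
 * kvm_memslots_have_rmaps(): the writer fully initializes the rmap
 * pointer before setting the flag with a release store, so any reader
 * that observes the flag via an acquire load also observes the pointer.
 * All names below are hypothetical; only the ordering pattern matches.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct memslot_analogue {
        int *rmap;              /* stands in for the per-memslot rmap arrays */
        atomic_bool have_rmaps; /* stands in for kvm->arch.memslots_have_rmaps */
};

/* Writer side: the ordering the kernel's release store relies on. */
static void publish_rmaps(struct memslot_analogue *s)
{
        s->rmap = calloc(16, sizeof(*s->rmap));         /* allocate and zero first */
        atomic_store_explicit(&s->have_rmaps, true,
                              memory_order_release);    /* then publish the flag */
}

/* Reader side: mirrors kvm_memslots_have_rmaps(). */
static bool have_rmaps(struct memslot_analogue *s)
{
        return atomic_load_explicit(&s->have_rmaps, memory_order_acquire);
}

int main(void)
{
        struct memslot_analogue s = { 0 };

        publish_rmaps(&s);
        if (have_rmaps(&s))
                printf("rmap[0] = %d\n", s.rmap[0]);    /* pointer guaranteed visible */
        return 0;
}

The point of the acquire load in the diff is that a reader may check the flag without holding any lock: if it sees the flag set, the rmap pointers published before the paired release store are guaranteed to be visible as well.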