author     Paolo Bonzini <pbonzini@redhat.com>  2015-05-18 16:03:39 +0300
committer  Paolo Bonzini <pbonzini@redhat.com>  2015-06-05 18:26:37 +0300
commit     699023e239658e62da6f42f47d31b54788521ec1
tree       01dcd8c0ad2100dc1c8a4834358a0fb0d2f0fa1c  /arch/x86/kvm/mmu.c
parent     9da0e4d5ac969909f6b435ce28ea28135a9cbd69
KVM: x86: add SMM to the MMU role, support SMRAM address space
This is now very simple to do. The only interesting part is a simple trick to find the right memslot in gfn_to_rmap, retrieving the address space from the spte role word. The same trick is used in the auditing code.

The comment on top of union kvm_mmu_page_role has been stale forever, so remove it. Speaking of stale code, remove pad_for_nice_hex_output too: it was splitting the "access" bitfield across two bytes and thus had effectively turned into pad_for_ugly_hex_output.

Reviewed-by: Radim Krčmář <rkrcmar@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
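The mmu.c hunks below only show the callers. The pieces they rely on, an smm field in union kvm_mmu_page_role and the kvm_memslots_for_spte_role() helper, come from the companion header changes in this series. As a rough sketch of how the trick fits together (the field layout, the helper body, and the __kvm_memslots() accessor that indexes the per-address-space memslot arrays are reconstructed here for illustration, not quoted from this diff):

/* Sketch (illustration only): SMM carried in the MMU role word. */
union kvm_mmu_page_role {
	unsigned word;
	struct {
		unsigned level:4;
		unsigned cr4_pae:1;
		unsigned quadrant:2;
		unsigned direct:1;
		unsigned access:3;	/* no longer split across two bytes */
		unsigned invalid:1;
		unsigned nxe:1;
		unsigned cr0_wp:1;
		unsigned smep_andnot_wp:1;
		unsigned smap_andnot_wp:1;
		unsigned :8;
		/*
		 * Giving SMM its own byte at the top of the word keeps SMM
		 * and non-SMM shadow pages from ever sharing a role, and
		 * makes the address-space index cheap to extract.
		 */
		unsigned smm:8;
	};
};

/*
 * The "simple trick": sp->role records which address space (normal or
 * SMRAM) a shadow page belongs to, so gfn_to_rmap() and the audit code
 * can pick the matching memslot set without needing a vcpu.
 */
static inline struct kvm_memslots *
kvm_memslots_for_spte_role(struct kvm *kvm, union kvm_mmu_page_role role)
{
	return __kvm_memslots(kvm, role.smm);
}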
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  15
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7619e9e1745c..c88f0e443669 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -806,13 +806,15 @@ static struct kvm_lpage_info *lpage_info_slot(gfn_t gfn,
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
struct kvm_lpage_info *linfo;
gfn_t gfn;
int i;

gfn = sp->gfn;
- slot = gfn_to_memslot(kvm, gfn);
+ slots = kvm_memslots_for_spte_role(kvm, sp->role);
+ slot = __gfn_to_memslot(slots, gfn);
for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
linfo = lpage_info_slot(gfn, slot, i);
linfo->write_count += 1;
@@ -822,13 +824,15 @@ static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
struct kvm_lpage_info *linfo;
gfn_t gfn;
int i;

gfn = sp->gfn;
- slot = gfn_to_memslot(kvm, gfn);
+ slots = kvm_memslots_for_spte_role(kvm, sp->role);
+ slot = __gfn_to_memslot(slots, gfn);
for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
linfo = lpage_info_slot(gfn, slot, i);
linfo->write_count -= 1;
@@ -1045,9 +1049,11 @@ static unsigned long *__gfn_to_rmap(gfn_t gfn, int level,
*/
static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, struct kvm_mmu_page *sp)
{
+ struct kvm_memslots *slots;
struct kvm_memory_slot *slot;

- slot = gfn_to_memslot(kvm, gfn);
+ slots = kvm_memslots_for_spte_role(kvm, sp->role);
+ slot = __gfn_to_memslot(slots, gfn);
return __gfn_to_rmap(gfn, sp->role.level, slot);
}
@@ -3924,6 +3930,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
struct kvm_mmu *context = &vcpu->arch.mmu;

context->base_role.word = 0;
+ context->base_role.smm = is_smm(vcpu);
context->page_fault = tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
@@ -3985,6 +3992,7 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
= smep && !is_write_protection(vcpu);
context->base_role.smap_andnot_wp
= smap && !is_write_protection(vcpu);
+ context->base_role.smm = is_smm(vcpu);
}
EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
@@ -4268,6 +4276,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
mask.nxe = 1;
mask.smep_andnot_wp = 1;
mask.smap_andnot_wp = 1;
+ mask.smm = 1;
/*
* If we don't have indirect shadow pages, it means no page is