author     Avi Kivity <avi@qumranet.com>  2007-05-01 17:53:31 +0400
committer  Avi Kivity <avi@qumranet.com>  2007-07-16 13:05:39 +0400
commit     0028425f647b6b78a0de8810d6b782fc3ce6c272 (patch)
tree       8afece215ec531f993c28cc3dee17b0c2c1dd3b1 /drivers/kvm/paging_tmpl.h
parent     fce0657ff9f14f6b1f147b5fcd6db2f54c06424e (diff)
download   linux-0028425f647b6b78a0de8810d6b782fc3ce6c272.tar.xz
KVM: Update shadow pte on write to guest pte
A typical demand page/copy on write pattern is:

- page fault on vaddr
- kvm propagates fault to guest
- guest handles fault, updates pte
- kvm traps write, clears shadow pte, resumes guest
- guest returns to userspace, re-faults on same vaddr
- kvm installs shadow pte, resumes guest
- guest continues

So, three vmexits for a single guest page fault.  But if instead of clearing
the page table entry, we update it to correspond to the value that the guest
has just written, we eliminate the third vmexit.

This patch does exactly that, reducing kbuild time by about 10%.

Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/paging_tmpl.h')
-rw-r--r--  drivers/kvm/paging_tmpl.h  15
1 files changed, 15 insertions(+), 0 deletions(-)
diff --git a/drivers/kvm/paging_tmpl.h b/drivers/kvm/paging_tmpl.h
index bc64cceec039..10ba0a80ce59 100644
--- a/drivers/kvm/paging_tmpl.h
+++ b/drivers/kvm/paging_tmpl.h
@@ -202,6 +202,21 @@ static void FNAME(set_pte)(struct kvm_vcpu *vcpu, u64 guest_pte,
 		       guest_pte & PT_DIRTY_MASK, access_bits, gfn);
 }
 
+static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *page,
+			      u64 *spte, const void *pte, int bytes)
+{
+	pt_element_t gpte;
+
+	if (bytes < sizeof(pt_element_t))
+		return;
+	gpte = *(const pt_element_t *)pte;
+	if (~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK))
+		return;
+	pgprintk("%s: gpte %llx spte %p\n", __FUNCTION__, (u64)gpte, spte);
+	FNAME(set_pte)(vcpu, gpte, spte, 6,
+		       (gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT);
+}
+
 static void FNAME(set_pde)(struct kvm_vcpu *vcpu, u64 guest_pde,
 			   u64 *shadow_pte, u64 access_bits, gfn_t gfn)
 {
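
The guard at the top of FNAME(update_pte) is what keeps this fast path safe: the
guest pte is propagated into the shadow pte only when it is both present and
accessed; anything else is skipped and left to the ordinary page-fault path.
(The access_bits value 6 passed to FNAME(set_pte) corresponds to the x86
writable and user bits, PT_WRITABLE_MASK | PT_USER_MASK.)  The stand-alone
userspace sketch below is illustration only, not kernel code: the bit positions
are the architectural x86 PTE bits, and PT_BASE_ADDR_MASK is simplified to a
plain low-bit mask, ignoring the reserved high bits the kernel's definition
also strips.

/*
 * Minimal userspace sketch of the update_pte guard -- not kernel code.
 * Build: cc -o pte_guard pte_guard.c && ./pte_guard
 */
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK   (1ULL << 0)
#define PT_ACCESSED_MASK  (1ULL << 5)
#define PAGE_SHIFT        12
#define PT_BASE_ADDR_MASK (~0ULL << PAGE_SHIFT)	/* simplified */

typedef uint64_t pt_element_t;

/*
 * ~gpte has a bit set for every bit that is *clear* in gpte, so the AND
 * is non-zero when either the present or the accessed bit is missing --
 * exactly the case update_pte declines to handle and leaves to the
 * regular page-fault path.
 */
static int worth_updating(pt_element_t gpte)
{
	return !(~gpte & (PT_PRESENT_MASK | PT_ACCESSED_MASK));
}

int main(void)
{
	pt_element_t gpte = 0x12345000ULL | PT_PRESENT_MASK | PT_ACCESSED_MASK;

	printf("present+accessed gpte -> update: %d, gfn 0x%llx\n",
	       worth_updating(gpte),
	       (unsigned long long)((gpte & PT_BASE_ADDR_MASK) >> PAGE_SHIFT));

	gpte &= ~PT_ACCESSED_MASK;	/* guest has not touched the page yet */
	printf("present-only gpte     -> update: %d\n", worth_updating(gpte));
	return 0;
}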