author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>    2010-07-16 07:30:18 +0400
committer  Avi Kivity <avi@redhat.com>                      2010-08-02 07:41:01 +0400
commit     9a3aad70572c3f4d55e7f09ac4eb313d41d0a484 (patch)
tree       f3c1efd08eed5b0f37078c9b91658e25e7775808
parent     e4b502ead259fcf70839414abb7c8cdc3b523f01 (diff)
download   linux-9a3aad70572c3f4d55e7f09ac4eb313d41d0a484.tar.xz
KVM: MMU: use __xchg_spte more smartly
Sometimes atomically setting the spte is not needed; this patch calls __xchg_spte() more smartly.

Note: if the old mapping's accessed bit is already set, we do not need an atomic operation, since the accessed bit cannot be lost.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  arch/x86/kvm/mmu.c  9
1 file changed, 7 insertions, 2 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e4b862eb8885..0dcc95e09876 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -682,9 +682,14 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 static void set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
-	u64 old_spte;
+	u64 old_spte = *sptep;
+
+	if (!shadow_accessed_mask || !is_shadow_present_pte(old_spte) ||
+	      old_spte & shadow_accessed_mask) {
+		__set_spte(sptep, new_spte);
+	} else
+		old_spte = __xchg_spte(sptep, new_spte);
 
-	old_spte = __xchg_spte(sptep, new_spte);
 	if (!is_rmap_spte(old_spte))
 		return;
 	pfn = spte_to_pfn(old_spte);
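
As a rough, user-space illustration of the reasoning in the commit message above (not the kernel's actual code), the C11 sketch below shows when a plain store can replace an atomic exchange while retiring a PTE-like word whose accessed bit hardware may set concurrently. The names PRESENT_BIT, ACCESSED_BIT, and update_pte() are hypothetical stand-ins for is_shadow_present_pte(), shadow_accessed_mask, and set_spte_track_bits().

/*
 * Hypothetical user-space sketch using C11 atomics, NOT the kernel code:
 * it only illustrates when an atomic exchange is actually required.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define PRESENT_BIT  (1ULL << 0)
#define ACCESSED_BIT (1ULL << 5)	/* stand-in for shadow_accessed_mask */

static uint64_t update_pte(_Atomic uint64_t *ptep, uint64_t new_pte)
{
	uint64_t old_pte = atomic_load_explicit(ptep, memory_order_relaxed);

	if (!(old_pte & PRESENT_BIT) || (old_pte & ACCESSED_BIT)) {
		/*
		 * Not present, or the accessed bit is already set: a
		 * concurrent hardware update cannot add information we
		 * would lose, so a plain store is enough.
		 */
		atomic_store_explicit(ptep, new_pte, memory_order_relaxed);
	} else {
		/*
		 * The accessed bit may still be set under us; exchange
		 * atomically so the final old value (accessed bit included)
		 * is captured before being overwritten.
		 */
		old_pte = atomic_exchange(ptep, new_pte);
	}
	return old_pte;
}

int main(void)
{
	_Atomic uint64_t pte = PRESENT_BIT | ACCESSED_BIT;
	uint64_t old = update_pte(&pte, 0);

	printf("old pte was 0x%llx\n", (unsigned long long)old);
	return 0;
}

In this sketch, as in the patch, the common cases (accessed bit already recorded, or mapping not present) avoid the expensive atomic exchange; only the case where a concurrent accessed-bit update could otherwise be lost pays for it.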