author     David Hildenbrand <dahi@linux.vnet.ibm.com>    2016-01-27 19:24:03 +0300
committer  Christian Borntraeger <borntraeger@de.ibm.com>    2016-06-20 10:54:40 +0300
commit     f4debb40903978bbddfb9e877ca4d2f27e26567f
tree       b2002d9cc11358bbf937695d8344d7ed2441c2f0
parent     7a6741576b268820c8bd2b66288e6ff3bc57d4a7
s390/mm: take ipte_lock during shadow faults
Let's take the ipte_lock while working on guest-2-provided page tables, just like the other gaccess functions do.

Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: David Hildenbrand <dahi@linux.vnet.ibm.com>
Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Diffstat (limited to 'arch/s390/kvm')
-rw-r--r--  arch/s390/kvm/gaccess.c | 11
-rw-r--r--  arch/s390/kvm/gaccess.h |  3
2 files changed, 12 insertions(+), 2 deletions(-)
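The sketch below condenses the locking order the patch establishes in kvm_s390_shadow_fault(). It is a simplified illustration, not the literal kernel source: it uses only identifiers visible in the diff (ipte_lock/ipte_unlock, gmap_shadow_pgt_lookup, gmap_shadow_page), and the table walk and most error handling are elided.

    /*
     * Condensed sketch of the locking order introduced by this patch;
     * not the literal kernel code (the PTE read and error paths are elided).
     */
    int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
                              unsigned long saddr)
    {
            union page_table_entry pte;
            unsigned long pgt;
            int dat_protection;
            int rc;

            down_read(&sg->mm->mmap_sem);   /* pin the guest-1 address space */
            /*
             * Hold the ipte lock so the guest-2 tables - and thus the parent
             * tables/pointers read below - cannot change while the shadow
             * tables are built up.
             */
            ipte_lock(vcpu);

            rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection);
            /* ... on failure, build the shadow tables; then read the guest
             *     page-table entry into pte (elided) ... */
            if (!rc)
                    rc = gmap_shadow_page(sg, saddr, __pte(pte.val));

            ipte_unlock(vcpu);              /* release before dropping mmap_sem */
            up_read(&sg->mm->mmap_sem);
            return rc;
    }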
diff --git a/arch/s390/kvm/gaccess.c b/arch/s390/kvm/gaccess.c
index b2783dd71854..e70f916c1079 100644
--- a/arch/s390/kvm/gaccess.c
+++ b/arch/s390/kvm/gaccess.c
@@ -1073,6 +1073,7 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
/**
* kvm_s390_shadow_fault - handle fault on a shadow page table
+ * @vcpu: virtual cpu
* @sg: pointer to the shadow guest address space structure
* @saddr: faulting address in the shadow gmap
*
@@ -1082,7 +1083,8 @@ static int kvm_s390_shadow_tables(struct gmap *sg, unsigned long saddr,
* - -EFAULT when accessing invalid guest addresses
* - -ENOMEM if out of memory
*/
-int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr)
+int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
+ unsigned long saddr)
{
union vaddress vaddr;
union page_table_entry pte;
@@ -1091,6 +1093,12 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr)
int rc;
down_read(&sg->mm->mmap_sem);
+ /*
+ * We don't want any guest-2 tables to change - so the parent
+ * tables/pointers we read stay valid - unshadowing is however
+ * always possible - only guest_table_lock protects us.
+ */
+ ipte_lock(vcpu);
rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, &dat_protection);
if (rc)
@@ -1105,6 +1113,7 @@ int kvm_s390_shadow_fault(struct gmap *sg, unsigned long saddr)
rc = PGM_TRANSLATION_SPEC;
if (!rc)
rc = gmap_shadow_page(sg, saddr, __pte(pte.val));
+ ipte_unlock(vcpu);
up_read(&sg->mm->mmap_sem);
return rc;
}
diff --git a/arch/s390/kvm/gaccess.h b/arch/s390/kvm/gaccess.h
index 0d044d09dbd8..8756569ad938 100644
--- a/arch/s390/kvm/gaccess.h
+++ b/arch/s390/kvm/gaccess.h
@@ -361,6 +361,7 @@ void ipte_unlock(struct kvm_vcpu *vcpu);
int ipte_lock_held(struct kvm_vcpu *vcpu);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);
-int kvm_s390_shadow_fault(struct gmap *shadow, unsigned long saddr);
+int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
+ unsigned long saddr);
#endif /* __KVM_S390_GACCESS_H */
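Call sites are outside this diff, so existing callers have to pass the extra vcpu argument once the prototype in gaccess.h changes. A hypothetical wrapper (handle_shadow_fault is an illustrative name, not from this patch) would now look like:

    /* Hypothetical caller, only to illustrate the prototype change;
     * the real call sites are not part of this diff. */
    static int handle_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *sg,
                                   unsigned long saddr)
    {
            /* before this patch: return kvm_s390_shadow_fault(sg, saddr); */
            return kvm_s390_shadow_fault(vcpu, sg, saddr);
    }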