author    Marc Zyngier <marc.zyngier@arm.com>  2017-01-25 15:29:59 +0300
committer Marc Zyngier <marc.zyngier@arm.com>  2017-01-30 16:47:37 +0300
commit    8f36ebaf21fdae99c091c67e8b6fab33969f2667 (patch)
tree      19f178720ef8382deb6247a6b4c5c388fe479799
parent    e363e05e12d76b3311618d442ad545dff4d08728 (diff)
download  linux-8f36ebaf21fdae99c091c67e8b6fab33969f2667.tar.xz
arm/arm64: KVM: Enforce unconditional flush to PoC when mapping to stage-2
When we fault in a page, we flush it to the PoC (Point of Coherency)
if the faulting vcpu has its own caches off, so that it can observe
the page we just brought in.

But if the vcpu has its caches on, we skip that step. Bad things
happen when *another* vcpu tries to access that page with its own
caches disabled. At that point, there is no guarantee that the data
has made it to the PoC, and we access stale data.

The obvious fix is to always flush to PoC when a page is faulted in,
no matter what the state of the vcpu is.

Cc: stable@vger.kernel.org
Fixes: 2d58b733c876 ("arm64: KVM: force cache clean on page fault when caches are off")
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
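The fix boils down to dropping the guard around the d-cache flush. A
minimal before/after sketch, using the names from the arm64 hunk below
(va and size as in __coherent_cache_guest_page):

	/* Before: flush only if the *faulting* vcpu runs uncached, so a
	 * page faulted in by a vcpu with caches on was never pushed to
	 * the PoC. */
	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
		kvm_flush_dcache_to_poc(va, size);

	/* After: flush unconditionally; another vcpu reading the page
	 * with its caches off now sees it at the PoC. */
	kvm_flush_dcache_to_poc(va, size);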
-rw-r--r--  arch/arm/include/asm/kvm_mmu.h   | 9
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 3
2 files changed, 2 insertions(+), 10 deletions(-)
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 74a44727f8e1..a58bbaa3ec60 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -150,18 +150,12 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
 	 * and iterate over the range.
 	 */
 
-	bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
-
 	VM_BUG_ON(size & ~PAGE_MASK);
 
-	if (!need_flush && !icache_is_pipt())
-		goto vipt_cache;
-
 	while (size) {
 		void *va = kmap_atomic_pfn(pfn);
 
-		if (need_flush)
-			kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+		kvm_flush_dcache_to_poc(va, PAGE_SIZE);
 
 		if (icache_is_pipt())
 			__cpuc_coherent_user_range((unsigned long)va,
@@ -173,7 +167,6 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
 		kunmap_atomic(va);
 	}
 
-vipt_cache:
 	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
 		/* any kind of VIPT cache */
 		__flush_icache_all();
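For readability, here is how the 32-bit function reads after the
patch, pieced together from the two hunks above. The loop-tail lines
between the hunks (size -= PAGE_SIZE; pfn++;) and the elided leading
comment are filled in from the surrounding source of this kernel
series; treat this as a reading aid, not an authoritative copy:

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	/* (icache aliasing comment elided) */

	VM_BUG_ON(size & ~PAGE_MASK);

	while (size) {
		void *va = kmap_atomic_pfn(pfn);

		/* Unconditionally clean each page to the PoC. */
		kvm_flush_dcache_to_poc(va, PAGE_SIZE);

		/* PIPT icache: make I and D coherent for this page. */
		if (icache_is_pipt())
			__cpuc_coherent_user_range((unsigned long)va,
						   (unsigned long)va + PAGE_SIZE);

		size -= PAGE_SIZE;
		pfn++;

		kunmap_atomic(va);
	}

	if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}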
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 6f72fe8b0e3e..6d22017ebbad 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -241,8 +241,7 @@ static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
 {
 	void *va = page_address(pfn_to_page(pfn));
 
-	if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
-		kvm_flush_dcache_to_poc(va, size);
+	kvm_flush_dcache_to_poc(va, size);
 
 	if (!icache_is_aliasing()) {	/* PIPT */
 		flush_icache_range((unsigned long)va,
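The arm64 hunk above ends mid-statement. Sketched below is the whole
function as it reads after the patch; everything past the truncated
flush_icache_range() call is filled in from the surrounding source of
this kernel series and is an assumption, not part of the diff itself:

static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
					       kvm_pfn_t pfn,
					       unsigned long size,
					       bool ipa_uncached)
{
	void *va = page_address(pfn_to_page(pfn));

	/* Unconditionally clean the range to the PoC. */
	kvm_flush_dcache_to_poc(va, size);

	if (!icache_is_aliasing()) {		/* PIPT */
		flush_icache_range((unsigned long)va,
				   (unsigned long)va + size);
	} else if (!icache_is_aivivt()) {	/* non ASID-tagged VIVT */
		/* any kind of VIPT cache */
		__flush_icache_all();
	}
}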