author	Christoffer Dall <christoffer.dall@linaro.org>	2023-02-09 20:58:20 +0300
committer	Oliver Upton <oliver.upton@linux.dev>	2023-02-11 13:13:30 +0300
commit	191e0e155521182051fc2f32dde237b6fde2b0b4 (patch)
tree	ee99d2b595ccd538d2637740e317eccc533145d5 /arch/arm64/include/asm/kvm_mmu.h
parent	9f75b6d447d712b6ed9abc869eedf456fe7f5e9b (diff)
download	linux-191e0e155521182051fc2f32dde237b6fde2b0b4.tar.xz
KVM: arm64: nv: Only toggle cache for virtual EL2 when SCTLR_EL2 changes
So far we were flushing almost the entire universe whenever a VM would load/unload the SCTLR_EL1 and the two versions of that register had different MMU enabled settings. This turned out to be so slow that it prevented forward progress for a nested VM, because a scheduler timer tick interrupt would always be pending when we reached the nested VM.

To avoid this problem, we consider the SCTLR_EL2 when evaluating if caches are on or off when entering virtual EL2 (because this is the value that we end up shadowing onto the hardware EL1 register).

Reviewed-by: Alexandru Elisei <alexandru.elisei@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Jintack Lim <jintack.lim@linaro.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20230209175820.1939006-19-maz@kernel.org
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
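For context, and not part of this patch: the predicate being changed is what KVM consults when a trapped SCTLR write may have turned the MMU/caches on or off. A minimal sketch of that consumer, assuming the kvm_toggle_cache()/stage2_flush_vm() helpers that sit alongside this predicate in the arm64 KVM code, shows why an EL2-aware answer matters: the stage 2 flush is only paid for when the cache state genuinely flips, so misreporting the state for a vCPU running in virtual EL2 forces the flush on every transition.

/*
 * Sketch only (not from this commit). When the guest rewrites SCTLR,
 * KVM compares the cache state before and after the write and only
 * performs the expensive stage 2 flush on a real on/off transition.
 * With NV, answering "are the caches on?" from SCTLR_EL1 while the
 * vCPU runs in virtual EL2 misreports that state, which is what the
 * patch below avoids.
 */
static inline void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/* Flush stage 2 only when the cache state actually changed */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* Caches are now on, stop trapping VM register writes */
	if (now_enabled)
		*vcpu_hcr(vcpu) &= ~HCR_TVM;
}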
Diffstat (limited to 'arch/arm64/include/asm/kvm_mmu.h')
-rw-r--r--	arch/arm64/include/asm/kvm_mmu.h	11
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index e4a7e6369499..2890d57bec30 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -115,6 +115,7 @@ alternative_cb_end
 #include <asm/cache.h>
 #include <asm/cacheflush.h>
 #include <asm/mmu_context.h>
+#include <asm/kvm_emulate.h>
 #include <asm/kvm_host.h>
 
 void kvm_update_va_mask(struct alt_instr *alt,
@@ -192,7 +193,15 @@ struct kvm;
 
 static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
 {
-	return (vcpu_read_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
+	u64 cache_bits = SCTLR_ELx_M | SCTLR_ELx_C;
+	int reg;
+
+	if (vcpu_is_el2(vcpu))
+		reg = SCTLR_EL2;
+	else
+		reg = SCTLR_EL1;
+
+	return (vcpu_read_sys_reg(vcpu, reg) & cache_bits) == cache_bits;
 }
 
 static inline void __clean_dcache_guest_page(void *va, size_t size)
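Usage note (a sketch under assumed names, not shown in this diff): the trap handler for VM-control register writes typically samples vcpu_has_cache_enabled() before applying the guest's write and hands that result to the toggle path afterwards, so the EL2-aware predicate above directly decides whether any flush happens.

/*
 * Illustrative trap handler for a guest write to SCTLR_ELx (assumed
 * shape; the real handler lives in sys_regs.c). The before/after pair
 * is what feeds kvm_toggle_cache().
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);

	BUG_ON(!p->is_write);

	vcpu_write_sys_reg(vcpu, p->regval, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}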