author		Marc Zyngier <marc.zyngier@arm.com>	2016-01-29 18:01:28 +0300
committer	Marc Zyngier <marc.zyngier@arm.com>	2016-02-29 21:34:15 +0300
commit		57c841f131ef295b583365d2fddd6b0d16e82c10 (patch)
tree		e015fcf1eccbcc2d01f87762cddb29738e1a3bf8 /arch/arm64/include/asm/kvm_emulate.h
parent		402f352876ba0df574533e59d72fc3e9871f791a (diff)
download	linux-57c841f131ef295b583365d2fddd6b0d16e82c10.tar.xz
arm/arm64: KVM: Handle out-of-RAM cache maintenance as a NOP
So far, our handling of cache maintenance by VA has been pretty simple: either the access is in the guest RAM and generates an S2 fault, which results in the page being mapped RW, or we go down the io_mem_abort() path and nuke the guest.

The first one is fine, but the second one is extremely weird. Treating the CM as an I/O is wrong, and nothing in the ARM ARM indicates that we should generate a fault for something that cannot end up in the cache anyway (even if the guest maps it, it will keep on faulting at stage-2 for emulation).

So let's just skip this instruction, and let the guest get away with it.

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
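The hunk below (see Diffstat) only adds the syndrome-decoding helper; the actual skipping happens in the stage-2 abort path elsewhere in this series. A minimal sketch of that consumer, assuming a handler shaped like kvm_handle_guest_abort() and the pre-existing kvm_skip_instr()/kvm_vcpu_trap_il_is32bit() helpers (none of which are part of this file's hunk, so treat the exact placement as illustrative):

	/*
	 * Sketch only: no memslot backs this IPA. Before treating the
	 * access as MMIO, check whether it is really a cache maintenance
	 * operation by VA. Such an address can never end up in the cache,
	 * so just skip the instruction and resume the guest.
	 */
	if (kvm_vcpu_dabt_is_cm(vcpu)) {
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		return 1;	/* handled, return to the guest */
	}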
Diffstat (limited to 'arch/arm64/include/asm/kvm_emulate.h')
-rw-r--r--	arch/arm64/include/asm/kvm_emulate.h | 5 +++++
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 779a5872a2c5..4df8e7a58c6b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -189,6 +189,11 @@ static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
 	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
 }
 
+static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+{
+	return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+}
+
 static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
 {
 	return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
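For context (not part of this diff): ESR_ELx_CM is the ESR_ELx.CM syndrome bit, which the CPU sets on a data abort caused by a cache maintenance instruction operating by VA. In arch/arm64/include/asm/esr.h of this era it is defined roughly as:

	#define ESR_ELx_CM	(UL(1) << 8)

The !! in the new helper collapses the masked bit into a plain 0/1 boolean, matching the style of the neighbouring kvm_vcpu_dabt_* predicates.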