Diffstat (limited to 'arch/arm64/kvm/arm.c')
 arch/arm64/kvm/arm.c | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index f56122eedffc..a3b32df1afb0 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -808,6 +808,25 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
 		preempt_enable();
+		/*
+		 * The ARMv8 architecture doesn't give the hypervisor
+		 * a mechanism to prevent a guest from dropping to AArch32 EL0
+		 * if implemented by the CPU. If we spot the guest in such
+		 * a state and decide it wasn't supposed to do so (as with
+		 * the asymmetric AArch32 case), return to userspace with
+		 * a fatal error.
+		 */
+		if (!system_supports_32bit_el0() && vcpu_mode_is_32bit(vcpu)) {
+			/*
+			 * As we have caught the guest red-handed, decide that
+			 * it isn't fit for purpose anymore by making the vcpu
+			 * invalid. The VMM can try and fix it by issuing a
+			 * KVM_ARM_VCPU_INIT if it really wants to.
+			 */
+			vcpu->arch.target = -1;
+			ret = ARM_EXCEPTION_IL;
+		}
+
 		ret = handle_exit(vcpu, ret);
 	}
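
The in-kernel comment above notes that the VMM can recover such a vcpu by issuing KVM_ARM_VCPU_INIT. Below is a minimal userspace sketch of that recovery path, not part of the patch. It assumes that handle_exit() surfaces ARM_EXCEPTION_IL to userspace as a failed KVM_RUN with run->exit_reason set to KVM_EXIT_FAIL_ENTRY; the function name run_vcpu_once() and the vm_fd/vcpu_fd/run variables are hypothetical names used only for illustration.

/*
 * Hypothetical VMM-side recovery sketch (not part of this patch).
 * Assumes vm_fd, vcpu_fd and the mmap'ed struct kvm_run are already set up.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int run_vcpu_once(int vm_fd, int vcpu_fd, struct kvm_run *run)
{
	struct kvm_vcpu_init init;

	if (ioctl(vcpu_fd, KVM_RUN, 0) == 0)
		return 0;			/* normal exit */

	if (run->exit_reason != KVM_EXIT_FAIL_ENTRY)
		return -1;			/* unrelated failure */

	/*
	 * The kernel invalidated the vcpu (vcpu->arch.target = -1), so any
	 * further KVM_RUN would fail. Re-initialise it with the preferred
	 * target, which also resets the vcpu's register state.
	 */
	memset(&init, 0, sizeof(init));
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;
	if (ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init) < 0)
		return -1;

	return 1;				/* recovered; caller may retry KVM_RUN */
}

In practice most VMMs would likely treat KVM_EXIT_FAIL_ENTRY as fatal and shut the guest down, since re-initialising the vcpu discards its state; the sketch only illustrates the escape hatch the in-kernel comment describes.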