author | Marc Zyngier <maz@kernel.org> | 2021-11-16 15:39:35 +0300 |
---|---|---|
committer | Marc Zyngier <maz@kernel.org> | 2021-11-24 16:30:50 +0300 |
commit | 7183b2b5ae6b8d77a37069566d77cf2a74060f7e (patch) | |
tree | f3c21674d9977b3e8da895e089872d25650e9d8e /arch/arm64/kvm/hyp/nvhe/switch.c | |
parent | 83bb2c1a01d7127d5adc7d69d7aaa3f7072de2b4 (diff) | |
download | linux-7183b2b5ae6b8d77a37069566d77cf2a74060f7e.tar.xz | |
KVM: arm64: Move pkvm's special 32bit handling into a generic infrastructure
Protected KVM is trying to turn AArch32 exceptions into an illegal
exception entry. Unfortunately, it does that in a way that is a bit
abrupt, and too early for PSTATE to be available.
Instead, move it to the fixup code, which is a more reasonable place
for it. This will also be useful for the NV code.
Reviewed-by: Fuad Tabba <tabba@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
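For context, the shape of the change is roughly this: instead of the nVHE run loop checking a boolean return from handle_aarch32_guest() and breaking out, the renamed early_exit_filter() runs from the exit-fixup path and signals the problem by rewriting the exit code to an illegal-exception exit. The sketch below is a standalone model of that control flow, not kernel code: the stubbed types, the constant values, and the exact call site of early_exit_filter() inside fixup_guest_exit() are assumptions, since this page only shows the nvhe/switch.c side of the patch.

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n)				(1ULL << (n))
#define ARM_EXIT_WITH_SERROR_BIT	31	/* stub value for the model */
#define ARM_EXCEPTION_IL		1	/* stub value for the model */

/* Minimal stand-in for the real vcpu state. */
struct kvm_vcpu {
	bool ran_aarch32;
	bool aarch32_allowed;
};

/*
 * After the patch the filter returns void: rather than telling the run
 * loop to break out, it rewrites *exit_code into an illegal-exception
 * exit that the normal exit handling then bounces back to the host.
 */
static void early_exit_filter(struct kvm_vcpu *vcpu, uint64_t *exit_code)
{
	if (vcpu->ran_aarch32 && !vcpu->aarch32_allowed) {
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}
}

/*
 * Fixup path (assumed call site): run the early filter first, then
 * whatever regular exit handling applies. Returning false sends the
 * exit back to the host instead of re-entering the guest.
 */
static bool fixup_guest_exit(struct kvm_vcpu *vcpu, uint64_t *exit_code)
{
	early_exit_filter(vcpu, exit_code);

	/* ... regular exit handlers would run here ... */
	return (*exit_code & ~BIT(ARM_EXIT_WITH_SERROR_BIT)) != ARM_EXCEPTION_IL;
}

int main(void)
{
	struct kvm_vcpu vcpu = { .ran_aarch32 = true, .aarch32_allowed = false };
	uint64_t exit_code = 0;

	/* The run loop itself no longer special-cases AArch32. */
	do {
		/* __guest_enter(&vcpu) would run here and set exit_code. */
	} while (fixup_guest_exit(&vcpu, &exit_code));

	printf("exit to host with exit_code %#llx\n",
	       (unsigned long long)exit_code);
	return 0;
}
```

Presumably the filter is invoked from the fixup code shared by the VHE and nVHE switch paths, which this diffstat (limited to nvhe/switch.c) does not show; the model only illustrates why the boolean return and the break in __kvm_vcpu_run() are no longer needed.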
Diffstat (limited to 'arch/arm64/kvm/hyp/nvhe/switch.c')
-rw-r--r-- | arch/arm64/kvm/hyp/nvhe/switch.c | 8 |
1 file changed, 1 insertion, 7 deletions
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index c0e3fed26d93..d13115a12434 100644
--- a/arch/arm64/kvm/hyp/nvhe/switch.c
+++ b/arch/arm64/kvm/hyp/nvhe/switch.c
@@ -233,7 +233,7 @@ static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
  * Returns false if the guest ran in AArch32 when it shouldn't have, and
  * thus should exit to the host, or true if a the guest run loop can continue.
  */
-static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
+static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
 	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
 
@@ -248,10 +248,7 @@ static bool handle_aarch32_guest(struct kvm_vcpu *vcpu, u64 *exit_code)
 		vcpu->arch.target = -1;
 		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
 		*exit_code |= ARM_EXCEPTION_IL;
-		return false;
 	}
-
-	return true;
 }
 
 /* Switch to the guest for legacy non-VHE systems */
@@ -316,9 +313,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 		/* Jump in the fire! */
 		exit_code = __guest_enter(vcpu);
 
-		if (unlikely(!handle_aarch32_guest(vcpu, &exit_code)))
-			break;
-
 		/* And we're baaack! */
 	} while (fixup_guest_exit(vcpu, &exit_code));