author | Nicholas Piggin <npiggin@gmail.com> | 2022-03-03 08:33:12 +0300
committer | Michael Ellerman <mpe@ellerman.id.au> | 2022-05-13 14:34:33 +0300
commit | ad5ace91c55e7bd16813617f67bcb7619d51a295 (patch)
tree | 6ef784a62536d89b033da19995ebc30cd4a7a01e /arch/powerpc/kvm
parent | 026728dc5d41f830e8194fe01e432dd4eb9b3d9a (diff)
download | linux-ad5ace91c55e7bd16813617f67bcb7619d51a295.tar.xz
KVM: PPC: Book3S HV P9: Move cede logic out of XIVE escalation rearming
Move the cede abort logic out of xive escalation rearming and into
the caller to prepare for handling a similar case with nested guest
entry.
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
Reviewed-by: Cédric Le Goater <clg@kaod.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20220303053315.1056880-4-npiggin@gmail.com
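As a concrete illustration of the split of responsibility described above, here is a minimal standalone sketch in C. The `toy_*` names and the two-field vcpu structure are invented for this example and are not kernel code; only the shape of the logic mirrors the book3s_hv.c hunk below: the rearm helper now merely reports whether an escalation is still pending, and the caller aborts the cede itself.

```c
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the relevant vcpu state; not the kernel's struct kvm_vcpu. */
struct toy_vcpu {
	bool ceded;
	bool esc_pending;	/* models a still-pending XIVE escalation */
};

/*
 * Models the new kvmppc_xive_rearm_escalation() contract: report whether
 * the cede can stand (true) or must be aborted (false), instead of
 * clearing the ceded flag behind the caller's back.
 */
static bool toy_rearm_escalation(const struct toy_vcpu *vcpu)
{
	return !vcpu->esc_pending;
}

/* Models the H_CEDE handling in the caller after this patch. */
static void toy_handle_h_cede(struct toy_vcpu *vcpu)
{
	vcpu->ceded = true;		/* kvmppc_cede() */
	if (!toy_rearm_escalation(vcpu))
		vcpu->ceded = false;	/* pending escalation: abort the cede */
}

int main(void)
{
	struct toy_vcpu quiet = { .ceded = false, .esc_pending = false };
	struct toy_vcpu noisy = { .ceded = false, .esc_pending = true };

	toy_handle_h_cede(&quiet);
	toy_handle_h_cede(&noisy);
	printf("no pending escalation -> ceded=%d\n", quiet.ceded);	/* 1 */
	printf("pending escalation    -> ceded=%d\n", noisy.ceded);	/* 0 */
	return 0;
}
```

Keeping the abort decision in the caller means a future nested guest entry path can call the same helper and apply its own handling, which is the preparation the commit message refers to.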
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r-- | arch/powerpc/kvm/book3s_hv.c | 10
-rw-r--r-- | arch/powerpc/kvm/book3s_xive.c | 9
2 files changed, 14 insertions(+), 5 deletions(-)
```diff
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 2bf1c23b0171..fecd8e3cc42f 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -4068,10 +4068,16 @@ static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit,
 		    !(vcpu->arch.shregs.msr & MSR_PR)) {
 			unsigned long req = kvmppc_get_gpr(vcpu, 3);
 
-			/* H_CEDE has to be handled now, not later */
+			/* H_CEDE has to be handled now */
 			if (req == H_CEDE) {
 				kvmppc_cede(vcpu);
-				kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */
+				if (!kvmppc_xive_rearm_escalation(vcpu)) {
+					/*
+					 * Pending escalation so abort
+					 * the cede.
+					 */
+					vcpu->arch.ceded = 0;
+				}
 				kvmppc_set_gpr(vcpu, 3, 0);
 				trap = 0;
 
diff --git a/arch/powerpc/kvm/book3s_xive.c b/arch/powerpc/kvm/book3s_xive.c
index c0ce5531d9bc..4e7b9f0a21c7 100644
--- a/arch/powerpc/kvm/book3s_xive.c
+++ b/arch/powerpc/kvm/book3s_xive.c
@@ -179,12 +179,13 @@ void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvmppc_xive_pull_vcpu);
 
-void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
+bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
 {
 	void __iomem *esc_vaddr = (void __iomem *)vcpu->arch.xive_esc_vaddr;
+	bool ret = true;
 
 	if (!esc_vaddr)
-		return;
+		return ret;
 
 	/* we are using XIVE with single escalation */
 
@@ -197,7 +198,7 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
 		 * we also don't want to set xive_esc_on to 1 here in
 		 * case we race with xive_esc_irq().
 		 */
-		vcpu->arch.ceded = 0;
+		ret = false;
 		/*
 		 * The escalation interrupts are special as we don't EOI them.
 		 * There is no need to use the load-after-store ordering offset
@@ -210,6 +211,8 @@ void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu)
 		__raw_readq(esc_vaddr + XIVE_ESB_SET_PQ_00);
 	}
 	mb();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(kvmppc_xive_rearm_escalation);
```
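For reference, the return-value contract that the book3s_xive.c hunk introduces can be summarised as follows. This is a hedged paraphrase inferred from the diff above, not the kernel's own documentation; the prototype is reproduced here only for illustration.

```c
#include <stdbool.h>

struct kvm_vcpu;	/* opaque here; defined by the kernel */

/*
 * Re-arm the XIVE single-escalation interrupt after a guest exit.
 *
 * Returns true when there is no escalation mapping or the escalation was
 * re-armed. Returns false when an escalation interrupt is still pending,
 * in which case the caller is expected to abort a pending cede by clearing
 * vcpu->arch.ceded, as the book3s_hv.c hunk now does.
 */
bool kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);
```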