author	Nicholas Piggin <npiggin@gmail.com>	2023-03-30 13:32:23 +0300
committer	Michael Ellerman <mpe@ellerman.id.au>	2023-04-03 07:54:44 +0300
commit	460ba21d83fef766a5d34260e464c9ab8f10aa05 (patch)
tree	a71c29e754b6227851569ebd2ba8f2ec297445d0 /arch/powerpc/kvm/emulate_loadstore.c
parent	43d05c6123ca1ace5982ca326c156502e735b7d5 (diff)
KVM: PPC: Permit SRR1 flags in more injected interrupt types
The prefix architecture in ISA v3.1 introduces a prefixed bit in SRR1
for many types of synchronous interrupts, which is set when the
interrupt is caused by a prefixed instruction. This requires KVM to be
able to set this bit when injecting interrupts into a guest.

Plumb the SRR1 "flags" argument through to the core_queue APIs where it
is currently missing. For now the flags are always 0, so there is no
functional change.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Fixup kvmppc_core_queue_alignment() in booke.c]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://msgid.link/20230330103224.3589928-2-npiggin@gmail.com
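[Editor's note] For illustration only, not part of this commit: once a
caller knows whether the faulting instruction was prefixed, the new
argument lets it request the ISA v3.1 prefixed bit in the guest's SRR1.
SRR1_PREFIXED is the existing mask in arch/powerpc/include/asm/reg.h;
the function name and the "prefixed" parameter below are hypothetical.

/* Hypothetical caller: inject an FP-unavailable interrupt, setting
 * the ISA v3.1 prefixed bit in the guest's SRR1 when the faulting
 * instruction was prefixed. This commit itself always passes 0.
 */
static bool check_fp_disabled_prefixed(struct kvm_vcpu *vcpu, bool prefixed)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu,
					    prefixed ? SRR1_PREFIXED : 0);
		return true;
	}

	return false;
}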
Diffstat (limited to 'arch/powerpc/kvm/emulate_loadstore.c')
-rw-r--r--	arch/powerpc/kvm/emulate_loadstore.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index cfc9114b87d0..e324a174b585 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -28,7 +28,7 @@
 static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
-		kvmppc_core_queue_fpunavail(vcpu);
+		kvmppc_core_queue_fpunavail(vcpu, 0);
 		return true;
 	}
 
@@ -40,7 +40,7 @@ static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
-		kvmppc_core_queue_vsx_unavail(vcpu);
+		kvmppc_core_queue_vsx_unavail(vcpu, 0);
 		return true;
 	}
 
@@ -52,7 +52,7 @@ static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
 static bool kvmppc_check_altivec_disabled(struct kvm_vcpu *vcpu)
 {
 	if (!(kvmppc_get_msr(vcpu) & MSR_VEC)) {
-		kvmppc_core_queue_vec_unavail(vcpu);
+		kvmppc_core_queue_vec_unavail(vcpu, 0);
 		return true;
 	}
 
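[Editor's note] A sketch of the interface change these call sites plumb
through. The declarations are assumed to live in
arch/powerpc/include/asm/kvm_ppc.h; the exact header contents are not
shown by this diff, so treat the signatures below as illustrative:

/* Before: no way to request SRR1 flags with the injected interrupt. */
void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);

/* After: callers pass SRR1 flags (always 0 as of this commit) that the
 * core code can OR into the SRR1 value presented to the guest. */
void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu, ulong srr1_flags);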