From 7ef4e985d54bad2773f260da38530f858a9a8491 Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Thu, 10 May 2012 03:54:58 +0200
Subject: KVM: PPC: Book3S: PR: Handle EMUL_ASSIST

In addition to normal "privileged instruction" traps, we can also receive
"emulation assist" traps on newer hardware that has the HV bit set. Handle
them the same way as a privileged instruction trap, including the
instruction fetch. That way we don't execute stale instructions that happen
to be left over in that field when an emulation assist trap comes in.

This fixes -M mac99 / -M g3beige on p7 bare metal for me.

Signed-off-by: Alexander Graf
---
 arch/powerpc/kvm/book3s_segment.S | 6 ++++++
 1 file changed, 6 insertions(+)

(limited to 'arch/powerpc/kvm/book3s_segment.S')

diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 0676ae249b9f..012fc9281213 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -250,6 +250,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 	beq	ld_last_prev_inst
 	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
 	beq-	ld_last_inst
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+	cmpwi	r12, BOOK3S_INTERRUPT_H_EMUL_ASSIST
+	beq-	ld_last_inst
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
 
 	b	no_ld_last_inst
--
cgit v1.2.3


From 56e13dbae3eddb1648e6e94ae251c83cdc8304e0 Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Fri, 27 Apr 2012 16:33:35 +0200
Subject: KVM: PPC: Fix PR KVM on POWER7 bare metal

When running on a system that is HV capable, some interrupts use HSRR
SPRs instead of the normal SRR SPRs. The Linux handlers also use these
to return to the interrupted code once an interrupt has been processed.

Unfortunately, in our "jump back to the real host handler after we've
done the context switch" code, we were only setting the SRR SPRs,
causing Linux to jump back to an invalid IP after it had processed the
interrupt.

This fixes random crashes in p7 OPAL mode with PR KVM for me.

Signed-off-by: Alexander Graf
---
 arch/powerpc/kvm/book3s_segment.S | 35 ++++++++++++++++++++++-------------
 1 file changed, 22 insertions(+), 13 deletions(-)

(limited to 'arch/powerpc/kvm/book3s_segment.S')

diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 012fc9281213..87cfc1def241 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -197,6 +197,7 @@ kvmppc_interrupt:
 	/* Save guest PC and MSR */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
+	mr	r10, r12
 	andi.	r0,r12,0x2
 	beq	1f
 	mfspr	r3,SPRN_HSRR0
@@ -322,23 +323,17 @@ no_dcbz32_off:
 	 * Having set up SRR0/1 with the address where we want
 	 * to continue with relocation on (potentially in module
 	 * space), we either just go straight there with rfi[d],
-	 * or we jump to an interrupt handler with bctr if there
-	 * is an interrupt to be handled first. In the latter
-	 * case, the rfi[d] at the end of the interrupt handler
-	 * will get us back to where we want to continue.
+	 * or we jump to an interrupt handler if there is an
+	 * interrupt to be handled first. In the latter case,
+	 * the rfi[d] at the end of the interrupt handler will
+	 * get us back to where we want to continue.
 	 */
 
-	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
-	beq	1f
-	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
-	beq	1f
-	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
-1:	mtctr	r12
-
 	/* Register usage at this point:
 	 *
 	 *   R1       = host R1
 	 *   R2       = host R2
+	 *   R10      = raw exit handler id
 	 *   R12      = exit handler id
 	 *   R13      = shadow vcpu (32-bit) or PACA (64-bit)
 	 *   SVCPU.*  = guest *
@@ -348,12 +343,26 @@ no_dcbz32_off:
 	PPC_LL	r6, HSTATE_HOST_MSR(r13)
 	PPC_LL	r8, HSTATE_VMHANDLER(r13)
 
-	/* Restore host msr -> SRR1 */
+#ifdef CONFIG_PPC64
+BEGIN_FTR_SECTION
+	andi.	r0,r10,0x2
+	beq	1f
+	mtspr	SPRN_HSRR1, r6
+	mtspr	SPRN_HSRR0, r8
+END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
+#endif
+1:	/* Restore host msr -> SRR1 */
 	mtsrr1	r6
 	/* Load highmem handler address */
 	mtsrr0	r8
 
 	/* RFI into the highmem handler, or jump to interrupt handler */
-	beqctr
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+	beqa	BOOK3S_INTERRUPT_EXTERNAL
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
+	beqa	BOOK3S_INTERRUPT_DECREMENTER
+	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
+	beqa	BOOK3S_INTERRUPT_PERFMON
+
 	RFI
 kvmppc_handler_trampoline_exit_end:
--
cgit v1.2.3


From 32c7dbfd479e73684b0d23fcb0a5cb04f19d86f4 Mon Sep 17 00:00:00 2001
From: Alexander Graf
Date: Thu, 10 May 2012 03:58:50 +0200
Subject: KVM: PPC: Book3S: PR: Fix hsrr code

When jumping back into kernel code that expects to be using HSRR
registers instead of SRR registers, we need to make sure we pass it all
the information about where to jump to in the HSRR registers.

Unfortunately, we used r10 to store the information that distinguishes
the HSRR case from the SRR case, and that register got clobbered in
between, rendering the later comparison invalid.

Instead, let's store this information in cr1. That way we don't need
yet another register and everyone's happy.

This fixes PR KVM on POWER7 bare metal for me.

Signed-off-by: Alexander Graf
---
 arch/powerpc/kvm/book3s_segment.S | 7 +++----
 1 file changed, 3 insertions(+), 4 deletions(-)

(limited to 'arch/powerpc/kvm/book3s_segment.S')

diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 87cfc1def241..6e6e9cef34a8 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -197,8 +197,8 @@ kvmppc_interrupt:
 	/* Save guest PC and MSR */
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
-	mr	r10, r12
-	andi.	r0,r12,0x2
+	andi.	r0, r12, 0x2
+	cmpwi	cr1, r0, 0
 	beq	1f
 	mfspr	r3,SPRN_HSRR0
 	mfspr	r4,SPRN_HSRR1
@@ -345,8 +345,7 @@ no_dcbz32_off:
 
 #ifdef CONFIG_PPC64
 BEGIN_FTR_SECTION
-	andi.	r0,r10,0x2
-	beq	1f
+	beq	cr1, 1f
 	mtspr	SPRN_HSRR1, r6
 	mtspr	SPRN_HSRR0, r8
 END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
--
cgit v1.2.3
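
For readers less familiar with the convention these three patches rely on, here is a
rough C sketch of what the exit path above does. It is illustrative only, not kernel
code: the low-level interrupt entry code marks hypervisor-class vectors by setting bit
0x2 in the exit id it hands to kvmppc_interrupt, and the exit path must then place the
host handler address and MSR into HSRR0/HSRR1 (consumed by hrfid) in addition to
SRR0/SRR1 (consumed by rfid). The helper names and example values below are hypothetical.

/* Illustrative sketch only, not kernel code: printf stubs stand in for the
 * mtspr instructions that write SRR0/SRR1 and HSRR0/HSRR1. */
#include <stdint.h>
#include <stdio.h>

#define EXIT_ID_HV 0x2 /* bit the entry code sets for HSRR-class vectors */

static void set_srr(uint64_t srr0, uint64_t srr1)
{
	printf("SRR0  <- 0x%llx, SRR1  <- 0x%llx\n",
	       (unsigned long long)srr0, (unsigned long long)srr1);
}

static void set_hsrr(uint64_t hsrr0, uint64_t hsrr1)
{
	printf("HSRR0 <- 0x%llx, HSRR1 <- 0x%llx\n",
	       (unsigned long long)hsrr0, (unsigned long long)hsrr1);
}

/* Mirror of the exit path: the host handler address and host MSR must land
 * in the SPR pair that the Linux handler's rfid/hrfid will actually use. */
static void restore_host_return_state(uint32_t exit_id,
				      uint64_t handler, uint64_t host_msr)
{
	/* Capture the predicate once, up front -- the third patch keeps it
	 * in cr1 because the scratch GPR holding the exit id gets clobbered
	 * before the exit path runs. */
	int uses_hsrr = exit_id & EXIT_ID_HV;

	if (uses_hsrr)
		set_hsrr(handler, host_msr);
	/* The assembly falls through and always sets SRR0/SRR1 as well. */
	set_srr(handler, host_msr);
}

int main(void)
{
	/* Hypothetical example values: an HSRR-class exit id and arbitrary
	 * handler address / host MSR. */
	restore_host_return_state(0xe40 | EXIT_ID_HV,
				  0xc000000000abcd00ULL, 0x9000000000001032ULL);
	return 0;
}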