author    Paul Mackerras <paulus@ozlabs.org>    2017-01-30 13:21:45 +0300
committer Michael Ellerman <mpe@ellerman.id.au> 2017-01-31 11:11:48 +0300
commit    f4c51f841d2ac7d36cacb84efbc383190861f87c (patch)
tree      7592614fcde2aee646cda406e9e29ee8ef134f85 /arch/powerpc/kvm
parent    9e04ba69beec372ddf857c700ff922e95f50b0d0 (diff)
download  linux-f4c51f841d2ac7d36cacb84efbc383190861f87c.tar.xz
KVM: PPC: Book3S HV: Modify guest entry/exit paths to handle radix guests
This adds code to branch around the parts that radix guests don't need - clearing and loading the SLB with the guest SLB contents, saving the guest SLB contents on exit, and restoring the host SLB contents. Since the host is now using radix, we need to save and restore the host value for the PID register.

On hypervisor data/instruction storage interrupts, we don't do the guest HPT lookup on radix, but just save the guest physical address for the fault (from the ASDR register) in the vcpu struct.

Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
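For orientation, the control flow that the real-mode assembly below implements can be expressed as a rough C sketch. This sketch is illustrative only and is not part of the patch: the struct fields, helpers and SPR accessors are simplified stand-ins for the real kernel structures and for mfspr/mtspr on PID, HDAR, HDSISR and ASDR (the actual test in the kernel is on kvm->arch.radix).

    /*
     * Illustrative sketch only -- NOT code from this patch.  It models the
     * branch structure the assembly adds: skip the SLB work for radix
     * guests, preserve the radix host's PID across the guest run, and on a
     * hypervisor storage interrupt record the guest physical address from
     * ASDR instead of doing a guest HPT lookup.
     */
    #include <stdbool.h>

    struct kvm      { bool radix; };          /* stands in for kvm->arch.radix */
    struct kvm_vcpu {
            unsigned long fault_dar;
            unsigned long fault_dsisr;
            unsigned long fault_gpa;          /* filled from ASDR on radix */
    };

    enum spr { SPR_PID, SPR_HDAR, SPR_HDSISR, SPR_ASDR };

    /* Stand-ins for SPR and SLB accessors; no-ops in this sketch. */
    static unsigned long read_spr(enum spr r)          { (void)r; return 0; }
    static void write_spr(enum spr r, unsigned long v) { (void)r; (void)v; }
    static void clear_slb(void)                        {}
    static void load_guest_slb(struct kvm_vcpu *v)     { (void)v; }
    static void save_guest_slb(struct kvm_vcpu *v)     { (void)v; }
    static void restore_host_slb(void)                 {}

    static unsigned long host_pid;            /* the asm keeps this on the stack */

    static void guest_entry(struct kvm *kvm, struct kvm_vcpu *vcpu)
    {
            if (!kvm->radix) {                /* hash guest: SLB work is needed */
                    clear_slb();
                    load_guest_slb(vcpu);     /* slb_max entries; 0 for radix */
            }
            host_pid = read_spr(SPR_PID);     /* host runs radix, so save PID */
    }

    static void guest_exit(struct kvm *kvm, struct kvm_vcpu *vcpu)
    {
            if (!kvm->radix) {
                    save_guest_slb(vcpu);     /* radix path just stores slb_max = 0 */
                    restore_host_slb();
            }
            write_spr(SPR_PID, host_pid);     /* restore the host's PID */
    }

    /* Hypervisor data storage interrupt: no guest HPT lookup on radix. */
    static bool hdsi_radix(struct kvm *kvm, struct kvm_vcpu *vcpu)
    {
            if (!kvm->radix)
                    return false;             /* hash: fall back to HPT handling */
            vcpu->fault_dar   = read_spr(SPR_HDAR);
            vcpu->fault_dsisr = read_spr(SPR_HDSISR);
            vcpu->fault_gpa   = read_spr(SPR_ASDR);
            return true;
    }

The patch itself does all of this in the real-mode assembly of book3s_hv_rmhandlers.S, using cr7 to carry the radix/hash decision across the entry path, as the hunks below show.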
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S | 57
1 file changed, 46 insertions, 11 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 01f4392a284d..7fc7a9221509 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -518,6 +518,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
/* Stack frame offsets */
#define STACK_SLOT_TID (112-16)
#define STACK_SLOT_PSSCR (112-24)
+#define STACK_SLOT_PID (112-32)
.global kvmppc_hv_entry
kvmppc_hv_entry:
@@ -530,6 +531,7 @@ kvmppc_hv_entry:
* R1 = host R1
* R2 = TOC
* all other volatile GPRS = free
+ * Does not preserve non-volatile GPRs or CR fields
*/
mflr r0
std r0, PPC_LR_STKOFF(r1)
@@ -549,32 +551,38 @@ kvmppc_hv_entry:
bl kvmhv_start_timing
1:
#endif
- /* Clear out SLB */
+
+ /* Use cr7 as an indication of radix mode */
+ ld r5, HSTATE_KVM_VCORE(r13)
+ ld r9, VCORE_KVM(r5) /* pointer to struct kvm */
+ lbz r0, KVM_RADIX(r9)
+ cmpwi cr7, r0, 0
+
+ /* Clear out SLB if hash */
+ bne cr7, 2f
li r6,0
slbmte r6,r6
slbia
ptesync
-
+2:
/*
* POWER7/POWER8 host -> guest partition switch code.
* We don't have to lock against concurrent tlbies,
* but we do have to coordinate across hardware threads.
*/
/* Set bit in entry map iff exit map is zero. */
- ld r5, HSTATE_KVM_VCORE(r13)
li r7, 1
lbz r6, HSTATE_PTID(r13)
sld r7, r7, r6
- addi r9, r5, VCORE_ENTRY_EXIT
-21: lwarx r3, 0, r9
+ addi r8, r5, VCORE_ENTRY_EXIT
+21: lwarx r3, 0, r8
cmpwi r3, 0x100 /* any threads starting to exit? */
bge secondary_too_late /* if so we're too late to the party */
or r3, r3, r7
- stwcx. r3, 0, r9
+ stwcx. r3, 0, r8
bne 21b
/* Primary thread switches to guest partition. */
- ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
cmpwi r6,0
bne 10f
lwz r7,KVM_LPID(r9)
@@ -658,7 +666,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
beq kvmppc_primary_no_guest
kvmppc_got_guest:
- /* Load up guest SLB entries */
+ /* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
lwz r5,VCPU_SLB_MAX(r4)
cmpwi r5,0
beq 9f
@@ -696,8 +704,10 @@ kvmppc_got_guest:
BEGIN_FTR_SECTION
mfspr r5, SPRN_TIDR
mfspr r6, SPRN_PSSCR
+ mfspr r7, SPRN_PID
std r5, STACK_SLOT_TID(r1)
std r6, STACK_SLOT_PSSCR(r1)
+ std r7, STACK_SLOT_PID(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
@@ -1293,11 +1303,15 @@ mc_cont:
mtspr SPRN_CTRLT,r6
4:
/* Read the guest SLB and save it away */
+ ld r5, VCPU_KVM(r9)
+ lbz r0, KVM_RADIX(r5)
+ cmpwi r0, 0
+ li r5, 0
+ bne 3f /* for radix, save 0 entries */
lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
mtctr r0
li r6,0
addi r7,r9,VCPU_SLB
- li r5,0
1: slbmfee r8,r6
andis. r0,r8,SLB_ESID_V@h
beq 2f
@@ -1309,7 +1323,7 @@ mc_cont:
addi r5,r5,1
2: addi r6,r6,1
bdnz 1b
- stw r5,VCPU_SLB_MAX(r9)
+3: stw r5,VCPU_SLB_MAX(r9)
/*
* Save the guest PURR/SPURR
@@ -1558,8 +1572,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
ld r5, STACK_SLOT_TID(r1)
ld r6, STACK_SLOT_PSSCR(r1)
+ ld r7, STACK_SLOT_PID(r1)
mtspr SPRN_TIDR, r5
mtspr SPRN_PSSCR, r6
+ mtspr SPRN_PID, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
/*
@@ -1671,6 +1687,9 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
isync
/* load host SLB entries */
+BEGIN_MMU_FTR_SECTION
+ b 0f
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
ld r8,PACA_SLBSHADOWPTR(r13)
.rept SLB_NUM_BOLTED
@@ -1683,7 +1702,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
slbmte r6,r5
1: addi r8,r8,16
.endr
-
+0:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/* Finish timing, if we have a vcpu */
ld r4, HSTATE_KVM_VCPU(r13)
@@ -1710,8 +1729,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
* reflect the HDSI to the guest as a DSI.
*/
kvmppc_hdsi:
+ ld r3, VCPU_KVM(r9)
+ lbz r0, KVM_RADIX(r3)
+ cmpwi r0, 0
mfspr r4, SPRN_HDAR
mfspr r6, SPRN_HDSISR
+ bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
/* HPTE not found fault or protection fault? */
andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
beq 1f /* if not, send it to the guest */
@@ -1788,11 +1811,23 @@ fast_interrupt_c_return:
stb r0, HSTATE_IN_GUEST(r13)
b guest_exit_cont
+.Lradix_hdsi:
+ std r4, VCPU_FAULT_DAR(r9)
+ stw r6, VCPU_FAULT_DSISR(r9)
+.Lradix_hisi:
+ mfspr r5, SPRN_ASDR
+ std r5, VCPU_FAULT_GPA(r9)
+ b guest_exit_cont
+
/*
* Similarly for an HISI, reflect it to the guest as an ISI unless
* it is an HPTE not found fault for a page that we have paged out.
*/
kvmppc_hisi:
+ ld r3, VCPU_KVM(r9)
+ lbz r0, KVM_RADIX(r3)
+ cmpwi r0, 0
+ bne .Lradix_hisi /* for radix, just save ASDR */
andis. r0, r11, SRR1_ISI_NOPT@h
beq 1f
BEGIN_FTR_SECTION