Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s.c                162
-rw-r--r--  arch/powerpc/kvm/book3s.h                  3
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_hv.c        8
-rw-r--r--  arch/powerpc/kvm/book3s_hv.c              47
-rw-r--r--  arch/powerpc/kvm/book3s_hv_builtin.c       6
-rw-r--r--  arch/powerpc/kvm/book3s_hv_rmhandlers.S    3
-rw-r--r--  arch/powerpc/kvm/book3s_pr.c               6
-rw-r--r--  arch/powerpc/kvm/booke.c                 287
-rw-r--r--  arch/powerpc/kvm/booke.h                  40
-rw-r--r--  arch/powerpc/kvm/booke_emulate.c         163
-rw-r--r--  arch/powerpc/kvm/bookehv_interrupts.S     13
-rw-r--r--  arch/powerpc/kvm/e500.h                   20
-rw-r--r--  arch/powerpc/kvm/e500_emulate.c           20
-rw-r--r--  arch/powerpc/kvm/e500_mmu_host.c          20
-rw-r--r--  arch/powerpc/kvm/e500mc.c                 60
-rw-r--r--  arch/powerpc/kvm/emulate.c                17
-rw-r--r--  arch/powerpc/kvm/emulate_loadstore.c       2
-rw-r--r--  arch/powerpc/kvm/powerpc.c               134
18 files changed, 706 insertions(+), 305 deletions(-)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index dd03f6b299ba..b32db4b95361 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -535,174 +535,111 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
return -ENOTSUPP;
}
-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
- int r;
- union kvmppc_one_reg val;
- int size;
+ int r = 0;
long int i;
- size = one_reg_size(reg->id);
- if (size > sizeof(val))
- return -EINVAL;
-
- r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
+ r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
if (r == -EINVAL) {
r = 0;
- switch (reg->id) {
+ switch (id) {
case KVM_REG_PPC_DAR:
- val = get_reg_val(reg->id, kvmppc_get_dar(vcpu));
+ *val = get_reg_val(id, kvmppc_get_dar(vcpu));
break;
case KVM_REG_PPC_DSISR:
- val = get_reg_val(reg->id, kvmppc_get_dsisr(vcpu));
+ *val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
- i = reg->id - KVM_REG_PPC_FPR0;
- val = get_reg_val(reg->id, VCPU_FPR(vcpu, i));
+ i = id - KVM_REG_PPC_FPR0;
+ *val = get_reg_val(id, VCPU_FPR(vcpu, i));
break;
case KVM_REG_PPC_FPSCR:
- val = get_reg_val(reg->id, vcpu->arch.fp.fpscr);
- break;
-#ifdef CONFIG_ALTIVEC
- case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
- if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
- r = -ENXIO;
- break;
- }
- val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
- break;
- case KVM_REG_PPC_VSCR:
- if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
- r = -ENXIO;
- break;
- }
- val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+ *val = get_reg_val(id, vcpu->arch.fp.fpscr);
break;
- case KVM_REG_PPC_VRSAVE:
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
- break;
-#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
- long int i = reg->id - KVM_REG_PPC_VSR0;
- val.vsxval[0] = vcpu->arch.fp.fpr[i][0];
- val.vsxval[1] = vcpu->arch.fp.fpr[i][1];
+ i = id - KVM_REG_PPC_VSR0;
+ val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
+ val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
} else {
r = -ENXIO;
}
break;
#endif /* CONFIG_VSX */
- case KVM_REG_PPC_DEBUG_INST: {
- u32 opcode = INS_TW;
- r = copy_to_user((u32 __user *)(long)reg->addr,
- &opcode, sizeof(u32));
+ case KVM_REG_PPC_DEBUG_INST:
+ *val = get_reg_val(id, INS_TW);
break;
- }
#ifdef CONFIG_KVM_XICS
case KVM_REG_PPC_ICP_STATE:
if (!vcpu->arch.icp) {
r = -ENXIO;
break;
}
- val = get_reg_val(reg->id, kvmppc_xics_get_icp(vcpu));
+ *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
break;
#endif /* CONFIG_KVM_XICS */
case KVM_REG_PPC_FSCR:
- val = get_reg_val(reg->id, vcpu->arch.fscr);
+ *val = get_reg_val(id, vcpu->arch.fscr);
break;
case KVM_REG_PPC_TAR:
- val = get_reg_val(reg->id, vcpu->arch.tar);
+ *val = get_reg_val(id, vcpu->arch.tar);
break;
case KVM_REG_PPC_EBBHR:
- val = get_reg_val(reg->id, vcpu->arch.ebbhr);
+ *val = get_reg_val(id, vcpu->arch.ebbhr);
break;
case KVM_REG_PPC_EBBRR:
- val = get_reg_val(reg->id, vcpu->arch.ebbrr);
+ *val = get_reg_val(id, vcpu->arch.ebbrr);
break;
case KVM_REG_PPC_BESCR:
- val = get_reg_val(reg->id, vcpu->arch.bescr);
+ *val = get_reg_val(id, vcpu->arch.bescr);
break;
case KVM_REG_PPC_VTB:
- val = get_reg_val(reg->id, vcpu->arch.vtb);
+ *val = get_reg_val(id, vcpu->arch.vtb);
break;
case KVM_REG_PPC_IC:
- val = get_reg_val(reg->id, vcpu->arch.ic);
+ *val = get_reg_val(id, vcpu->arch.ic);
break;
default:
r = -EINVAL;
break;
}
}
- if (r)
- return r;
-
- if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
- r = -EFAULT;
return r;
}
-int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
- int r;
- union kvmppc_one_reg val;
- int size;
+ int r = 0;
long int i;
- size = one_reg_size(reg->id);
- if (size > sizeof(val))
- return -EINVAL;
-
- if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
- return -EFAULT;
-
- r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
+ r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
if (r == -EINVAL) {
r = 0;
- switch (reg->id) {
+ switch (id) {
case KVM_REG_PPC_DAR:
- kvmppc_set_dar(vcpu, set_reg_val(reg->id, val));
+ kvmppc_set_dar(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_DSISR:
- kvmppc_set_dsisr(vcpu, set_reg_val(reg->id, val));
+ kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
break;
case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
- i = reg->id - KVM_REG_PPC_FPR0;
- VCPU_FPR(vcpu, i) = set_reg_val(reg->id, val);
+ i = id - KVM_REG_PPC_FPR0;
+ VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
break;
case KVM_REG_PPC_FPSCR:
- vcpu->arch.fp.fpscr = set_reg_val(reg->id, val);
- break;
-#ifdef CONFIG_ALTIVEC
- case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
- if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
- r = -ENXIO;
- break;
- }
- vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
- break;
- case KVM_REG_PPC_VSCR:
- if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
- r = -ENXIO;
- break;
- }
- vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
- break;
- case KVM_REG_PPC_VRSAVE:
- if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
- r = -ENXIO;
- break;
- }
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ vcpu->arch.fp.fpscr = set_reg_val(id, *val);
break;
-#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
if (cpu_has_feature(CPU_FTR_VSX)) {
- long int i = reg->id - KVM_REG_PPC_VSR0;
- vcpu->arch.fp.fpr[i][0] = val.vsxval[0];
- vcpu->arch.fp.fpr[i][1] = val.vsxval[1];
+ i = id - KVM_REG_PPC_VSR0;
+ vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
+ vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
} else {
r = -ENXIO;
}
@@ -715,29 +652,29 @@ int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
break;
}
r = kvmppc_xics_set_icp(vcpu,
- set_reg_val(reg->id, val));
+ set_reg_val(id, *val));
break;
#endif /* CONFIG_KVM_XICS */
case KVM_REG_PPC_FSCR:
- vcpu->arch.fscr = set_reg_val(reg->id, val);
+ vcpu->arch.fscr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_TAR:
- vcpu->arch.tar = set_reg_val(reg->id, val);
+ vcpu->arch.tar = set_reg_val(id, *val);
break;
case KVM_REG_PPC_EBBHR:
- vcpu->arch.ebbhr = set_reg_val(reg->id, val);
+ vcpu->arch.ebbhr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_EBBRR:
- vcpu->arch.ebbrr = set_reg_val(reg->id, val);
+ vcpu->arch.ebbrr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_BESCR:
- vcpu->arch.bescr = set_reg_val(reg->id, val);
+ vcpu->arch.bescr = set_reg_val(id, *val);
break;
case KVM_REG_PPC_VTB:
- vcpu->arch.vtb = set_reg_val(reg->id, val);
+ vcpu->arch.vtb = set_reg_val(id, *val);
break;
case KVM_REG_PPC_IC:
- vcpu->arch.ic = set_reg_val(reg->id, val);
+ vcpu->arch.ic = set_reg_val(id, *val);
break;
default:
r = -EINVAL;
@@ -778,13 +715,12 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
struct kvm_guest_debug *dbg)
{
- return -EINVAL;
+ vcpu->guest_debug = dbg->control;
+ return 0;
}
-void kvmppc_decrementer_func(unsigned long data)
+void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
kvmppc_core_queue_dec(vcpu);
kvm_vcpu_kick(vcpu);
}
@@ -851,9 +787,9 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
- return kvm->arch.kvm_ops->age_hva(kvm, hva);
+ return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}
int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
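
Note on the KVM_REG_PPC_DEBUG_INST case above: with the user-space copies hoisted into a generic wrapper (see the powerpc.c hunks below), the breakpoint opcode is now returned through the kvmppc_one_reg union like any other register, instead of a one-off copy_to_user(). A minimal user-space sketch of how a debugger might fetch it and plant a breakpoint; plant_sw_breakpoint and guest_insn are hypothetical, while KVM_GET_ONE_REG and the register ID are existing ABI:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Fetch the opcode KVM uses for soft breakpoints and patch it over
     * a guest instruction (saving the original opcode so the breakpoint
     * can be removed again is the caller's job). */
    static int plant_sw_breakpoint(int vcpu_fd, uint32_t *guest_insn)
    {
        uint32_t opcode;
        struct kvm_one_reg reg = {
            .id   = KVM_REG_PPC_DEBUG_INST,
            .addr = (uintptr_t)&opcode,
        };

        if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
            return -1;
        memcpy(guest_insn, &opcode, sizeof(opcode));
        return 0;
    }
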
diff --git a/arch/powerpc/kvm/book3s.h b/arch/powerpc/kvm/book3s.h
index 4bf956cf94d6..d2b3ec088b8c 100644
--- a/arch/powerpc/kvm/book3s.h
+++ b/arch/powerpc/kvm/book3s.h
@@ -17,7 +17,8 @@ extern void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
extern int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva);
extern int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start,
unsigned long end);
-extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva);
+extern int kvm_age_hva_hv(struct kvm *kvm, unsigned long start,
+ unsigned long end);
extern int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva);
extern void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte);
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 72c20bb16d26..d40770248b6a 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -62,10 +62,10 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
}
kvm->arch.hpt_cma_alloc = 0;
- page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
+ page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT));
if (page) {
hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
- memset((void *)hpt, 0, (1 << order));
+ memset((void *)hpt, 0, (1ul << order));
kvm->arch.hpt_cma_alloc = 1;
}
@@ -1002,11 +1002,11 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
return ret;
}
-int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
+int kvm_age_hva_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
if (!kvm->arch.using_mmu_notifiers)
return 0;
- return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
+ return kvm_handle_hva_range(kvm, start, end, kvm_age_rmapp);
}
static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 27cced9c7249..e63587d30b70 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -725,6 +725,30 @@ static int kvmppc_hcall_impl_hv(unsigned long cmd)
return kvmppc_hcall_impl_hv_realmode(cmd);
}
+static int kvmppc_emulate_debug_inst(struct kvm_run *run,
+ struct kvm_vcpu *vcpu)
+{
+ u32 last_inst;
+
+ if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
+ EMULATE_DONE) {
+ /*
+ * Fetch failed, so return to guest and
+ * try executing it again.
+ */
+ return RESUME_GUEST;
+ }
+
+ if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
+ run->exit_reason = KVM_EXIT_DEBUG;
+ run->debug.arch.address = kvmppc_get_pc(vcpu);
+ return RESUME_HOST;
+ } else {
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+ return RESUME_GUEST;
+ }
+}
+
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
struct task_struct *tsk)
{
@@ -807,12 +831,18 @@ static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
/*
* This occurs if the guest executes an illegal instruction.
- * We just generate a program interrupt to the guest, since
- * we don't emulate any guest instructions at this stage.
+ * If the guest debug is disabled, generate a program interrupt
+ * to the guest. If guest debug is enabled, we need to check
+ * whether the instruction is a software breakpoint instruction.
+ * Accordingly return to Guest or Host.
*/
case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
- kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
- r = RESUME_GUEST;
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
+ r = kvmppc_emulate_debug_inst(run, vcpu);
+ } else {
+ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
+ r = RESUME_GUEST;
+ }
break;
/*
* This occurs if the guest (kernel or userspace), does something that
@@ -856,7 +886,9 @@ static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
{
int i, j;
- kvmppc_set_pvr_hv(vcpu, sregs->pvr);
+ /* Only accept the same PVR as the host's, since we can't spoof it */
+ if (sregs->pvr != vcpu->arch.pvr)
+ return -EINVAL;
j = 0;
for (i = 0; i < vcpu->arch.slb_nr; i++) {
@@ -922,6 +954,9 @@ static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
long int i;
switch (id) {
+ case KVM_REG_PPC_DEBUG_INST:
+ *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
+ break;
case KVM_REG_PPC_HIOR:
*val = get_reg_val(id, 0);
break;
@@ -1489,7 +1524,7 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
static int kvmppc_grab_hwthread(int cpu)
{
struct paca_struct *tpaca;
- long timeout = 1000;
+ long timeout = 10000;
tpaca = &paca[cpu];
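
On HV hosts a soft breakpoint surfaces as an emulation-assist interrupt: kvmppc_emulate_debug_inst() above re-fetches the guest instruction and, if it is KVMPPC_INST_SW_BREAKPOINT, exits to user space with KVM_EXIT_DEBUG and the trap address. A hypothetical user-space run-loop fragment consuming that exit; report_breakpoint is made up, and run is assumed to be the mmap'ed kvm_run structure:

    for (;;) {
        if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
            break;
        if (run->exit_reason == KVM_EXIT_DEBUG) {
            report_breakpoint(run->debug.arch.address);
            continue;
        }
        /* ... handle the other exit reasons ... */
    }
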
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index b9615ba5b083..4fdc27c80f4c 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -163,6 +163,12 @@ void __init kvm_cma_reserve(void)
unsigned long align_size;
struct memblock_region *reg;
phys_addr_t selected_size = 0;
+
+ /*
+ * We need CMA reservation only when we are in HV mode
+ */
+ if (!cpu_has_feature(CPU_FTR_HVMODE))
+ return;
/*
* We cannot use memblock_phys_mem_size() here, because
* memblock_analyze() has not been called yet.
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index f0c4db7704c3..edb2ccdbb2ba 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -355,6 +355,7 @@ kvmppc_hv_entry:
* MSR = ~IR|DR
* R13 = PACA
* R1 = host R1
+ * R2 = TOC
* all other volatile GPRS = free
*/
mflr r0
@@ -503,7 +504,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
toc_tlbie_lock:
.tc native_tlbie_lock[TC],native_tlbie_lock
.previous
- ld r3,toc_tlbie_lock@toc(2)
+ ld r3,toc_tlbie_lock@toc(r2)
#ifdef __BIG_ENDIAN__
lwz r8,PACA_LOCK_TOKEN(r13)
#else
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index faffb27badd9..cf2eb16846d1 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -295,7 +295,8 @@ static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
return 0;
}
-static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
+static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
+ unsigned long end)
{
/* XXX could be more clever ;) */
return 0;
@@ -1319,6 +1320,9 @@ static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
int r = 0;
switch (id) {
+ case KVM_REG_PPC_DEBUG_INST:
+ *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
+ break;
case KVM_REG_PPC_HIOR:
*val = get_reg_val(id, to_book3s(vcpu)->hior);
break;
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index b4c89fa6f109..9b55dec2d6cc 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -124,6 +124,40 @@ static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
}
#endif
+/*
+ * Load up guest vcpu FP state if it's needed.
+ * It also sets MSR_FP in the thread so that the host knows
+ * we're holding the FPU, and the host can then help save the
+ * guest vcpu FP state if other threads need the FPU.
+ * This simulates an FP unavailable fault.
+ *
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+ if (!(current->thread.regs->msr & MSR_FP)) {
+ enable_kernel_fp();
+ load_fp_state(&vcpu->arch.fp);
+ current->thread.fp_save_area = &vcpu->arch.fp;
+ current->thread.regs->msr |= MSR_FP;
+ }
+#endif
+}
+
+/*
+ * Save guest vcpu FP state into thread.
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_PPC_FPU
+ if (current->thread.regs->msr & MSR_FP)
+ giveup_fpu(current);
+ current->thread.fp_save_area = NULL;
+#endif
+}
+
static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
@@ -134,6 +168,40 @@ static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
#endif
}
+/*
+ * Simulate AltiVec unavailable fault to load guest state
+ * from thread to AltiVec unit.
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ if (!(current->thread.regs->msr & MSR_VEC)) {
+ enable_kernel_altivec();
+ load_vr_state(&vcpu->arch.vr);
+ current->thread.vr_save_area = &vcpu->arch.vr;
+ current->thread.regs->msr |= MSR_VEC;
+ }
+ }
+#endif
+}
+
+/*
+ * Save guest vcpu AltiVec state into thread.
+ * It must be called with preemption disabled.
+ */
+static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
+{
+#ifdef CONFIG_ALTIVEC
+ if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ if (current->thread.regs->msr & MSR_VEC)
+ giveup_altivec(current);
+ current->thread.vr_save_area = NULL;
+ }
+#endif
+}
+
static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
/* Synchronize guest's desire to get debug interrupts into shadow MSR */
@@ -267,6 +335,16 @@ static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}
+void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
+{
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
+}
+
+void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
+{
+ clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
+}
+
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
kvmppc_set_srr0(vcpu, srr0);
@@ -341,9 +419,15 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
case BOOKE_IRQPRIO_ITLB_MISS:
case BOOKE_IRQPRIO_SYSCALL:
case BOOKE_IRQPRIO_FP_UNAVAIL:
+#ifdef CONFIG_SPE_POSSIBLE
case BOOKE_IRQPRIO_SPE_UNAVAIL:
case BOOKE_IRQPRIO_SPE_FP_DATA:
case BOOKE_IRQPRIO_SPE_FP_ROUND:
+#endif
+#ifdef CONFIG_ALTIVEC
+ case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
+ case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
+#endif
case BOOKE_IRQPRIO_AP_UNAVAIL:
allowed = 1;
msr_mask = MSR_CE | MSR_ME | MSR_DE;
@@ -377,7 +461,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
allowed = vcpu->arch.shared->msr & MSR_DE;
allowed = allowed && !crit;
msr_mask = MSR_ME;
- int_class = INT_CLASS_CRIT;
+ if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
+ int_class = INT_CLASS_DBG;
+ else
+ int_class = INT_CLASS_CRIT;
+
break;
}
@@ -654,20 +742,27 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
/*
* Since we can't trap on MSR_FP in GS-mode, we consider the guest
- * as always using the FPU. Kernel usage of FP (via
- * enable_kernel_fp()) in this thread must not occur while
- * vcpu->fpu_active is set.
+ * as always using the FPU.
*/
- vcpu->fpu_active = 1;
-
kvmppc_load_guest_fp(vcpu);
#endif
+#ifdef CONFIG_ALTIVEC
+ /* Save userspace AltiVec state in stack */
+ if (cpu_has_feature(CPU_FTR_ALTIVEC))
+ enable_kernel_altivec();
+ /*
+ * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
+ * as always using the AltiVec.
+ */
+ kvmppc_load_guest_altivec(vcpu);
+#endif
+
/* Switch to guest debug context */
- debug = vcpu->arch.shadow_dbg_reg;
+ debug = vcpu->arch.dbg_reg;
switch_booke_debug_regs(&debug);
debug = current->thread.debug;
- current->thread.debug = vcpu->arch.shadow_dbg_reg;
+ current->thread.debug = vcpu->arch.dbg_reg;
vcpu->arch.pgdir = current->mm->pgd;
kvmppc_fix_ee_before_entry();
@@ -683,8 +778,10 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
+#endif
- vcpu->fpu_active = 0;
+#ifdef CONFIG_ALTIVEC
+ kvmppc_save_guest_altivec(vcpu);
#endif
out:
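
Since MSR_FP and MSR_VEC cannot be trapped while the guest runs with GS=1, guest FP/AltiVec state is loaded eagerly before entry and reclaimed after exit rather than on demand. A sketch of the pairing these helpers assume, distilled from the comments above (not a new API): preemption stays disabled across the whole region so the scheduler cannot move the thread while guest state owns the units.

    preempt_disable();
    kvmppc_load_guest_fp(vcpu);
    kvmppc_load_guest_altivec(vcpu);
    /* ... low-level guest entry and exit ... */
    kvmppc_save_guest_fp(vcpu);
    kvmppc_save_guest_altivec(vcpu);
    preempt_enable();
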
@@ -728,9 +825,36 @@ static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
- struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
+ struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
u32 dbsr = vcpu->arch.dbsr;
+ if (vcpu->guest_debug == 0) {
+ /*
+ * Debug resources belong to Guest.
+ * Imprecise debug event is not injected
+ */
+ if (dbsr & DBSR_IDE) {
+ dbsr &= ~DBSR_IDE;
+ if (!dbsr)
+ return RESUME_GUEST;
+ }
+
+ if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
+ (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
+ kvmppc_core_queue_debug(vcpu);
+
+ /* Inject a program interrupt if trap debug is not allowed */
+ if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
+ kvmppc_core_queue_program(vcpu, ESR_PTR);
+
+ return RESUME_GUEST;
+ }
+
+ /*
+ * Debug resource owned by userspace.
+ * Clear guest dbsr (vcpu->arch.dbsr)
+ */
+ vcpu->arch.dbsr = 0;
run->debug.arch.status = 0;
run->debug.arch.address = vcpu->arch.pc;
@@ -868,7 +992,12 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
case BOOKE_INTERRUPT_DATA_STORAGE:
case BOOKE_INTERRUPT_DTLB_MISS:
case BOOKE_INTERRUPT_HV_PRIV:
- emulated = kvmppc_get_last_inst(vcpu, false, &last_inst);
+ emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
+ break;
+ case BOOKE_INTERRUPT_PROGRAM:
+ /* SW breakpoints arrive as illegal instructions on HV */
+ if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
break;
default:
break;
@@ -947,6 +1076,18 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
break;
case BOOKE_INTERRUPT_PROGRAM:
+ if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
+ (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
+ /*
+ * We are here because of an SW breakpoint instr,
+ * so let's return to the host to handle it.
+ */
+ r = kvmppc_handle_debug(run, vcpu);
+ run->exit_reason = KVM_EXIT_DEBUG;
+ kvmppc_account_exit(vcpu, DEBUG_EXITS);
+ break;
+ }
+
if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
/*
* Program traps generated by user-level software must
@@ -991,7 +1132,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
r = RESUME_GUEST;
break;
-#else
+#elif defined(CONFIG_SPE_POSSIBLE)
case BOOKE_INTERRUPT_SPE_UNAVAIL:
/*
* Guest wants SPE, but host kernel doesn't support it. Send
@@ -1012,6 +1153,22 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->hw.hardware_exit_reason = exit_nr;
r = RESUME_HOST;
break;
+#endif /* CONFIG_SPE_POSSIBLE */
+
+/*
+ * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
+ * see kvmppc_core_check_processor_compat().
+ */
+#ifdef CONFIG_ALTIVEC
+ case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
+ r = RESUME_GUEST;
+ break;
+
+ case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
+ kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
+ r = RESUME_GUEST;
+ break;
#endif
case BOOKE_INTERRUPT_DATA_STORAGE:
@@ -1188,6 +1345,8 @@ out:
else {
/* interrupts now hard-disabled */
kvmppc_fix_ee_before_entry();
+ kvmppc_load_guest_fp(vcpu);
+ kvmppc_load_guest_altivec(vcpu);
}
}
@@ -1243,6 +1402,11 @@ int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
(unsigned long)vcpu);
+ /*
+ * Clear DBSR.MRR to avoid guest debug interrupt as
+ * this is of host interest
+ */
+ mtspr(SPRN_DBSR, DBSR_MRR);
return 0;
}
@@ -1457,144 +1621,125 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
-int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = 0;
- union kvmppc_one_reg val;
- int size;
-
- size = one_reg_size(reg->id);
- if (size > sizeof(val))
- return -EINVAL;
- switch (reg->id) {
+ switch (id) {
case KVM_REG_PPC_IAC1:
- val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
+ *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
break;
case KVM_REG_PPC_IAC2:
- val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
+ *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case KVM_REG_PPC_IAC3:
- val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
+ *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
break;
case KVM_REG_PPC_IAC4:
- val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
+ *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
break;
#endif
case KVM_REG_PPC_DAC1:
- val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
+ *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
break;
case KVM_REG_PPC_DAC2:
- val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
+ *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
break;
case KVM_REG_PPC_EPR: {
u32 epr = kvmppc_get_epr(vcpu);
- val = get_reg_val(reg->id, epr);
+ *val = get_reg_val(id, epr);
break;
}
#if defined(CONFIG_64BIT)
case KVM_REG_PPC_EPCR:
- val = get_reg_val(reg->id, vcpu->arch.epcr);
+ *val = get_reg_val(id, vcpu->arch.epcr);
break;
#endif
case KVM_REG_PPC_TCR:
- val = get_reg_val(reg->id, vcpu->arch.tcr);
+ *val = get_reg_val(id, vcpu->arch.tcr);
break;
case KVM_REG_PPC_TSR:
- val = get_reg_val(reg->id, vcpu->arch.tsr);
+ *val = get_reg_val(id, vcpu->arch.tsr);
break;
case KVM_REG_PPC_DEBUG_INST:
- val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
+ *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
break;
case KVM_REG_PPC_VRSAVE:
- val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ *val = get_reg_val(id, vcpu->arch.vrsave);
break;
default:
- r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
+ r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
break;
}
- if (r)
- return r;
-
- if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
- r = -EFAULT;
-
return r;
}
-int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
+ union kvmppc_one_reg *val)
{
int r = 0;
- union kvmppc_one_reg val;
- int size;
- size = one_reg_size(reg->id);
- if (size > sizeof(val))
- return -EINVAL;
-
- if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
- return -EFAULT;
-
- switch (reg->id) {
+ switch (id) {
case KVM_REG_PPC_IAC1:
- vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
+ vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_IAC2:
- vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
+ vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
case KVM_REG_PPC_IAC3:
- vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
+ vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_IAC4:
- vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
+ vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
break;
#endif
case KVM_REG_PPC_DAC1:
- vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
+ vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_DAC2:
- vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
+ vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
break;
case KVM_REG_PPC_EPR: {
- u32 new_epr = set_reg_val(reg->id, val);
+ u32 new_epr = set_reg_val(id, *val);
kvmppc_set_epr(vcpu, new_epr);
break;
}
#if defined(CONFIG_64BIT)
case KVM_REG_PPC_EPCR: {
- u32 new_epcr = set_reg_val(reg->id, val);
+ u32 new_epcr = set_reg_val(id, *val);
kvmppc_set_epcr(vcpu, new_epcr);
break;
}
#endif
case KVM_REG_PPC_OR_TSR: {
- u32 tsr_bits = set_reg_val(reg->id, val);
+ u32 tsr_bits = set_reg_val(id, *val);
kvmppc_set_tsr_bits(vcpu, tsr_bits);
break;
}
case KVM_REG_PPC_CLEAR_TSR: {
- u32 tsr_bits = set_reg_val(reg->id, val);
+ u32 tsr_bits = set_reg_val(id, *val);
kvmppc_clr_tsr_bits(vcpu, tsr_bits);
break;
}
case KVM_REG_PPC_TSR: {
- u32 tsr = set_reg_val(reg->id, val);
+ u32 tsr = set_reg_val(id, *val);
kvmppc_set_tsr(vcpu, tsr);
break;
}
case KVM_REG_PPC_TCR: {
- u32 tcr = set_reg_val(reg->id, val);
+ u32 tcr = set_reg_val(id, *val);
kvmppc_set_tcr(vcpu, tcr);
break;
}
case KVM_REG_PPC_VRSAVE:
- vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ vcpu->arch.vrsave = set_reg_val(id, *val);
break;
default:
- r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
+ r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
break;
}
@@ -1694,10 +1839,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
update_timer_ints(vcpu);
}
-void kvmppc_decrementer_func(unsigned long data)
+void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
-
if (vcpu->arch.tcr & TCR_ARE) {
vcpu->arch.dec = vcpu->arch.decar;
kvmppc_emulate_dec(vcpu);
@@ -1842,7 +1985,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
int n, b = 0, w = 0;
if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
- vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
+ vcpu->arch.dbg_reg.dbcr0 = 0;
vcpu->guest_debug = 0;
kvm_guest_protect_msr(vcpu, MSR_DE, false);
return 0;
@@ -1850,15 +1993,13 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
kvm_guest_protect_msr(vcpu, MSR_DE, true);
vcpu->guest_debug = dbg->control;
- vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
- /* Set DBCR0_EDM in guest visible DBCR0 register. */
- vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;
+ vcpu->arch.dbg_reg.dbcr0 = 0;
if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
- vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+ vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
/* Code below handles only HW breakpoints */
- dbg_reg = &(vcpu->arch.shadow_dbg_reg);
+ dbg_reg = &(vcpu->arch.dbg_reg);
#ifdef CONFIG_KVM_BOOKE_HV
/*
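
User space opts into all of the above through KVM_SET_GUEST_DEBUG; once control is nonzero the guest loses direct use of the debug registers (see the booke_emulate.c hunks below). A minimal sketch; the flags and the ioctl are existing ABI, vcpu_fd is assumed:

    struct kvm_guest_debug dbg = {
        .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP,
    };

    if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
        return -1;
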
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index f753543c56fa..22ba08ea68e9 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -32,9 +32,15 @@
#define BOOKE_IRQPRIO_ALIGNMENT 2
#define BOOKE_IRQPRIO_PROGRAM 3
#define BOOKE_IRQPRIO_FP_UNAVAIL 4
+#ifdef CONFIG_SPE_POSSIBLE
#define BOOKE_IRQPRIO_SPE_UNAVAIL 5
#define BOOKE_IRQPRIO_SPE_FP_DATA 6
#define BOOKE_IRQPRIO_SPE_FP_ROUND 7
+#endif
+#ifdef CONFIG_PPC_E500MC
+#define BOOKE_IRQPRIO_ALTIVEC_UNAVAIL 5
+#define BOOKE_IRQPRIO_ALTIVEC_ASSIST 6
+#endif
#define BOOKE_IRQPRIO_SYSCALL 8
#define BOOKE_IRQPRIO_AP_UNAVAIL 9
#define BOOKE_IRQPRIO_DTLB_MISS 10
@@ -116,40 +122,6 @@ extern int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn,
extern int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn,
ulong *spr_val);
-/*
- * Load up guest vcpu FP state if it's needed.
- * It also set the MSR_FP in thread so that host know
- * we're holding FPU, and then host can help to save
- * guest vcpu FP state if other threads require to use FPU.
- * This simulates an FP unavailable fault.
- *
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
- if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
- enable_kernel_fp();
- load_fp_state(&vcpu->arch.fp);
- current->thread.fp_save_area = &vcpu->arch.fp;
- current->thread.regs->msr |= MSR_FP;
- }
-#endif
-}
-
-/*
- * Save guest vcpu FP state into thread.
- * It requires to be called with preemption disabled.
- */
-static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
-{
-#ifdef CONFIG_PPC_FPU
- if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
- giveup_fpu(current);
- current->thread.fp_save_area = NULL;
-#endif
-}
-
static inline void kvmppc_clear_dbsr(void)
{
mtspr(SPRN_DBSR, mfspr(SPRN_DBSR));
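
The AltiVec entries reuse priorities 5 and 6 deliberately: CONFIG_SPE_POSSIBLE (e500v2) and CONFIG_PPC_E500MC are mutually exclusive in Kconfig, so no build ever sees both blocks. A build-time assertion could document that invariant (illustrative only, not part of the patch):

    #if defined(CONFIG_SPE_POSSIBLE) && defined(CONFIG_PPC_E500MC)
    #error "SPE and AltiVec BOOKE_IRQPRIO numbers would collide"
    #endif
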
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 28c158881d23..a82f64502de1 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -25,6 +25,7 @@
#define OP_19_XOP_RFI 50
#define OP_19_XOP_RFCI 51
+#define OP_19_XOP_RFDI 39
#define OP_31_XOP_MFMSR 83
#define OP_31_XOP_WRTEE 131
@@ -37,6 +38,12 @@ static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
}
+static void kvmppc_emul_rfdi(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.pc = vcpu->arch.dsrr0;
+ kvmppc_set_msr(vcpu, vcpu->arch.dsrr1);
+}
+
static void kvmppc_emul_rfci(struct kvm_vcpu *vcpu)
{
vcpu->arch.pc = vcpu->arch.csrr0;
@@ -65,6 +72,12 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
*advance = 0;
break;
+ case OP_19_XOP_RFDI:
+ kvmppc_emul_rfdi(vcpu);
+ kvmppc_set_exit_type(vcpu, EMULATED_RFDI_EXITS);
+ *advance = 0;
+ break;
+
default:
emulated = EMULATE_FAIL;
break;
@@ -118,6 +131,7 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
int emulated = EMULATE_DONE;
+ bool debug_inst = false;
switch (sprn) {
case SPRN_DEAR:
@@ -132,14 +146,128 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
case SPRN_CSRR1:
vcpu->arch.csrr1 = spr_val;
break;
+ case SPRN_DSRR0:
+ vcpu->arch.dsrr0 = spr_val;
+ break;
+ case SPRN_DSRR1:
+ vcpu->arch.dsrr1 = spr_val;
+ break;
+ case SPRN_IAC1:
+ /*
+ * If userspace is debugging guest then guest
+ * can not access debug registers.
+ */
+ if (vcpu->guest_debug)
+ break;
+
+ debug_inst = true;
+ vcpu->arch.dbg_reg.iac1 = spr_val;
+ break;
+ case SPRN_IAC2:
+ /*
+ * If userspace is debugging guest then guest
+ * can not access debug registers.
+ */
+ if (vcpu->guest_debug)
+ break;
+
+ debug_inst = true;
+ vcpu->arch.dbg_reg.iac2 = spr_val;
+ break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+ case SPRN_IAC3:
+ /*
+ * If userspace is debugging guest then guest
+ * can not access debug registers.
+ */
+ if (vcpu->guest_debug)
+ break;
+
+ debug_inst = true;
+ vcpu->arch.dbg_reg.iac3 = spr_val;
+ break;
+ case SPRN_IAC4:
+ /*
+ * If userspace is debugging guest then guest
+ * can not access debug registers.
+ */
+ if (vcpu->guest_debug)
+ break;
+
+ debug_inst = true;
+ vcpu->arch.dbg_reg.iac4 = spr_val;
+ break;
+#endif
+ case SPRN_DAC1:
+ /*
+ * If userspace is debugging guest then guest
+ * can not access debug registers.
+ */
+ if (vcpu->guest_debug)
+ break;
+
+ debug_inst = true;
+ vcpu->arch.dbg_reg.dac1 = spr_val;
+ break;
+ case SPRN_DAC2:
+ /*
+ * If userspace is debugging guest then guest
+ * can not access debug registers.
+ */
+ if (vcpu->guest_debug)
+ break;
+
+ debug_inst = true;
+ vcpu->arch.dbg_reg.dac2 = spr_val;
+ break;
case SPRN_DBCR0:
+ /*
+ * If userspace is debugging guest then guest
+ * can not access debug registers.
+ */
+ if (vcpu->guest_debug)
+ break;
+
+ debug_inst = true;
+ spr_val &= (DBCR0_IDM | DBCR0_IC | DBCR0_BT | DBCR0_TIE |
+ DBCR0_IAC1 | DBCR0_IAC2 | DBCR0_IAC3 | DBCR0_IAC4 |
+ DBCR0_DAC1R | DBCR0_DAC1W | DBCR0_DAC2R | DBCR0_DAC2W);
+
vcpu->arch.dbg_reg.dbcr0 = spr_val;
break;
case SPRN_DBCR1:
+ /*
+ * If userspace is debugging guest then guest
+ * can not access debug registers.
+ */
+ if (vcpu->guest_debug)
+ break;
+
+ debug_inst = true;
vcpu->arch.dbg_reg.dbcr1 = spr_val;
break;
+ case SPRN_DBCR2:
+ /*
+ * If userspace is debugging guest then guest
+ * can not access debug registers.
+ */
+ if (vcpu->guest_debug)
+ break;
+
+ debug_inst = true;
+ vcpu->arch.dbg_reg.dbcr2 = spr_val;
+ break;
case SPRN_DBSR:
+ /*
+ * If userspace is debugging guest then guest
+ * can not access debug registers.
+ */
+ if (vcpu->guest_debug)
+ break;
+
vcpu->arch.dbsr &= ~spr_val;
+ if (!(vcpu->arch.dbsr & ~DBSR_IDE))
+ kvmppc_core_dequeue_debug(vcpu);
break;
case SPRN_TSR:
kvmppc_clr_tsr_bits(vcpu, spr_val);
@@ -252,6 +380,10 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
emulated = EMULATE_FAIL;
}
+ if (debug_inst) {
+ current->thread.debug = vcpu->arch.dbg_reg;
+ switch_booke_debug_regs(&vcpu->arch.dbg_reg);
+ }
return emulated;
}
@@ -278,12 +410,43 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
case SPRN_CSRR1:
*spr_val = vcpu->arch.csrr1;
break;
+ case SPRN_DSRR0:
+ *spr_val = vcpu->arch.dsrr0;
+ break;
+ case SPRN_DSRR1:
+ *spr_val = vcpu->arch.dsrr1;
+ break;
+ case SPRN_IAC1:
+ *spr_val = vcpu->arch.dbg_reg.iac1;
+ break;
+ case SPRN_IAC2:
+ *spr_val = vcpu->arch.dbg_reg.iac2;
+ break;
+#if CONFIG_PPC_ADV_DEBUG_IACS > 2
+ case SPRN_IAC3:
+ *spr_val = vcpu->arch.dbg_reg.iac3;
+ break;
+ case SPRN_IAC4:
+ *spr_val = vcpu->arch.dbg_reg.iac4;
+ break;
+#endif
+ case SPRN_DAC1:
+ *spr_val = vcpu->arch.dbg_reg.dac1;
+ break;
+ case SPRN_DAC2:
+ *spr_val = vcpu->arch.dbg_reg.dac2;
+ break;
case SPRN_DBCR0:
*spr_val = vcpu->arch.dbg_reg.dbcr0;
+ if (vcpu->guest_debug)
+ *spr_val = *spr_val | DBCR0_EDM;
break;
case SPRN_DBCR1:
*spr_val = vcpu->arch.dbg_reg.dbcr1;
break;
+ case SPRN_DBCR2:
+ *spr_val = vcpu->arch.dbg_reg.dbcr2;
+ break;
case SPRN_DBSR:
*spr_val = vcpu->arch.dbsr;
break;
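
The same ownership rule is repeated in every debug-SPR case above: once user space debugs the guest (vcpu->guest_debug != 0), guest writes to the debug registers are silently dropped and reads of DBCR0 advertise DBCR0_EDM. The repeated guard could be captured in one helper; an illustrative refactoring, not part of the patch:

    static bool debug_regs_owned_by_guest(struct kvm_vcpu *vcpu)
    {
        /* Nonzero guest_debug means user space owns the debug
         * resources; the guest only sees DBCR0_EDM set. */
        return vcpu->guest_debug == 0;
    }

When the guest does own the registers, a successful mtspr also reloads the hardware through switch_booke_debug_regs(), as the debug_inst tail above shows.
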
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
index e9fa56a911fd..81bd8a07aa51 100644
--- a/arch/powerpc/kvm/bookehv_interrupts.S
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -238,7 +238,7 @@ kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1,(NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \
- SPRN_SRR0, SPRN_SRR1,NEED_ESR
+ SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \
@@ -256,11 +256,9 @@ kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \
SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \
SPRN_SRR0, SPRN_SRR1, 0
-kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, EX_PARAMS(GEN), \
+kvm_handler BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1, 0
-kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, EX_PARAMS(GEN), \
- SPRN_SRR0, SPRN_SRR1, 0
-kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, EX_PARAMS(GEN), \
+kvm_handler BOOKE_INTERRUPT_ALTIVEC_ASSIST, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \
SPRN_SRR0, SPRN_SRR1, 0
@@ -350,7 +348,7 @@ kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
-kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR
+kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
@@ -361,9 +359,6 @@ kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
-kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
-kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
-kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index a326178bdea5..72920bed3ac6 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -22,6 +22,7 @@
#include <linux/kvm_host.h>
#include <asm/mmu-book3e.h>
#include <asm/tlb.h>
+#include <asm/cputhreads.h>
enum vcpu_ftr {
VCPU_FTR_MMU_V2
@@ -289,6 +290,25 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe)
#define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu)
#define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS)
+
+/*
+ * These functions should be called with preemption disabled
+ * and the returned value is valid only in that context
+ */
+static inline int get_thread_specific_lpid(int vm_lpid)
+{
+ int vcpu_lpid = vm_lpid;
+
+ if (threads_per_core == 2)
+ vcpu_lpid |= smp_processor_id() & 1;
+
+ return vcpu_lpid;
+}
+
+static inline int get_lpid(struct kvm_vcpu *vcpu)
+{
+ return get_thread_specific_lpid(vcpu->kvm->arch.lpid);
+}
#else
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
struct kvm_book3e_206_tlb_entry *gtlbe);
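
A worked example of the mapping above, assuming a two-thread e6500 and a VM whose base lpid is 4 (base lpids are always even; see the e500mc.c hunks below):

    int base = 4;         /* kvm->arch.lpid, always even     */
    int t0   = base | 0;  /* thread 0 runs this VM as lpid 4 */
    int t1   = base | 1;  /* thread 1 runs this VM as lpid 5 */

On single-threaded cores (threads_per_core != 2) the OR never fires and the VM keeps its allocated lpid unchanged.
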
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index c99c40e9182a..ce7291c79f6c 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -259,6 +259,7 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
break;
/* extra exceptions */
+#ifdef CONFIG_SPE_POSSIBLE
case SPRN_IVOR32:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
break;
@@ -268,6 +269,15 @@ int kvmppc_core_emulate_mtspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong spr_va
case SPRN_IVOR34:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
break;
+#endif
+#ifdef CONFIG_ALTIVEC
+ case SPRN_IVOR32:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL] = spr_val;
+ break;
+ case SPRN_IVOR33:
+ vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST] = spr_val;
+ break;
+#endif
case SPRN_IVOR35:
vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
break;
@@ -381,6 +391,7 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_v
break;
/* extra exceptions */
+#ifdef CONFIG_SPE_POSSIBLE
case SPRN_IVOR32:
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
break;
@@ -390,6 +401,15 @@ int kvmppc_core_emulate_mfspr_e500(struct kvm_vcpu *vcpu, int sprn, ulong *spr_v
case SPRN_IVOR34:
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
break;
+#endif
+#ifdef CONFIG_ALTIVEC
+ case SPRN_IVOR32:
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_UNAVAIL];
+ break;
+ case SPRN_IVOR33:
+ *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALTIVEC_ASSIST];
+ break;
+#endif
case SPRN_IVOR35:
*spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
break;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 08f14bb57897..769778f855b0 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -69,7 +69,8 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
* writing shadow tlb entry to host TLB
*/
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
- uint32_t mas0)
+ uint32_t mas0,
+ uint32_t lpid)
{
unsigned long flags;
@@ -80,7 +81,7 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
- mtspr(SPRN_MAS8, stlbe->mas8);
+ mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
#endif
asm volatile("isync; tlbwe" : : : "memory");
@@ -129,11 +130,12 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
if (tlbsel == 0) {
mas0 = get_host_mas0(stlbe->mas2);
- __write_host_tlbe(stlbe, mas0);
+ __write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
} else {
__write_host_tlbe(stlbe,
MAS0_TLBSEL(1) |
- MAS0_ESEL(to_htlb1_esel(sesel)));
+ MAS0_ESEL(to_htlb1_esel(sesel)),
+ vcpu_e500->vcpu.kvm->arch.lpid);
}
}
@@ -176,7 +178,7 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
magic.mas8 = 0;
- __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
+ __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
preempt_enable();
}
#endif
@@ -317,10 +319,6 @@ static void kvmppc_e500_setup_stlbe(
stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
-
-#ifdef CONFIG_KVM_BOOKE_HV
- stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
-#endif
}
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -633,7 +631,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
local_irq_save(flags);
mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
- mtspr(SPRN_MAS5, MAS5_SGS | vcpu->kvm->arch.lpid);
+ mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
asm volatile("tlbsx 0, %[geaddr]\n" : :
[geaddr] "r" (geaddr));
mtspr(SPRN_MAS5, 0);
@@ -732,7 +730,7 @@ int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
return 0;
}
-int kvm_age_hva(struct kvm *kvm, unsigned long hva)
+int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
/* XXX could be more clever ;) */
return 0;
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 164bad2a19bf..2fdc8722e324 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -48,10 +48,11 @@ void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
return;
}
-
- tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
+ preempt_disable();
+ tag = PPC_DBELL_LPID(get_lpid(vcpu)) | vcpu->vcpu_id;
mb();
ppc_msgsnd(dbell_type, 0, tag);
+ preempt_enable();
}
/* gtlbe must not be mapped by more than one host tlb entry */
@@ -60,12 +61,11 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
{
unsigned int tid, ts;
gva_t eaddr;
- u32 val, lpid;
+ u32 val;
unsigned long flags;
ts = get_tlb_ts(gtlbe);
tid = get_tlb_tid(gtlbe);
- lpid = vcpu_e500->vcpu.kvm->arch.lpid;
/* We search the host TLB to invalidate its shadow TLB entry */
val = (tid << 16) | ts;
@@ -74,7 +74,7 @@ void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
local_irq_save(flags);
mtspr(SPRN_MAS6, val);
- mtspr(SPRN_MAS5, MAS5_SGS | lpid);
+ mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
val = mfspr(SPRN_MAS1);
@@ -95,7 +95,7 @@ void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
unsigned long flags;
local_irq_save(flags);
- mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
+ mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(&vcpu_e500->vcpu));
asm volatile("tlbilxlpid");
mtspr(SPRN_MAS5, 0);
local_irq_restore(flags);
@@ -110,6 +110,7 @@ void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
}
+/* We use two lpids per VM */
static DEFINE_PER_CPU(struct kvm_vcpu *[KVMPPC_NR_LPIDS], last_vcpu_of_lpid);
static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
@@ -118,10 +119,12 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
kvmppc_booke_vcpu_load(vcpu, cpu);
- mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
+ mtspr(SPRN_LPID, get_lpid(vcpu));
mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
mtspr(SPRN_GPIR, vcpu->vcpu_id);
mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
+ vcpu->arch.eplc = EPC_EGS | (get_lpid(vcpu) << EPC_ELPID_SHIFT);
+ vcpu->arch.epsc = vcpu->arch.eplc;
mtspr(SPRN_EPLC, vcpu->arch.eplc);
mtspr(SPRN_EPSC, vcpu->arch.epsc);
@@ -141,12 +144,10 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
mtspr(SPRN_GESR, vcpu->arch.shared->esr);
if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
- __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] != vcpu) {
+ __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) {
kvmppc_e500_tlbil_all(vcpu_e500);
- __get_cpu_var(last_vcpu_of_lpid)[vcpu->kvm->arch.lpid] = vcpu;
+ __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu;
}
-
- kvmppc_load_guest_fp(vcpu);
}
static void kvmppc_core_vcpu_put_e500mc(struct kvm_vcpu *vcpu)
@@ -179,6 +180,16 @@ int kvmppc_core_check_processor_compat(void)
r = 0;
else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
r = 0;
+#ifdef CONFIG_ALTIVEC
+ /*
+ * Since guests have the privilege to enable AltiVec, we need AltiVec
+ * support in the host to save/restore their context.
+ * Don't use CPU_FTR_ALTIVEC to identify cores with AltiVec unit
+ * because it's cleared in the absence of CONFIG_ALTIVEC!
+ */
+ else if (strcmp(cur_cpu_spec->cpu_name, "e6500") == 0)
+ r = 0;
+#endif
else
r = -ENOTSUPP;
@@ -194,9 +205,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
#ifdef CONFIG_64BIT
vcpu->arch.shadow_epcr |= SPRN_EPCR_ICM;
#endif
- vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
- vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
- vcpu->arch.epsc = vcpu->arch.eplc;
+ vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_PMMP;
vcpu->arch.pvr = mfspr(SPRN_PVR);
vcpu_e500->svr = mfspr(SPRN_SVR);
@@ -356,13 +365,26 @@ static int kvmppc_core_init_vm_e500mc(struct kvm *kvm)
if (lpid < 0)
return lpid;
+ /*
+ * Use two lpids per VM on cores with two threads like e6500. Use
+ * even numbers to speed up vcpu lpid computation with consecutive lpids
+ * per VM. vm1 will use lpids 2 and 3, vm2 lpids 4 and 5, and so on.
+ */
+ if (threads_per_core == 2)
+ lpid <<= 1;
+
kvm->arch.lpid = lpid;
return 0;
}
static void kvmppc_core_destroy_vm_e500mc(struct kvm *kvm)
{
- kvmppc_free_lpid(kvm->arch.lpid);
+ int lpid = kvm->arch.lpid;
+
+ if (threads_per_core == 2)
+ lpid >>= 1;
+
+ kvmppc_free_lpid(lpid);
}
static struct kvmppc_ops kvm_ops_e500mc = {
@@ -390,7 +412,13 @@ static int __init kvmppc_e500mc_init(void)
if (r)
goto err_out;
- kvmppc_init_lpid(64);
+ /*
+ * Use two lpids per VM on dual threaded processors like e6500
+ * to work around the lack of a tlb write conditional instruction.
+ * Expose half the number of available hardware lpids to the lpid
+ * allocator.
+ */
+ kvmppc_init_lpid(KVMPPC_NR_LPIDS/threads_per_core);
kvmppc_claim_lpid(0); /* host */
r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
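
Putting the allocator arithmetic together, a worked example assuming KVMPPC_NR_LPIDS == 64 and threads_per_core == 2, so the allocator manages ids 0..31 (values are illustrative):

    int id   = 3;          /* from kvmppc_alloc_lpid()            */
    int base = id << 1;    /* this VM uses hardware lpids 6 and 7 */
    int back = base >> 1;  /* destroy path recovers id 3 to free  */
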
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index e96b50d0bdab..5cc2e7af3a7b 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -219,7 +219,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
- emulated = kvmppc_get_last_inst(vcpu, false, &inst);
+ emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
if (emulated != EMULATE_DONE)
return emulated;
@@ -274,6 +274,21 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
}
break;
+ case 0:
+ /*
+ * Instruction with primary opcode 0. Based on PowerISA
+ * these are illegal instructions.
+ */
+ if (inst == KVMPPC_INST_SW_BREAKPOINT) {
+ run->exit_reason = KVM_EXIT_DEBUG;
+ run->debug.arch.address = kvmppc_get_pc(vcpu);
+ emulated = EMULATE_EXIT_USER;
+ advance = 0;
+ } else
+ emulated = EMULATE_FAIL;
+
+ break;
+
default:
emulated = EMULATE_FAIL;
}
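
Claiming a primary-opcode-0 pattern works because the Power ISA reserves those encodings as illegal, so no legitimate guest instruction can collide with the breakpoint. For reference, the constant is defined in arch/powerpc/include/asm/kvm_ppc.h; the value below is quoted from memory of this series and should be verified against the tree:

    #define KVMPPC_INST_SW_BREAKPOINT    0x00dddd00
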
diff --git a/arch/powerpc/kvm/emulate_loadstore.c b/arch/powerpc/kvm/emulate_loadstore.c
index 0de4ffa175a9..6d3c0ee1d744 100644
--- a/arch/powerpc/kvm/emulate_loadstore.c
+++ b/arch/powerpc/kvm/emulate_loadstore.c
@@ -58,7 +58,7 @@ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
/* this default type might be overwritten by subcategories */
kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
- emulated = kvmppc_get_last_inst(vcpu, false, &inst);
+ emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
if (emulated != EMULATE_DONE)
return emulated;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 4c79284b58be..c1f8f53cd312 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -294,7 +294,7 @@ int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
u32 last_inst;
- kvmppc_get_last_inst(vcpu, false, &last_inst);
+ kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
/* XXX Deliver Program interrupt to guest. */
pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
r = RESUME_HOST;
@@ -384,24 +384,16 @@ int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
-int kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void)
{
return 0;
}
-void kvm_arch_hardware_disable(void *garbage)
-{
-}
-
int kvm_arch_hardware_setup(void)
{
return 0;
}
-void kvm_arch_hardware_unsetup(void)
-{
-}
-
void kvm_arch_check_processor_compat(void *rtn)
{
*(int *)rtn = kvmppc_core_check_processor_compat();
@@ -462,10 +454,6 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
module_put(kvm->arch.kvm_ops->owner);
}
-void kvm_arch_sync_events(struct kvm *kvm)
-{
-}
-
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
int r;
@@ -608,10 +596,6 @@ int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
return kvmppc_core_create_memslot(kvm, slot, npages);
}
-void kvm_arch_memslots_updated(struct kvm *kvm)
-{
-}
-
int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_userspace_memory_region *mem,
@@ -628,10 +612,6 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
kvmppc_core_commit_memory_region(kvm, mem, old);
}
-void kvm_arch_flush_shadow_all(struct kvm *kvm)
-{
-}
-
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
struct kvm_memory_slot *slot)
{
@@ -658,7 +638,6 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
/* Make sure we're not using the vcpu anymore */
hrtimer_cancel(&vcpu->arch.dec_timer);
- tasklet_kill(&vcpu->arch.tasklet);
kvmppc_remove_vcpu_debugfs(vcpu);
@@ -684,16 +663,12 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
return kvmppc_core_pending_dec(vcpu);
}
-/*
- * low level hrtimer wake routine. Because this runs in hardirq context
- * we schedule a tasklet to do the real work.
- */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
struct kvm_vcpu *vcpu;
vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
- tasklet_schedule(&vcpu->arch.tasklet);
+ kvmppc_decrementer_func(vcpu);
return HRTIMER_NORESTART;
}
@@ -703,7 +678,6 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
int ret;
hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
- tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
vcpu->arch.dec_expires = ~(u64)0;
@@ -927,6 +901,103 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
+int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+ int r = 0;
+ union kvmppc_one_reg val;
+ int size;
+
+ size = one_reg_size(reg->id);
+ if (size > sizeof(val))
+ return -EINVAL;
+
+ r = kvmppc_get_one_reg(vcpu, reg->id, &val);
+ if (r == -EINVAL) {
+ r = 0;
+ switch (reg->id) {
+#ifdef CONFIG_ALTIVEC
+ case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
+ break;
+ case KVM_REG_PPC_VSCR:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
+ break;
+ case KVM_REG_PPC_VRSAVE:
+ val = get_reg_val(reg->id, vcpu->arch.vrsave);
+ break;
+#endif /* CONFIG_ALTIVEC */
+ default:
+ r = -EINVAL;
+ break;
+ }
+ }
+
+ if (r)
+ return r;
+
+ if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
+ r = -EFAULT;
+
+ return r;
+}
+
+int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
+{
+ int r;
+ union kvmppc_one_reg val;
+ int size;
+
+ size = one_reg_size(reg->id);
+ if (size > sizeof(val))
+ return -EINVAL;
+
+ if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
+ return -EFAULT;
+
+ r = kvmppc_set_one_reg(vcpu, reg->id, &val);
+ if (r == -EINVAL) {
+ r = 0;
+ switch (reg->id) {
+#ifdef CONFIG_ALTIVEC
+ case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
+ break;
+ case KVM_REG_PPC_VSCR:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
+ break;
+ case KVM_REG_PPC_VRSAVE:
+ if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+ r = -ENXIO;
+ break;
+ }
+ vcpu->arch.vrsave = set_reg_val(reg->id, val);
+ break;
+#endif /* CONFIG_ALTIVEC */
+ default:
+ r = -EINVAL;
+ break;
+ }
+ }
+
+ return r;
+}
+
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
int r;
@@ -1343,9 +1414,4 @@ int kvm_arch_init(void *opaque)
return 0;
}
-void kvm_arch_exit(void)
-{
-
-}
-
EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);