Diffstat (limited to 'arch/mips/kvm/emulate.c')
-rw-r--r--  arch/mips/kvm/emulate.c | 114
 1 file changed, 90 insertions(+), 24 deletions(-)
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 6eb52b9c9818..8770f32c9e0b 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -13,7 +13,6 @@
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
-#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
@@ -846,6 +845,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
return EMULATE_FAIL;
}
+/**
+ * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map.
+ * @vcpu: VCPU with changed mappings.
+ * @tlb: TLB entry being removed.
+ *
+ * This is called to indicate a single change in guest MMU mappings, so that we
+ * can arrange TLB flushes on this and other CPUs.
+ */
+static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb)
+{
+ int cpu, i;
+ bool user;
+
+ /* No need to flush for entries which are already invalid */
+ if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V))
+ return;
+ /* User address space doesn't need flushing for KSeg2/3 changes */
+ user = tlb->tlb_hi < KVM_GUEST_KSEG0;
+
+ preempt_disable();
+
+ /*
+ * Probe the shadow host TLB for the entry being overwritten; if one
+ * matches, invalidate it.
+ */
+ kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+ /* Invalidate the whole ASID on other CPUs */
+ cpu = smp_processor_id();
+ for_each_possible_cpu(i) {
+ if (i == cpu)
+ continue;
+ if (user)
+ vcpu->arch.guest_user_asid[i] = 0;
+ vcpu->arch.guest_kernel_asid[i] = 0;
+ }
+
+ preempt_enable();
+}
+
/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
@@ -865,11 +905,8 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
}
tlb = &vcpu->arch.guest_tlb[index];
- /*
- * Probe the shadow host TLB for the entry being overwritten, if one
- * matches, invalidate it
- */
- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+
+ kvm_mips_invalidate_guest_tlb(vcpu, tlb);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
@@ -898,11 +935,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
tlb = &vcpu->arch.guest_tlb[index];
- /*
- * Probe the shadow host TLB for the entry being overwritten, if one
- * matches, invalidate it
- */
- kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
+ kvm_mips_invalidate_guest_tlb(vcpu, tlb);
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
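Both TLBWI and TLBWR now go through kvm_mips_invalidate_guest_tlb(), so the ENTRYLO_V guard applies to each: overwriting an entry whose even and odd halves are both invalid cannot leave stale translations behind, and no cross-CPU work is needed. The guard restated on its own, as a sketch (the helper name is illustrative):

	/* Sketch: an entry needs flushing only if either half is valid. */
	static bool example_entry_needs_flush(const struct kvm_mips_tlb *tlb)
	{
		/* tlb_lo[0]/tlb_lo[1] cover the even/odd page of the pair. */
		return (tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V;
	}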
@@ -1026,6 +1059,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
enum emulation_result er = EMULATE_DONE;
u32 rt, rd, sel;
unsigned long curr_pc;
+ int cpu, i;
/*
* Update PC and hold onto current PC in case there is
@@ -1127,16 +1161,31 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
u32 nasid =
vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
- if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
- ((kvm_read_c0_guest_entryhi(cop0) &
+ if (((kvm_read_c0_guest_entryhi(cop0) &
KVM_ENTRYHI_ASID) != nasid)) {
trace_kvm_asid_change(vcpu,
kvm_read_c0_guest_entryhi(cop0)
& KVM_ENTRYHI_ASID,
nasid);
- /* Blow away the shadow host TLBs */
- kvm_mips_flush_host_tlb(1);
+ /*
+ * Regenerate/invalidate kernel MMU
+ * context.
+ * The user MMU context will be
+ * regenerated lazily on re-entry to
+ * guest user if the guest ASID actually
+ * changes.
+ */
+ preempt_disable();
+ cpu = smp_processor_id();
+ kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
+ cpu, vcpu);
+ vcpu->arch.guest_kernel_asid[cpu] =
+ vcpu->arch.guest_kernel_mm.context.asid[cpu];
+ for_each_possible_cpu(i)
+ if (i != cpu)
+ vcpu->arch.guest_kernel_asid[i] = 0;
+ preempt_enable();
}
kvm_write_c0_guest_entryhi(cop0,
vcpu->arch.gprs[rt]);
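The asymmetry above is deliberate: the kernel MMU context is regenerated eagerly because guest kernel mappings may be used immediately after the EntryHi write, while the user context can wait until the guest actually returns to user mode. A sketch of the lazy side, assuming by symmetry a guest_user_mm context alongside guest_kernel_mm and an illustrative cached-ASID field (both assumptions; the real check lives in the guest-entry path):

	/* Hypothetical sketch of lazy user-ASID regeneration. */
	static void example_check_user_context(struct kvm_vcpu *vcpu, int cpu)
	{
		u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
			    KVM_ENTRYHI_ASID;

		/* Only regenerate if the guest ASID actually changed;
		 * last_user_gasid is an illustrative field, not from this patch.
		 */
		if (gasid != vcpu->arch.last_user_gasid) {
			kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm,
						cpu, vcpu);
			vcpu->arch.guest_user_asid[cpu] =
				vcpu->arch.guest_user_mm.context.asid[cpu];
			vcpu->arch.last_user_gasid = gasid;
		}
	}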
@@ -1642,8 +1691,14 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
preempt_disable();
if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
- if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
- kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
+ if (kvm_mips_host_tlb_lookup(vcpu, va) < 0 &&
+ kvm_mips_handle_kseg0_tlb_fault(va, vcpu)) {
+ kvm_err("%s: handling mapped kseg0 tlb fault for %lx, vcpu: %p, ASID: %#lx\n",
+ __func__, va, vcpu, read_c0_entryhi());
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto done;
+ }
} else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
int index;
@@ -1680,12 +1735,18 @@ enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
run, vcpu);
preempt_enable();
goto dont_update_pc;
- } else {
- /*
- * We fault an entry from the guest tlb to the
- * shadow host TLB
- */
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
+ }
+ /*
+ * We fault an entry from the guest tlb to the
+ * shadow host TLB
+ */
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ preempt_enable();
+ goto done;
}
}
} else {
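kvm_mips_handle_kseg0_tlb_fault() and kvm_mips_handle_mapped_seg_tlb_fault() can now fail, so the cache-op emulation checks their return values and aborts with EMULATE_FAIL instead of continuing with a possibly unmapped address. The resulting caller pattern, as a sketch with an illustrative helper name:

	/* Sketch: propagate a TLB refill failure out of an emulation helper. */
	static enum emulation_result example_refill(struct kvm_vcpu *vcpu,
						    struct kvm_mips_tlb *tlb)
	{
		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb))
			return EMULATE_FAIL;	/* refill failed; stop emulating */
		return EMULATE_DONE;
	}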
@@ -2659,7 +2720,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
* OK we have a Guest TLB entry, now inject it into the
* shadow host TLB
*/
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
+ if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb)) {
+ kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n",
+ __func__, va, index, vcpu,
+ read_c0_entryhi());
+ er = EMULATE_FAIL;
+ }
}
}
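The TLB-miss path gets the same treatment as the cache path: a failed refill surfaces as EMULATE_FAIL rather than being ignored. Further up the stack, an EMULATE_FAIL result is typically converted into an internal-error exit to userspace; a hedged sketch of that convention (illustrative, not the exact exit-handler code):

	/* Sketch: a failed emulation becomes an internal-error exit. */
	static int example_consume_result(struct kvm_run *run,
					  enum emulation_result er)
	{
		if (er == EMULATE_FAIL) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;	/* bounce to userspace */
		}
		return RESUME_GUEST;		/* re-enter the guest */
	}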