From f6f7017192ad62669dc8aa4cb33e5f5a0ecd2d81 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Mon, 1 Aug 2016 09:07:52 +0100
Subject: KVM: MIPS: Override HVA error values for EVA
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

MIPS Enhanced Virtual Addressing (EVA) allows the user mode and kernel
mode address spaces to overlap, breaking the assumption that PAGE_OFFSET
is an appropriate KVM HVA error value, since PAGE_OFFSET may be as low
as zero.

Fix this in the same way that s390 does in commit bf640876e21f ("KVM:
s390: Make KVM_HVA_ERR_BAD usable on s390"), by overriding
KVM_HVA_ERR_[RO_]BAD and kvm_is_error_hva() in asm/kvm_host.h.

Signed-off-by: James Hogan
Cc: Paolo Bonzini
Cc: "Radim Krčmář"
Cc: Ralf Baechle
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/include/asm/kvm_host.h | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

(limited to 'arch/mips')

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index b54bcadd8aec..4d7e0e466b5a 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -107,6 +107,20 @@
 #define KVM_INVALID_INST	0xdeadbeef
 #define KVM_INVALID_ADDR	0xdeadbeef
 
+/*
+ * EVA has overlapping user & kernel address spaces, so user VAs may be >
+ * PAGE_OFFSET. For this reason we can't use the default KVM_HVA_ERR_BAD of
+ * PAGE_OFFSET.
+ */
+
+#define KVM_HVA_ERR_BAD		(-1UL)
+#define KVM_HVA_ERR_RO_BAD	(-2UL)
+
+static inline bool kvm_is_error_hva(unsigned long addr)
+{
+	return IS_ERR_VALUE(addr);
+}
+
 extern atomic_t kvm_mips_instance;
 
 struct kvm_vm_stat {
--
cgit v1.2.3

From d5888477d31c64dba9264fbb33cfa9b066f071d0 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Fri, 19 Aug 2016 15:09:47 +0100
Subject: KVM: MIPS: Emulate MMIO via TLB miss for EVA
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

MIPS Enhanced Virtual Addressing (EVA) allows the virtual memory
segments to be rearranged such that the KSeg0/KSeg1 segments are
accessible TLB mapped to user mode, which would trigger a TLB Miss
exception (due to lack of TLB mappings) instead of an Address Error
exception.

Update the TLB Miss handling similarly to the Address Error handling
for guest MMIO emulation.

Signed-off-by: James Hogan
Cc: Paolo Bonzini
Cc: "Radim Krčmář"
Cc: Ralf Baechle
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/kvm/trap_emul.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+)

(limited to 'arch/mips')

diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index 091553942bcb..3a5484f9aa50 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -175,6 +175,24 @@ static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
 			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 			ret = RESUME_HOST;
 		}
+	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
+		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
+		/*
+		 * With EVA we may get a TLB exception instead of an address
+		 * error when the guest performs MMIO to KSeg1 addresses.
+		 */
+		kvm_debug("Emulate %s MMIO space\n",
+			  store ? "Store to" : "Load from");
+		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
+		if (er == EMULATE_FAIL) {
+			kvm_err("Emulate %s MMIO space failed\n",
+				store ? "Store to" : "Load from");
+			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
+			ret = RESUME_HOST;
+		} else {
+			run->exit_reason = KVM_EXIT_MMIO;
+			ret = RESUME_HOST;
+		}
 	} else {
 		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
 			store ? "ST" : "LD", cause, opc, badvaddr);
--
cgit v1.2.3
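The error-value scheme from the first patch above is easy to sanity-check in
isolation. The standalone sketch below (not kernel code) exercises the
overridden kvm_is_error_hva() in user space; IS_ERR_VALUE() is transcribed
from include/linux/err.h minus the unlikely() hint, and main() with its
sample low address is purely illustrative:

#include <stdbool.h>
#include <stdio.h>

#define MAX_ERRNO	4095
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

#define KVM_HVA_ERR_BAD		(-1UL)
#define KVM_HVA_ERR_RO_BAD	(-2UL)

static bool kvm_is_error_hva(unsigned long addr)
{
	return IS_ERR_VALUE(addr);
}

int main(void)
{
	/* Both error values land in the top 4095 addresses... */
	printf("BAD: %d, RO_BAD: %d\n",
	       kvm_is_error_hva(KVM_HVA_ERR_BAD),
	       kvm_is_error_hva(KVM_HVA_ERR_RO_BAD));
	/* ...while a low user VA, which is legal under EVA, is not. */
	printf("low VA: %d\n", kvm_is_error_hva(0x1000UL));
	return 0;
}

This prints 1 for both error values and 0 for the low address, which is why
the override stays correct even when PAGE_OFFSET is zero.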
"Store to" : "Load from"); + run->exit_reason = KVM_EXIT_INTERNAL_ERROR; + ret = RESUME_HOST; + } else { + run->exit_reason = KVM_EXIT_MMIO; + ret = RESUME_HOST; + } } else { kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n", store ? "ST" : "LD", cause, opc, badvaddr); -- cgit v1.2.3 From 91e4f1b6073dd680d86cdb7e42d7cccca9db39d8 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Thu, 15 Sep 2016 17:20:06 +0100 Subject: KVM: MIPS: Drop other CPU ASIDs on guest MMU changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a guest TLB entry is replaced by TLBWI or TLBWR, we only invalidate TLB entries on the local CPU. This doesn't work correctly on an SMP host when the guest is migrated to a different physical CPU, as it could pick up stale TLB mappings from the last time the vCPU ran on that physical CPU. Therefore invalidate both user and kernel host ASIDs on other CPUs, which will cause new ASIDs to be generated when it next runs on those CPUs. We're careful only to do this if the TLB entry was already valid, and only for the kernel ASID where the virtual address it mapped is outside of the guest user address range. Signed-off-by: James Hogan Cc: Paolo Bonzini Cc: "Radim Krčmář" Cc: Ralf Baechle Cc: linux-mips@linux-mips.org Cc: kvm@vger.kernel.org Cc: # 3.10.x- --- arch/mips/kvm/emulate.c | 63 +++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 10 deletions(-) (limited to 'arch/mips') diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c index e788515f766b..43853ec6e160 100644 --- a/arch/mips/kvm/emulate.c +++ b/arch/mips/kvm/emulate.c @@ -846,6 +846,47 @@ enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) return EMULATE_FAIL; } +/** + * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map. + * @vcpu: VCPU with changed mappings. + * @tlb: TLB entry being removed. + * + * This is called to indicate a single change in guest MMU mappings, so that we + * can arrange TLB flushes on this and other CPUs. 
From f3124cc551c8575c30e3736faf2d0bca54ee07cb Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Fri, 16 Sep 2016 00:00:08 +0100
Subject: KVM: MIPS: Split kernel/user ASID regeneration
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The host ASIDs for guest kernel and user mode are regenerated together
if the ASID for guest kernel mode is out of date. That is fine as the
ASID for guest kernel mode is always generated first; however it doesn't
allow the ASIDs to be regenerated or invalidated individually as an
alternative to linearly flushing the entire host TLB.

Therefore separate the regeneration code so that the ASIDs are checked
and regenerated separately.

Signed-off-by: James Hogan
Cc: Paolo Bonzini
Cc: "Radim Krčmář"
Cc: Ralf Baechle
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/kvm/mmu.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

(limited to 'arch/mips')

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 6cfdcf55572d..c1f8758f5323 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -250,6 +250,16 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
 		vcpu->arch.guest_kernel_asid[cpu] =
 			vcpu->arch.guest_kernel_mm.context.asid[cpu];
+		newasid++;
+
+		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+			  cpu_context(cpu, current->mm));
+		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
+	}
+
+	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
+	    asid_version_mask(cpu)) {
 		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 			vcpu->arch.guest_user_mm.context.asid[cpu];
@@ -257,8 +267,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
 			  cpu_context(cpu, current->mm));
-		kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
-			  cpu, vcpu->arch.guest_kernel_asid[cpu]);
 		kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n",
 			  cpu, vcpu->arch.guest_user_asid[cpu]);
 	}
--
cgit v1.2.3
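The staleness test that this split duplicates deserves a closer look. A
sketch of it as a standalone helper (not kernel code; the masks are
illustrative, the real values come from asm/mmu_context.h):

/*
 * A stored ASID carries a generation ("version") in its upper bits; if
 * that generation doesn't match the CPU's current ASID cache, the
 * stored ASID belongs to an old generation and must be regenerated.
 */
#define MODEL_ASID_MASK		0xffUL
#define MODEL_VERSION_MASK	(~MODEL_ASID_MASK)

static int asid_is_stale(unsigned long stored, unsigned long asid_cache)
{
	return (stored ^ asid_cache) & MODEL_VERSION_MASK;
}

With the split, kvm_arch_vcpu_load() applies this test to the kernel and
user ASIDs independently, which is what lets the surrounding patches
invalidate or regenerate one of them without touching the other.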
From 25b08c7fb0e410bdf7c42ea1faff816eb0451bbc Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Fri, 16 Sep 2016 00:06:43 +0100
Subject: KVM: MIPS: Invalidate TLB by regenerating ASIDs
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Invalidate host TLB mappings when the guest ASID is changed by
regenerating ASIDs, rather than flushing the entire host TLB except
entries in the guest KSeg0 range.

For the guest kernel mode ASID we regenerate on the spot when the guest
ASID is changed, as that will always take place while the guest is in
kernel mode.

However when the guest invalidates TLB entries the ASID will often be
changed temporarily as part of writing EntryHi without the guest
returning to user mode in between. We therefore regenerate the user mode
ASID lazily before entering the guest in user mode, if and only if the
guest ASID has actually changed since the last guest user mode entry.

Signed-off-by: James Hogan
Cc: Paolo Bonzini
Cc: "Radim Krčmář"
Cc: Ralf Baechle
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/include/asm/kvm_host.h |  3 +++
 arch/mips/kvm/emulate.c          | 18 +++++++++++++-----
 arch/mips/kvm/mips.c             | 30 ++++++++++++++++++++++++++++++
 arch/mips/kvm/mmu.c              |  4 ++++
 4 files changed, 50 insertions(+), 5 deletions(-)

(limited to 'arch/mips')

diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 4d7e0e466b5a..a5685c1adba2 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -328,6 +328,9 @@ struct kvm_vcpu_arch {
 	u32 guest_kernel_asid[NR_CPUS];
 	struct mm_struct guest_kernel_mm, guest_user_mm;
 
+	/* Guest ASID of last user mode execution */
+	unsigned int last_user_gasid;
+
 	int last_sched_cpu;
 
 	/* WAIT executed */
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 43853ec6e160..8dc9e64346e6 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1170,15 +1170,23 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 						& KVM_ENTRYHI_ASID,
 						nasid);
 
+					/*
+					 * Regenerate/invalidate kernel MMU
+					 * context.
+					 * The user MMU context will be
+					 * regenerated lazily on re-entry to
+					 * guest user if the guest ASID actually
+					 * changes.
+					 */
 					preempt_disable();
-					/* Blow away the shadow host TLBs */
-					kvm_mips_flush_host_tlb(1);
 					cpu = smp_processor_id();
+					kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm,
+								cpu, vcpu);
+					vcpu->arch.guest_kernel_asid[cpu] =
+						vcpu->arch.guest_kernel_mm.context.asid[cpu];
 					for_each_possible_cpu(i)
-						if (i != cpu) {
-							vcpu->arch.guest_user_asid[i] = 0;
+						if (i != cpu)
 							vcpu->arch.guest_kernel_asid[i] = 0;
-						}
 					preempt_enable();
 				}
 				kvm_write_c0_guest_entryhi(cop0,
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index a6ea084b4d9d..ad1b15ba5907 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -411,6 +411,31 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	return -ENOIOCTLCMD;
 }
 
+/* Must be called with preemption disabled, just before entering guest */
+static void kvm_mips_check_asids(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int cpu = smp_processor_id();
+	unsigned int gasid;
+
+	/*
+	 * Lazy host ASID regeneration for guest user mode.
+	 * If the guest ASID has changed since the last guest usermode
+	 * execution, regenerate the host ASID so as to invalidate stale TLB
+	 * entries.
+	 */
+	if (!KVM_GUEST_KERNEL_MODE(vcpu)) {
+		gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
+		if (gasid != vcpu->arch.last_user_gasid) {
+			kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu,
+						vcpu);
+			vcpu->arch.guest_user_asid[cpu] =
+				vcpu->arch.guest_user_mm.context.asid[cpu];
+			vcpu->arch.last_user_gasid = gasid;
+		}
+	}
+}
+
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int r = 0;
@@ -438,6 +463,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	htw_stop();
 
 	trace_kvm_enter(vcpu);
+
+	kvm_mips_check_asids(vcpu);
+
 	r = vcpu->arch.vcpu_run(run, vcpu);
 	trace_kvm_out(vcpu);
 
@@ -1551,6 +1579,8 @@ skip_emul:
 	if (ret == RESUME_GUEST) {
 		trace_kvm_reenter(vcpu);
 
+		kvm_mips_check_asids(vcpu);
+
 		/*
 		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
 		 * is live), restore FCR31 / MSACSR.
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index c1f8758f5323..8e1f2bffcf0f 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -260,9 +260,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 	if ((vcpu->arch.guest_user_asid[cpu] ^ asid_cache(cpu)) &
 	    asid_version_mask(cpu)) {
+		u32 gasid = kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
+				KVM_ENTRYHI_ASID;
+
 		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
 		vcpu->arch.guest_user_asid[cpu] =
 			vcpu->arch.guest_user_mm.context.asid[cpu];
+		vcpu->arch.last_user_gasid = gasid;
 		newasid++;
 
 		kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
--
cgit v1.2.3
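The ordering here is subtle: kvm_mips_check_asids() runs with preemption
disabled, both on first entry and on every RESUME_GUEST re-entry, so the
comparison against last_user_gasid turns "guest wrote EntryHi" into at most
one host ASID regeneration per real ASID change. A compilable model of just
that comparison (not kernel code; names and the allocator are illustrative
stand-ins for kvm_get_new_mmu_context() and friends):

struct vcpu_model {
	unsigned int last_user_gasid;	/* guest ASID at last user entry */
	unsigned int host_user_asid;	/* host ASID backing guest user */
};

static unsigned int next_asid = 1;

static unsigned int alloc_asid(void)
{
	return next_asid++;	/* hypothetical allocator */
}

static void check_asids(struct vcpu_model *vcpu, unsigned int gasid,
			int guest_kernel_mode)
{
	if (guest_kernel_mode)
		return;		/* kernel ASID was regenerated eagerly */

	/*
	 * Transient EntryHi writes in guest kernel mode (e.g. around TLB
	 * invalidation) restore the old ASID before this point, so they
	 * fall through here at no host-side cost.
	 */
	if (gasid == vcpu->last_user_gasid)
		return;

	vcpu->host_user_asid = alloc_asid();
	vcpu->last_user_gasid = gasid;
}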
From bf18db4e7bd99f3a65bcc43225790b16af733321 Mon Sep 17 00:00:00 2001
From: James Hogan
Date: Fri, 16 Sep 2016 13:14:09 +0100
Subject: KVM: MIPS: Drop dubious EntryHi optimisation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

There exists a slightly dubious optimisation in the implementation of
the MIPS KVM EntryHi emulation which skips TLB invalidation if the
EntryHi points to an address in the guest KSeg0 region, intended to
catch guest TLB invalidations where the ASID is almost immediately
restored to the previous value.
Now that we perform lazy host ASID regeneration for guest user mode when
the guest ASID changes, we should be able to drop the optimisation
without a significant impact (only the extra TLB refills for the small
amount of code run while the TLB is being invalidated).

Signed-off-by: James Hogan
Cc: Paolo Bonzini
Cc: "Radim Krčmář"
Cc: Ralf Baechle
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
---
 arch/mips/kvm/emulate.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch/mips')

diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index 8dc9e64346e6..4db4c0370859 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -1162,8 +1162,7 @@ enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
 			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
 				u32 nasid =
 					vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
-				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
-				    ((kvm_read_c0_guest_entryhi(cop0) &
+				if (((kvm_read_c0_guest_entryhi(cop0) &
 				      KVM_ENTRYHI_ASID) != nasid)) {
 					trace_kvm_asid_change(vcpu,
 						kvm_read_c0_guest_entryhi(cop0)
--
cgit v1.2.3
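To see exactly what was dropped, the two guard conditions can be written side
by side as a sketch (not kernel code; the macro values are illustrative
32-bit stand-ins rather than the real asm/addrspace.h and KVM definitions):

#define MODEL_KSEGX(a)		((a) & 0xe0000000UL)
#define MODEL_CKSEG0		0x80000000UL
#define MODEL_ENTRYHI_ASID	0xffUL

/* Before: additionally skip the ASID-change handling when the new
 * EntryHi points into KSeg0. */
static int asid_change_old(unsigned long new_hi, unsigned long old_hi)
{
	return (MODEL_KSEGX(new_hi) != MODEL_CKSEG0) &&
	       ((old_hi & MODEL_ENTRYHI_ASID) !=
		(new_hi & MODEL_ENTRYHI_ASID));
}

/* After: act on every real ASID change; the lazy user-ASID
 * regeneration from the previous patches absorbs the transient changes
 * the KSeg0 test was meant to filter out. */
static int asid_change_new(unsigned long new_hi, unsigned long old_hi)
{
	return (old_hi & MODEL_ENTRYHI_ASID) != (new_hi & MODEL_ENTRYHI_ASID);
}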