author     David Daney <david.daney@cavium.com>       2013-05-14 00:56:44 +0400
committer  Ralf Baechle <ralf@linux-mips.org>         2013-05-16 22:35:42 +0400
commit     48c4ac976ae995f263cde8f09578de86bc8e9f1d
tree       48e9b3753b951ea2c276546679264c3030a8ea7d /arch/mips/kvm
parent     8ea6cd7af124ad070b44a7f60e225e45e3f38f79
download   linux-48c4ac976ae995f263cde8f09578de86bc8e9f1d.tar.xz
Revert "MIPS: Allow ASID size to be determined at boot time."
This reverts commit d532f3d26716a39dfd4b88d687bd344fbe77e390.
The original commit has several problems:
1) Doesn't work with 64-bit kernels.
2) Calls TLBMISS_HANDLER_SETUP() before the code is generated.
3) Calls TLBMISS_HANDLER_SETUP() twice in per_cpu_trap_init() when
only one call is needed.
[ralf@linux-mips.org: Also revert the bits of the ASID patch which were
hidden in the KVM merge.]
Signed-off-by: David Daney <david.daney@cavium.com>
Cc: linux-mips@linux-mips.org
Cc: linux-kernel@vger.kernel.org
Cc: "Steven J. Hill" <Steven.Hill@imgtec.com>
Cc: David Daney <david.daney@cavium.com>
Patchwork: https://patchwork.linux-mips.org/patch/5242/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--  arch/mips/kvm/kvm_mips_emul.c   29
-rw-r--r--  arch/mips/kvm/kvm_tlb.c         26
2 files changed, 31 insertions, 24 deletions
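
The change below is mechanical: every use of the function-style ASID_MASK() helper that the reverted commit introduced (so the ASID field width could be chosen at boot) goes back to masking with the fixed ASID_MASK constant, and the ASID bump in kvm_get_new_mmu_context() goes back to the classic `(asid += ASID_INC) & ASID_MASK` roll-over test. A minimal stand-alone sketch of the two styles and the roll-over check follows; the constant values, the asid_bits variable, the ASID_MASK_RUNTIME name and the sample register values are made up for illustration and are not the kernel's definitions.

#include <stdio.h>

/* Illustrative values only; the real definitions live in the MIPS
 * mmu_context headers and are not reproduced here. */
#define ASID_MASK 0xfful      /* fixed-width style restored by the revert */
#define ASID_INC  0x1ul

/* Reverted style: mask width picked at boot, so the mask is a runtime value.
 * The (1 << asid_bits) - 1 construction is an assumption for this sketch. */
static unsigned long asid_bits = 8;                 /* would be probed at boot */
#define ASID_MASK_RUNTIME(x) ((x) & ((1ul << asid_bits) - 1))

int main(void)
{
	unsigned long entryhi    = 0x4000abcd;  /* pretend EntryHi contents   */
	unsigned long asid_cache = 0x100ff;     /* pretend per-CPU ASID cache */

	/* Boot-time-sized mask (the form being reverted). */
	printf("runtime mask: asid = %#lx\n", ASID_MASK_RUNTIME(entryhi));

	/* Fixed constant mask (the form restored throughout the diff below). */
	printf("fixed mask:   asid = %#lx\n", entryhi & ASID_MASK);

	/* Restored roll-over test from kvm_get_new_mmu_context(): bump the
	 * cached ASID and check whether the ASID field wrapped to zero. */
	if (!((asid_cache += ASID_INC) & ASID_MASK))
		printf("ASID space exhausted, start a new generation\n");

	return 0;
}

Note that the restored `(asid += ASID_INC) & ASID_MASK` form updates the cached value as a side effect of the test, exactly as in the kvm_get_new_mmu_context() hunk of kvm_tlb.c below.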
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index 2b2bac9a40aa..4b6274b47f33 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -525,16 +525,18 @@ kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
                 printk("MTCz, cop0->reg[EBASE]: %#lx\n",
                        kvm_read_c0_guest_ebase(cop0));
             } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
-                uint32_t nasid = ASID_MASK(vcpu->arch.gprs[rt]);
+                uint32_t nasid =
+                    vcpu->arch.gprs[rt] & ASID_MASK;
                 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
-                    (ASID_MASK(kvm_read_c0_guest_entryhi(cop0))
-                     != nasid)) {
+                    ((kvm_read_c0_guest_entryhi(cop0) &
+                      ASID_MASK) != nasid)) {
                     kvm_debug
                         ("MTCz, change ASID from %#lx to %#lx\n",
-                         ASID_MASK(kvm_read_c0_guest_entryhi(cop0)),
-                         ASID_MASK(vcpu->arch.gprs[rt]));
+                         kvm_read_c0_guest_entryhi(cop0) &
+                         ASID_MASK,
+                         vcpu->arch.gprs[rt] & ASID_MASK);

                     /* Blow away the shadow host TLBs */
                     kvm_mips_flush_host_tlb(1);
@@ -986,7 +988,8 @@ kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
              * resulting handler will do the right thing
              */
             index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
-                                              ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+                                              (kvm_read_c0_guest_entryhi
+                                               (cop0) & ASID_MASK));

             if (index < 0) {
                 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
@@ -1151,7 +1154,7 @@ kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
     struct kvm_vcpu_arch *arch = &vcpu->arch;
     enum emulation_result er = EMULATE_DONE;
     unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                            ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+                            (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

     if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
         /* save old pc */
@@ -1198,7 +1201,7 @@ kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
     enum emulation_result er = EMULATE_DONE;
     unsigned long entryhi =
         (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-        ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+        (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

     if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
         /* save old pc */
@@ -1243,7 +1246,7 @@ kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
     struct kvm_vcpu_arch *arch = &vcpu->arch;
     enum emulation_result er = EMULATE_DONE;
     unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                            ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+                            (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

     if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
         /* save old pc */
@@ -1287,7 +1290,7 @@ kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
     struct kvm_vcpu_arch *arch = &vcpu->arch;
     enum emulation_result er = EMULATE_DONE;
     unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                            ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+                            (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

     if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
         /* save old pc */
@@ -1356,7 +1359,7 @@ kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
 {
     struct mips_coproc *cop0 = vcpu->arch.cop0;
     unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
-                            ASID_MASK(kvm_read_c0_guest_entryhi(cop0));
+                            (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
     struct kvm_vcpu_arch *arch = &vcpu->arch;
     enum emulation_result er = EMULATE_DONE;
@@ -1783,8 +1786,8 @@ kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
          */
         index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
-                                          ASID_MASK(kvm_read_c0_guest_entryhi
-                                                    (vcpu->arch.cop0)));
+                                          (kvm_read_c0_guest_entryhi
+                                           (vcpu->arch.cop0) & ASID_MASK));
         if (index < 0) {
             if (exccode == T_TLB_LD_MISS) {
                 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
diff --git a/arch/mips/kvm/kvm_tlb.c b/arch/mips/kvm/kvm_tlb.c
index 89511a9258d3..e3f0d9b8b6c5 100644
--- a/arch/mips/kvm/kvm_tlb.c
+++ b/arch/mips/kvm/kvm_tlb.c
@@ -51,13 +51,13 @@ EXPORT_SYMBOL(kvm_mips_is_error_pfn);

 uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
 {
-    return ASID_MASK(vcpu->arch.guest_kernel_asid[smp_processor_id()]);
+    return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
 }

 uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
 {
-    return ASID_MASK(vcpu->arch.guest_user_asid[smp_processor_id()]);
+    return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
 }

 inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
@@ -84,7 +84,7 @@ void kvm_mips_dump_host_tlbs(void)
     old_pagemask = read_c0_pagemask();

     printk("HOST TLBs:\n");
-    printk("ASID: %#lx\n", ASID_MASK(read_c0_entryhi()));
+    printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

     for (i = 0; i < current_cpu_data.tlbsize; i++) {
         write_c0_index(i);
@@ -428,7 +428,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
     for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
         if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) ==
              ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
-            (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == ASID_MASK(entryhi)))) {
+            (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
             index = i;
             break;
         }
@@ -626,7 +626,7 @@ kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
 {
     unsigned long asid = asid_cache(cpu);

-    if (!(ASID_MASK(ASID_INC(asid)))) {
+    if (!((asid += ASID_INC) & ASID_MASK)) {
         if (cpu_has_vtag_icache) {
             flush_icache_all();
         }
@@ -804,7 +804,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
     if (!newasid) {
         /* If we preempted while the guest was executing, then reload the pre-empted ASID */
         if (current->flags & PF_VCPU) {
-            write_c0_entryhi(ASID_MASK(vcpu->arch.preempt_entryhi));
+            write_c0_entryhi(vcpu->arch.
+                             preempt_entryhi & ASID_MASK);
             ehb();
         }
     } else {
@@ -816,11 +817,13 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
          */
         if (current->flags & PF_VCPU) {
             if (KVM_GUEST_KERNEL_MODE(vcpu))
-                write_c0_entryhi(ASID_MASK(vcpu->arch.
-                                           guest_kernel_asid[cpu]));
+                write_c0_entryhi(vcpu->arch.
+                                 guest_kernel_asid[cpu] &
+                                 ASID_MASK);
             else
-                write_c0_entryhi(ASID_MASK(vcpu->arch.
-                                           guest_user_asid[cpu]));
+                write_c0_entryhi(vcpu->arch.
+                                 guest_user_asid[cpu] &
+                                 ASID_MASK);
             ehb();
         }
     }
@@ -879,7 +882,8 @@ uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
             kvm_mips_guest_tlb_lookup(vcpu,
                                       ((unsigned long) opc & VPN2_MASK) |
-                                      ASID_MASK(kvm_read_c0_guest_entryhi(cop0)));
+                                      (kvm_read_c0_guest_entryhi
+                                       (cop0) & ASID_MASK));
         if (index < 0) {
             kvm_err
                 ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",