Diffstat (limited to 'arch/arc/mm/tlb.c')
-rw-r--r-- | arch/arc/mm/tlb.c | 255
1 file changed, 171 insertions(+), 84 deletions(-)
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 7957dc4e4d4a..e1acf0ce5647 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -52,6 +52,7 @@
  */
 #include <linux/module.h>
+#include <linux/bug.h>
 #include <asm/arcregs.h>
 #include <asm/setup.h>
 #include <asm/mmu_context.h>
@@ -99,48 +100,45 @@
 /* A copy of the ASID from the PID reg is kept in asid_cache */
-int asid_cache = FIRST_ASID;
-
-/* ASID to mm struct mapping. We have one extra entry corresponding to
- * NO_ASID to save us a compare when clearing the mm entry for old asid
- * see get_new_mmu_context (asm-arc/mmu_context.h)
- */
-struct mm_struct *asid_mm_map[NUM_ASID + 1];
+DEFINE_PER_CPU(unsigned int, asid_cache) = MM_CTXT_FIRST_CYCLE;

 /*
  * Utility Routine to erase a J-TLB entry
- * The procedure is to look it up in the MMU. If found, ERASE it by
- * issuing a TlbWrite CMD with PD0 = PD1 = 0
+ * Caller needs to setup Index Reg (manually or via getIndex)
  */
-
-static void __tlb_entry_erase(void)
+static inline void __tlb_entry_erase(void)
 {
         write_aux_reg(ARC_REG_TLBPD1, 0);
         write_aux_reg(ARC_REG_TLBPD0, 0);
         write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
 }

-static void tlb_entry_erase(unsigned int vaddr_n_asid)
+static inline unsigned int tlb_entry_lkup(unsigned long vaddr_n_asid)
 {
         unsigned int idx;

-        /* Locate the TLB entry for this vaddr + ASID */
         write_aux_reg(ARC_REG_TLBPD0, vaddr_n_asid);
+
         write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
         idx = read_aux_reg(ARC_REG_TLBINDEX);

+        return idx;
+}
+
+static void tlb_entry_erase(unsigned int vaddr_n_asid)
+{
+        unsigned int idx;
+
+        /* Locate the TLB entry for this vaddr + ASID */
+        idx = tlb_entry_lkup(vaddr_n_asid);
+
         /* No error means entry found, zero it out */
         if (likely(!(idx & TLB_LKUP_ERR))) {
                 __tlb_entry_erase();
-        } else {                /* Some sort of Error */
-
+        } else {
                 /* Duplicate entry error */
-                if (idx & 0x1) {
-                        /* TODO we need to handle this case too */
-                        pr_emerg("unhandled Duplicate flush for %x\n",
-                               vaddr_n_asid);
-                }
-                /* else entry not found so nothing to do */
+                WARN(idx == TLB_DUP_ERR, "Probe returned Dup PD for %x\n",
+                                           vaddr_n_asid);
         }
 }
@@ -159,7 +157,7 @@ static void utlb_invalidate(void)
 {
 #if (CONFIG_ARC_MMU_VER >= 2)

-#if (CONFIG_ARC_MMU_VER < 3)
+#if (CONFIG_ARC_MMU_VER == 2)
         /* MMU v2 introduced the uTLB Flush command.
          * There was however an obscure hardware bug, where uTLB flush would
          * fail when a prior probe for J-TLB (both totally unrelated) would
@@ -182,6 +180,36 @@ static void utlb_invalidate(void)

 }

+static void tlb_entry_insert(unsigned int pd0, unsigned int pd1)
+{
+        unsigned int idx;
+
+        /*
+         * First verify if entry for this vaddr+ASID already exists
+         * This also sets up PD0 (vaddr, ASID..) for final commit
+         */
+        idx = tlb_entry_lkup(pd0);
+
+        /*
+         * If Not already present get a free slot from MMU.
+         * Otherwise, Probe would have located the entry and set INDEX Reg
+         * with existing location. This will cause Write CMD to over-write
+         * existing entry with new PD0 and PD1
+         */
+        if (likely(idx & TLB_LKUP_ERR))
+                write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+
+        /* setup the other half of TLB entry (pfn, rwx..) */
+        write_aux_reg(ARC_REG_TLBPD1, pd1);
+
+        /*
+         * Commit the Entry to MMU
+         * It doesnt sound safe to use the TLBWriteNI cmd here
+         * which doesn't flush uTLBs. I'd rather be safe than sorry.
+         */
+        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+}
+
 /*
  * Un-conditionally (without lookup) erase the entire MMU contents
  */
@@ -224,13 +252,14 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
                 return;

         /*
-         * Workaround for Android weirdism:
-         * A binder VMA could end up in a task such that vma->mm != tsk->mm
-         * old code would cause h/w - s/w ASID to get out of sync
+         * - Move to a new ASID, but only if the mm is still wired in
+         *   (Android Binder ended up calling this for vma->mm != tsk->mm,
+         *    causing h/w - s/w ASID to get out of sync)
+         * - Also get_new_mmu_context() new implementation allocates a new
+         *   ASID only if it is not allocated already - so unallocate first
          */
-        if (current->mm != mm)
-                destroy_context(mm);
-        else
+        destroy_context(mm);
+        if (current->mm == mm)
                 get_new_mmu_context(mm);
 }

@@ -245,8 +274,8 @@ noinline void local_flush_tlb_mm(struct mm_struct *mm)
 void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end)
 {
+        const unsigned int cpu = smp_processor_id();
         unsigned long flags;
-        unsigned int asid;

         /* If range @start to @end is more than 32 TLB entries deep,
          * its better to move to a new ASID rather than searching for
@@ -268,11 +297,10 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
         start &= PAGE_MASK;

         local_irq_save(flags);
-        asid = vma->vm_mm->context.asid;

-        if (asid != NO_ASID) {
+        if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
                 while (start < end) {
-                        tlb_entry_erase(start | (asid & 0xff));
+                        tlb_entry_erase(start | hw_pid(vma->vm_mm, cpu));
                         start += PAGE_SIZE;
                 }
         }
@@ -319,6 +347,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)

 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 {
+        const unsigned int cpu = smp_processor_id();
         unsigned long flags;

         /* Note that it is critical that interrupts are DISABLED between
@@ -326,23 +355,95 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
          */
         local_irq_save(flags);

-        if (vma->vm_mm->context.asid != NO_ASID) {
-                tlb_entry_erase((page & PAGE_MASK) |
-                                (vma->vm_mm->context.asid & 0xff));
+        if (asid_mm(vma->vm_mm, cpu) != MM_CTXT_NO_ASID) {
+                tlb_entry_erase((page & PAGE_MASK) | hw_pid(vma->vm_mm, cpu));
                 utlb_invalidate();
         }

         local_irq_restore(flags);
 }

+#ifdef CONFIG_SMP
+
+struct tlb_args {
+        struct vm_area_struct *ta_vma;
+        unsigned long ta_start;
+        unsigned long ta_end;
+};
+
+static inline void ipi_flush_tlb_page(void *arg)
+{
+        struct tlb_args *ta = arg;
+
+        local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+}
+
+static inline void ipi_flush_tlb_range(void *arg)
+{
+        struct tlb_args *ta = arg;
+
+        local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+}
+
+static inline void ipi_flush_tlb_kernel_range(void *arg)
+{
+        struct tlb_args *ta = (struct tlb_args *)arg;
+
+        local_flush_tlb_kernel_range(ta->ta_start, ta->ta_end);
+}
+
+void flush_tlb_all(void)
+{
+        on_each_cpu((smp_call_func_t)local_flush_tlb_all, NULL, 1);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+        on_each_cpu_mask(mm_cpumask(mm), (smp_call_func_t)local_flush_tlb_mm,
+                         mm, 1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
+{
+        struct tlb_args ta = {
+                .ta_vma = vma,
+                .ta_start = uaddr
+        };
+
+        on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_page, &ta, 1);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+                     unsigned long end)
+{
+        struct tlb_args ta = {
+                .ta_vma = vma,
+                .ta_start = start,
+                .ta_end = end
+        };
+
+        on_each_cpu_mask(mm_cpumask(vma->vm_mm), ipi_flush_tlb_range, &ta, 1);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+        struct tlb_args ta = {
+                .ta_start = start,
+                .ta_end = end
+        };
+
+        on_each_cpu(ipi_flush_tlb_kernel_range, &ta, 1);
+}
+#endif
+
 /*
  * Routine to create a TLB entry
  */
 void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
         unsigned long flags;
-        unsigned int idx, asid_or_sasid;
-        unsigned long pd0_flags;
+        unsigned int asid_or_sasid, rwx;
+        unsigned long pd0, pd1;

         /*
          * create_tlb() assumes that current->mm == vma->mm, since
@@ -374,47 +475,37 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)

         local_irq_save(flags);

-        tlb_paranoid_check(vma->vm_mm->context.asid, address);
+        tlb_paranoid_check(asid_mm(vma->vm_mm, smp_processor_id()), address);

         address &= PAGE_MASK;

         /* update this PTE credentials */
         pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);

-        /* Create HW TLB entry Flags (in PD0) from PTE Flags */
-#if (CONFIG_ARC_MMU_VER <= 2)
-        pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
-#else
-        pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
-#endif
+        /* Create HW TLB(PD0,PD1) from PTE */

         /* ASID for this task */
         asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;

-        write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
-
-        /* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
-        write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
-
-        /* First verify if entry for this vaddr+ASID already exists */
-        write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
-        idx = read_aux_reg(ARC_REG_TLBINDEX);
+        pd0 = address | asid_or_sasid | (pte_val(*ptep) & PTE_BITS_IN_PD0);

         /*
-         * If Not already present get a free slot from MMU.
-         * Otherwise, Probe would have located the entry and set INDEX Reg
-         * with existing location. This will cause Write CMD to over-write
-         * existing entry with new PD0 and PD1
+         * ARC MMU provides fully orthogonal access bits for K/U mode,
+         * however Linux only saves 1 set to save PTE real-estate
+         * Here we convert 3 PTE bits into 6 MMU bits:
+         * -Kernel only entries have Kr Kw Kx 0 0 0
+         * -User entries have mirrored K and U bits
          */
-        if (likely(idx & TLB_LKUP_ERR))
-                write_aux_reg(ARC_REG_TLBCOMMAND, TLBGetIndex);
+        rwx = pte_val(*ptep) & PTE_BITS_RWX;

-        /*
-         * Commit the Entry to MMU
-         * It doesnt sound safe to use the TLBWriteNI cmd here
-         * which doesn't flush uTLBs. I'd rather be safe than sorry.
-         */
-        write_aux_reg(ARC_REG_TLBCOMMAND, TLBWrite);
+        if (pte_val(*ptep) & _PAGE_GLOBAL)
+                rwx <<= 3;              /* r w x => Kr Kw Kx 0 0 0 */
+        else
+                rwx |= (rwx << 3);      /* r w x => Kr Kw Kx Ur Uw Ux */
+
+        pd1 = rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1);
+
+        tlb_entry_insert(pd0, pd1);

         local_irq_restore(flags);
 }
@@ -553,13 +644,6 @@ void arc_mmu_init(void)
         if (mmu->pg_sz != PAGE_SIZE)
                 panic("MMU pg size != PAGE_SIZE (%luk)\n", TO_KB(PAGE_SIZE));

-        /*
-         * ASID mgmt data structures are compile time init
-         * asid_cache = FIRST_ASID and asid_mm_map[] all zeroes
-         */
-
-        local_flush_tlb_all();
-
         /* Enable the MMU */
         write_aux_reg(ARC_REG_PID, MMU_ENABLE);
@@ -601,9 +685,9 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
                           struct pt_regs *regs)
 {
         int set, way, n;
-        unsigned int pd0[4], pd1[4];    /* assume max 4 ways */
         unsigned long flags, is_valid;
         struct cpuinfo_arc_mmu *mmu = &cpuinfo_arc700[smp_processor_id()].mmu;
+        unsigned int pd0[mmu->ways], pd1[mmu->ways];

         local_irq_save(flags);
@@ -628,7 +712,7 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
                         continue;

                 /* Scan the set for duplicate ways: needs a nested loop */
-                for (way = 0; way < mmu->ways; way++) {
+                for (way = 0; way < mmu->ways - 1; way++) {
                         if (!pd0[way])
                                 continue;
@@ -671,25 +755,28 @@ void do_tlb_overlap_fault(unsigned long cause, unsigned long address,
  * Low Level ASM TLB handler calls this if it finds that HW and SW ASIDS
  * don't match
  */
-void print_asid_mismatch(int is_fast_path)
+void print_asid_mismatch(int mm_asid, int mmu_asid, int is_fast_path)
 {
-        int pid_sw, pid_hw;
-
-        pid_sw = current->active_mm->context.asid;
-        pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
-
         pr_emerg("ASID Mismatch in %s Path Handler: sw-pid=0x%x hw-pid=0x%x\n",
-               is_fast_path ? "Fast" : "Slow", pid_sw, pid_hw);
+               is_fast_path ? "Fast" : "Slow", mm_asid, mmu_asid);

         __asm__ __volatile__("flag 1");
 }

-void tlb_paranoid_check(unsigned int pid_sw, unsigned long addr)
+void tlb_paranoid_check(unsigned int mm_asid, unsigned long addr)
 {
-        unsigned int pid_hw;
+        unsigned int mmu_asid;

-        pid_hw = read_aux_reg(ARC_REG_PID) & 0xff;
+        mmu_asid = read_aux_reg(ARC_REG_PID) & 0xff;

-        if (addr < 0x70000000 && ((pid_hw != pid_sw) || (pid_sw == NO_ASID)))
-                print_asid_mismatch(0);
+        /*
+         * At the time of a TLB miss/installation
+         * - HW version needs to match SW version
+         * - SW needs to have a valid ASID
+         */
+        if (addr < 0x70000000 &&
+            ((mm_asid == MM_CTXT_NO_ASID) ||
+             (mmu_asid != (mm_asid & MM_CTXT_ASID_MASK))))
+                print_asid_mismatch(mm_asid, mmu_asid, 0);
 }
 #endif
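
The create_tlb() hunk above folds Linux's single set of r/w/x protection bits into the MMU's orthogonal kernel/user permission bits: global (kernel-only) pages get only the K half, while user pages get the K and U halves mirrored. Below is a minimal standalone sketch of that expansion; the bit positions are assumed purely for illustration (the real PTE_BITS_RWX layout is defined in arch/arc/include/asm/pgtable.h, which this diff does not touch).

#include <stdio.h>

/* hypothetical bit positions for the PTE's single r/w/x set */
#define R 0x4
#define W 0x2
#define X 0x1

/* spread the 3 PTE bits into the 6 TLB PD1 permission bits */
static unsigned int expand_rwx(unsigned int rwx, int is_global)
{
        if (is_global)
                return rwx << 3;           /* r w x => Kr Kw Kx 0 0 0 */

        return rwx | (rwx << 3);           /* r w x => Kr Kw Kx Ur Uw Ux */
}

int main(void)
{
        /* kernel-only (global) read+write page: user half stays clear */
        printf("global rw: 0x%02x\n", expand_rwx(R | W, 1));   /* 0x30 */

        /* user read+exec page: kernel and user halves are mirrored */
        printf("user   rx: 0x%02x\n", expand_rwx(R | X, 0));   /* 0x2d */

        return 0;
}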
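The switch from the global "int asid_cache" to a per-cpu counter seeded with MM_CTXT_FIRST_CYCLE, together with the MM_CTXT_NO_ASID and MM_CTXT_ASID_MASK names, points at a generation-style ASID allocator: the 8-bit hardware ASID lives in the low byte and an allocation-cycle count in the bits above it, so a stale mm is detected by comparing generations rather than by scanning the old asid_mm_map[] table. The allocator itself lives in asm/mmu_context.h and is not part of this diff; the following is only a sketch of how such a scheme behaves, with assumed constant values.

#include <stdio.h>

#define ASID_MASK   0xffu              /* hardware PID register is 8 bits */
#define CYCLE_MASK  (~ASID_MASK)       /* allocation generation, upper bits */
#define FIRST_CYCLE (ASID_MASK + 1)
#define NO_ASID     0u

static unsigned int asid_cache = FIRST_CYCLE;  /* per-cpu in the kernel */

/*
 * Return a context whose generation matches this cpu, allocating a
 * fresh ASID only when @ctx belongs to an older generation.
 */
static unsigned int get_new_context(unsigned int ctx)
{
        /* same generation: the ASID cached in @ctx is still valid */
        if (!((ctx ^ asid_cache) & CYCLE_MASK))
                return ctx;

        if (!(++asid_cache & ASID_MASK)) {
                /* 8-bit ASID space exhausted: the kernel would do a full
                 * TLB flush here before starting the next generation */
                if (!asid_cache)       /* 32-bit container wrapped */
                        asid_cache = FIRST_CYCLE;
        }

        return asid_cache;
}

int main(void)
{
        unsigned int a = get_new_context(NO_ASID);
        unsigned int b = get_new_context(NO_ASID);

        printf("mm A: ctx=0x%x hw-pid=0x%x\n", a, a & ASID_MASK);
        printf("mm B: ctx=0x%x hw-pid=0x%x\n", b, b & ASID_MASK);
        printf("mm A again: ctx=0x%x (no reallocation)\n", get_new_context(a));
        return 0;
}

This also explains the local_flush_tlb_mm() hunk: destroy_context() resets the mm to MM_CTXT_NO_ASID first, since the new get_new_mmu_context() keeps an already-valid ASID instead of unconditionally handing out a new one.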