author		Linus Torvalds <torvalds@linux-foundation.org>	2020-06-07 01:22:01 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-06-07 01:22:01 +0300
commit		3b69e8b4571125bec1f77f886174fe6cab6b9d75 (patch)
tree		4a125694310d161b6cb3097c4bf7542e97919987 /arch/sh/mm
parent		b170290c2836c40ab565736ba37681eb3dfd79b8 (diff)
parent		37744feebc086908fd89760650f458ab19071750 (diff)
download	linux-3b69e8b4571125bec1f77f886174fe6cab6b9d75.tar.xz
Merge tag 'sh-for-5.8' of git://git.libc.org/linux-sh
Pull arch/sh updates from Rich Felker:
"Fix for arch/sh build regression with newer binutils, removal of SH5,
fixes for module exports, and misc cleanup"
* tag 'sh-for-5.8' of git://git.libc.org/linux-sh:
sh: remove sh5 support
sh: add missing EXPORT_SYMBOL() for __delay
sh: Convert ins[bwl]/outs[bwl] macros to inline functions
sh: Convert iounmap() macros to inline functions
sh: Add missing DECLARE_EXPORT() for __ashiftrt_r4_xx
sh: configs: Cleanup old Kconfig IO scheduler options
arch/sh: vmlinux.scr
sh: Replace CONFIG_MTD_M25P80 with CONFIG_MTD_SPI_NOR in sh7757lcr_defconfig
sh: sh4a: Bring back tmu3_device early device
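Two of the commits above ("Convert ins[bwl]/outs[bwl] macros to inline functions" and "Convert iounmap() macros to inline functions") apply the standard macro-to-inline pattern. A minimal sketch of that pattern with an illustrative port-I/O stub (the names and the BUG() body here are assumptions for illustration, not the literal sh definitions):

/* Before: arguments are never evaluated, so callers get "set but not
 * used" warnings, and there is no type checking at all. */
#define insb(port, dst, count)	BUG()

/* After: same generated code, but the arguments are type-checked and
 * count as used. (Illustrative stub, not the exact sh change.) */
static inline void insb(unsigned long port, void *dst, unsigned long count)
{
	BUG();	/* no port I/O on this configuration */
}

The generated code is unchanged; the gain is argument type checking and the disappearance of spurious warnings for arguments a bare macro would discard.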
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--  arch/sh/mm/Kconfig       |   16
-rw-r--r--  arch/sh/mm/Makefile      |   31
-rw-r--r--  arch/sh/mm/cache-sh5.c   |  626
-rw-r--r--  arch/sh/mm/cache.c       |    6
-rw-r--r--  arch/sh/mm/extable_64.c  |   84
-rw-r--r--  arch/sh/mm/tlb-sh5.c     |  224
-rw-r--r--  arch/sh/mm/tlbex_64.c    |  171
-rw-r--r--  arch/sh/mm/tlbflush_64.c |  172
8 files changed, 7 insertions, 1323 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 5c8a2ebfc720..6c39d24ad919 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -15,8 +15,7 @@ config MMU
 config PAGE_OFFSET
 	hex
-	default "0x80000000" if MMU && SUPERH32
-	default "0x20000000" if MMU && SUPERH64
+	default "0x80000000" if MMU
 	default "0x00000000"
 
 config FORCE_MAX_ZONEORDER
@@ -72,12 +71,11 @@ config MEMORY_SIZE
 config 29BIT
 	def_bool !32BIT
-	depends on SUPERH32
 	select UNCACHED_MAPPING
 
 config 32BIT
 	bool
-	default y if CPU_SH5 || !MMU
+	default !MMU
 
 config PMB
 	bool "Support 32-bit physical addressing through PMB"
@@ -152,7 +150,7 @@ config ARCH_MEMORY_PROBE
 config IOREMAP_FIXED
 	def_bool y
-	depends on X2TLB || SUPERH64
+	depends on X2TLB
 
 config UNCACHED_MAPPING
 	bool
@@ -184,7 +182,7 @@ config PAGE_SIZE_16KB
 config PAGE_SIZE_64KB
 	bool "64kB"
-	depends on !MMU || CPU_SH4 || CPU_SH5
+	depends on !MMU || CPU_SH4
 	help
 	  This enables support for 64kB pages, possible on all SH-4
 	  CPUs and later.
@@ -216,10 +214,6 @@ config HUGETLB_PAGE_SIZE_64MB
 	bool "64MB"
 	depends on X2TLB
 
-config HUGETLB_PAGE_SIZE_512MB
-	bool "512MB"
-	depends on CPU_SH5
-
 endchoice
 
 config SCHED_MC
@@ -242,7 +236,7 @@ config SH7705_CACHE_32KB
 choice
 	prompt "Cache mode"
-	default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4 || CPU_SH5
+	default CACHE_WRITEBACK if CPU_SH2A || CPU_SH3 || CPU_SH4
 	default CACHE_WRITETHROUGH if (CPU_SH2 && !CPU_SH2A)
 
 config CACHE_WRITEBACK
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 5051b38fd5b6..487da0ff03b3 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -10,15 +10,14 @@ cacheops-$(CONFIG_CPU_SUBTYPE_SH7619)	:= cache-sh2.o
 cacheops-$(CONFIG_CPU_SH2A)	:= cache-sh2a.o
 cacheops-$(CONFIG_CPU_SH3)	:= cache-sh3.o
 cacheops-$(CONFIG_CPU_SH4)	:= cache-sh4.o flush-sh4.o
-cacheops-$(CONFIG_CPU_SH5)	:= cache-sh5.o flush-sh4.o
 cacheops-$(CONFIG_SH7705_CACHE_32KB)	+= cache-sh7705.o
 cacheops-$(CONFIG_CPU_SHX3)	+= cache-shx3.o
 
 obj-y			+= $(cacheops-y)
 
 mmu-y			:= nommu.o extable_32.o
-mmu-$(CONFIG_MMU)	:= extable_$(BITS).o fault.o ioremap.o kmap.o \
-			   pgtable.o tlbex_$(BITS).o tlbflush_$(BITS).o
+mmu-$(CONFIG_MMU)	:= extable_32.o fault.o ioremap.o kmap.o \
+			   pgtable.o tlbex_32.o tlbflush_32.o
 
 obj-y			+= $(mmu-y)
 
@@ -31,7 +30,6 @@ ifdef CONFIG_MMU
 debugfs-$(CONFIG_CPU_SH4)	+= tlb-debugfs.o
 tlb-$(CONFIG_CPU_SH3)		:= tlb-sh3.o
 tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o tlb-urb.o
-tlb-$(CONFIG_CPU_SH5)		:= tlb-sh5.o
 tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o tlb-urb.o
 obj-y				+= $(tlb-y)
 endif
@@ -46,29 +44,4 @@ obj-$(CONFIG_HAVE_SRAM_POOL)	+= sram.o
 
 GCOV_PROFILE_pmb.o := n
 
-# Special flags for tlbex_64.o.  This puts restrictions on the number of
-# caller-save registers that the compiler can target when building this file.
-# This is required because the code is called from a context in entry.S where
-# very few registers have been saved in the exception handler (for speed
-# reasons).
-# The caller save registers that have been saved and which can be used are
-# r2,r3,r4,r5 : argument passing
-# r15, r18 : SP and LINK
-# tr0-4 : allow all caller-save TR's.  The compiler seems to be able to make
-# use of them, so it's probably beneficial to performance to save them
-# and have them available for it.
-#
-# The resources not listed below are callee save, i.e. the compiler is free to
-# use any of them and will spill them to the stack itself.
-
-CFLAGS_tlbex_64.o += -ffixed-r7 \
-	-ffixed-r8 -ffixed-r9 -ffixed-r10 -ffixed-r11 -ffixed-r12 \
-	-ffixed-r13 -ffixed-r14 -ffixed-r16 -ffixed-r17 -ffixed-r19 \
-	-ffixed-r20 -ffixed-r21 -ffixed-r22 -ffixed-r23 \
-	-ffixed-r24 -ffixed-r25 -ffixed-r26 -ffixed-r27 \
-	-ffixed-r36 -ffixed-r37 -ffixed-r38 -ffixed-r39 -ffixed-r40 \
-	-ffixed-r41 -ffixed-r42 -ffixed-r43 \
-	-ffixed-r60 -ffixed-r61 -ffixed-r62 \
-	-fomit-frame-pointer
-
 ccflags-y := -Werror
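The register restrictions the removed Makefile comment describes rely on GCC's -ffixed-<reg> option, which takes a register out of the allocator's hands entirely, so code compiled that way can only touch what the stripped-down exception context has actually saved. A small illustration of the idea, with a hypothetical file, flag set, and function (nothing here is from the kernel tree):

/*
 * Sketch: restricting the register set for a hot exception-path object.
 * Imagine the Makefile carrying, by analogy with the removed recipe:
 *
 *   CFLAGS_fastpath.o += -ffixed-r10 -ffixed-r11 -fomit-frame-pointer
 *
 * With those flags the compiler must generate this function without
 * ever allocating r10 or r11, so a caller that did not save them
 * (e.g. a bare assembly exception stub) sees them preserved.
 */
int fastpath_lookup(const int *table, int idx)
{
	/* Plain C; the constraint is enforced entirely by the flags. */
	return table[idx & 7];
}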
diff --git a/arch/sh/mm/cache-sh5.c b/arch/sh/mm/cache-sh5.c
deleted file mode 100644
index 442a77cc2957..000000000000
--- a/arch/sh/mm/cache-sh5.c
+++ /dev/null
@@ -1,626 +0,0 @@
-/*
- * arch/sh/mm/cache-sh5.c
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2002  Benedict Gaster
- * Copyright (C) 2003  Richard Curnow
- * Copyright (C) 2003 - 2008  Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/init.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <asm/tlb.h>
-#include <asm/processor.h>
-#include <asm/cache.h>
-#include <asm/pgalloc.h>
-#include <linux/uaccess.h>
-#include <asm/mmu_context.h>
-
-extern void __weak sh4__flush_region_init(void);
-
-/* Wired TLB entry for the D-cache */
-static unsigned long long dtlb_cache_slot;
-
-/*
- * The following group of functions deal with mapping and unmapping a
- * temporary page into a DTLB slot that has been set aside for exclusive
- * use.
- */
-static inline void
-sh64_setup_dtlb_cache_slot(unsigned long eaddr, unsigned long asid,
-			   unsigned long paddr)
-{
-	local_irq_disable();
-	sh64_setup_tlb_slot(dtlb_cache_slot, eaddr, asid, paddr);
-}
-
-static inline void sh64_teardown_dtlb_cache_slot(void)
-{
-	sh64_teardown_tlb_slot(dtlb_cache_slot);
-	local_irq_enable();
-}
-
-static inline void sh64_icache_inv_all(void)
-{
-	unsigned long long addr, flag, data;
-	unsigned long flags;
-
-	addr = ICCR0;
-	flag = ICCR0_ICI;
-	data = 0;
-
-	/* Make this a critical section for safety (probably not strictly necessary.) */
-	local_irq_save(flags);
-
-	/* Without %1 it gets inexplicably wrong */
-	__asm__ __volatile__ (
-		"getcfg	%3, 0, %0\n\t"
-		"or	%0, %2, %0\n\t"
-		"putcfg	%3, 0, %0\n\t"
-		"synci"
-		: "=&r" (data)
-		: "0" (data), "r" (flag), "r" (addr));
-
-	local_irq_restore(flags);
-}
-
-static void sh64_icache_inv_kernel_range(unsigned long start, unsigned long end)
-{
-	/* Invalidate range of addresses [start,end] from the I-cache, where
-	 * the addresses lie in the kernel superpage. */
-
-	unsigned long long ullend, addr, aligned_start;
-	aligned_start = (unsigned long long)(signed long long)(signed long) start;
-	addr = L1_CACHE_ALIGN(aligned_start);
-	ullend = (unsigned long long) (signed long long) (signed long) end;
-
-	while (addr <= ullend) {
-		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-static void sh64_icache_inv_user_page(struct vm_area_struct *vma, unsigned long eaddr)
-{
-	/* If we get called, we know that vma->vm_flags contains VM_EXEC.
-	   Also, eaddr is page-aligned. */
-	unsigned int cpu = smp_processor_id();
-	unsigned long long addr, end_addr;
-	unsigned long flags = 0;
-	unsigned long running_asid, vma_asid;
-	addr = eaddr;
-	end_addr = addr + PAGE_SIZE;
-
-	/* Check whether we can use the current ASID for the I-cache
-	   invalidation.  For example, if we're called via
-	   access_process_vm->flush_cache_page->here, (e.g. when reading from
-	   /proc), 'running_asid' will be that of the reader, not of the
-	   victim.
-
-	   Also, note the risk that we might get pre-empted between the ASID
-	   compare and blocking IRQs, and before we regain control, the
-	   pid->ASID mapping changes.  However, the whole cache will get
-	   invalidated when the mapping is renewed, so the worst that can
-	   happen is that the loop below ends up invalidating somebody else's
-	   cache entries.
-	*/
-
-	running_asid = get_asid();
-	vma_asid = cpu_asid(cpu, vma->vm_mm);
-	if (running_asid != vma_asid) {
-		local_irq_save(flags);
-		switch_and_save_asid(vma_asid);
-	}
-	while (addr < end_addr) {
-		/* Worth unrolling a little */
-		__asm__ __volatile__("icbi %0,  0" : : "r" (addr));
-		__asm__ __volatile__("icbi %0, 32" : : "r" (addr));
-		__asm__ __volatile__("icbi %0, 64" : : "r" (addr));
-		__asm__ __volatile__("icbi %0, 96" : : "r" (addr));
-		addr += 128;
-	}
-	if (running_asid != vma_asid) {
-		switch_and_save_asid(running_asid);
-		local_irq_restore(flags);
-	}
-}
-
-static void sh64_icache_inv_user_page_range(struct mm_struct *mm,
-					unsigned long start, unsigned long end)
-{
-	/* Used for invalidating big chunks of I-cache, i.e. assume the range
-	   is whole pages.  If 'start' or 'end' is not page aligned, the code
-	   is conservative and invalidates to the ends of the enclosing pages.
-	   This is functionally OK, just a performance loss. */
-
-	/* See the comments below in sh64_dcache_purge_user_range() regarding
-	   the choice of algorithm.  However, for the I-cache option (2) isn't
-	   available because there are no physical tags so aliases can't be
-	   resolved.  The icbi instruction has to be used through the user
-	   mapping.  Because icbi is cheaper than ocbp on a cache hit, it
-	   would be cheaper to use the selective code for a large range than is
-	   possible with the D-cache.  Just assume 64 for now as a working
-	   figure.
-	   */
-	int n_pages;
-
-	if (!mm)
-		return;
-
-	n_pages = ((end - start) >> PAGE_SHIFT);
-	if (n_pages >= 64) {
-		sh64_icache_inv_all();
-	} else {
-		unsigned long aligned_start;
-		unsigned long eaddr;
-		unsigned long after_last_page_start;
-		unsigned long mm_asid, current_asid;
-		unsigned long flags = 0;
-
-		mm_asid = cpu_asid(smp_processor_id(), mm);
-		current_asid = get_asid();
-
-		if (mm_asid != current_asid) {
-			/* Switch ASID and run the invalidate loop under cli */
-			local_irq_save(flags);
-			switch_and_save_asid(mm_asid);
-		}
-
-		aligned_start = start & PAGE_MASK;
-		after_last_page_start = PAGE_SIZE + ((end - 1) & PAGE_MASK);
-
-		while (aligned_start < after_last_page_start) {
-			struct vm_area_struct *vma;
-			unsigned long vma_end;
-			vma = find_vma(mm, aligned_start);
-			if (!vma || (aligned_start <= vma->vm_end)) {
-				/* Avoid getting stuck in an error condition */
-				aligned_start += PAGE_SIZE;
-				continue;
-			}
-			vma_end = vma->vm_end;
-			if (vma->vm_flags & VM_EXEC) {
-				/* Executable */
-				eaddr = aligned_start;
-				while (eaddr < vma_end) {
-					sh64_icache_inv_user_page(vma, eaddr);
-					eaddr += PAGE_SIZE;
-				}
-			}
-			aligned_start = vma->vm_end; /* Skip to start of next region */
-		}
-
-		if (mm_asid != current_asid) {
-			switch_and_save_asid(current_asid);
-			local_irq_restore(flags);
-		}
-	}
-}
-
-static void sh64_icache_inv_current_user_range(unsigned long start, unsigned long end)
-{
-	/* The icbi instruction never raises ITLBMISS.  i.e. if there's not a
-	   cache hit on the virtual tag the instruction ends there, without a
-	   TLB lookup. */
-
-	unsigned long long aligned_start;
-	unsigned long long ull_end;
-	unsigned long long addr;
-
-	ull_end = end;
-
-	/* Just invalidate over the range using the natural addresses.  TLB
-	   miss handling will be OK (TBC).  Since it's for the current process,
-	   either we're already in the right ASID context, or the ASIDs have
-	   been recycled since we were last active in which case we might just
-	   invalidate another process's I-cache entries : no worries, just a
-	   performance drop for him. */
-	aligned_start = L1_CACHE_ALIGN(start);
-	addr = aligned_start;
-	while (addr < ull_end) {
-		__asm__ __volatile__ ("icbi %0, 0" : : "r" (addr));
-		__asm__ __volatile__ ("nop");
-		__asm__ __volatile__ ("nop");
-		addr += L1_CACHE_BYTES;
-	}
-}
-
-/* Buffer used as the target of alloco instructions to purge data from cache
-   sets by natural eviction. -- RPC */
-#define DUMMY_ALLOCO_AREA_SIZE ((L1_CACHE_BYTES << 10) + (1024 * 4))
-static unsigned char dummy_alloco_area[DUMMY_ALLOCO_AREA_SIZE] __cacheline_aligned = { 0, };
-
-static inline void sh64_dcache_purge_sets(int sets_to_purge_base, int n_sets)
-{
-	/* Purge all ways in a particular block of sets, specified by the base
-	   set number and number of sets.  Can handle wrap-around, if that's
-	   needed. */
-
-	int dummy_buffer_base_set;
-	unsigned long long eaddr, eaddr0, eaddr1;
-	int j;
-	int set_offset;
-
-	dummy_buffer_base_set = ((int)&dummy_alloco_area &
-				 cpu_data->dcache.entry_mask) >>
-				 cpu_data->dcache.entry_shift;
-	set_offset = sets_to_purge_base - dummy_buffer_base_set;
-
-	for (j = 0; j < n_sets; j++, set_offset++) {
-		set_offset &= (cpu_data->dcache.sets - 1);
-		eaddr0 = (unsigned long long)dummy_alloco_area +
-			(set_offset << cpu_data->dcache.entry_shift);
-
-		/*
-		 * Do one alloco which hits the required set per cache
-		 * way.  For write-back mode, this will purge the #ways
-		 * resident lines.  There's little point unrolling this
-		 * loop because the allocos stall more if they're too
-		 * close together.
-		 */
-		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
-				  cpu_data->dcache.ways;
-
-		for (eaddr = eaddr0; eaddr < eaddr1;
-		     eaddr += cpu_data->dcache.way_size) {
-			__asm__ __volatile__ ("alloco %0, 0" : : "r" (eaddr));
-			__asm__ __volatile__ ("synco"); /* TAKum03020 */
-		}
-
-		eaddr1 = eaddr0 + cpu_data->dcache.way_size *
-			 cpu_data->dcache.ways;
-
-		for (eaddr = eaddr0; eaddr < eaddr1;
-		     eaddr += cpu_data->dcache.way_size) {
-			/*
-			 * Load from each address.  Required because
-			 * alloco is a NOP if the cache is write-through.
-			 */
-			if (test_bit(SH_CACHE_MODE_WT, &(cpu_data->dcache.flags)))
-				__raw_readb((unsigned long)eaddr);
-		}
-	}
-
-	/*
-	 * Don't use OCBI to invalidate the lines.  That costs cycles
-	 * directly.  If the dummy block is just left resident, it will
-	 * naturally get evicted as required.
-	 */
-}
-
-/*
- * Purge the entire contents of the dcache.  The most efficient way to
- * achieve this is to use alloco instructions on a region of unused
- * memory equal in size to the cache, thereby causing the current
- * contents to be discarded by natural eviction.  The alternative, namely
- * reading every tag, setting up a mapping for the corresponding page and
- * doing an OCBP for the line, would be much more expensive.
- */
-static void sh64_dcache_purge_all(void)
-{
-
-	sh64_dcache_purge_sets(0, cpu_data->dcache.sets);
-}
-
-
-/* Assumes this address (+ (2**n_synbits) pages up from it) aren't used for
-   anything else in the kernel */
-#define MAGIC_PAGE0_START 0xffffffffec000000ULL
-
-/* Purge the physical page 'paddr' from the cache.  It's known that any
- * cache lines requiring attention have the same page colour as the
- * address 'eaddr'.
- *
- * This relies on the fact that the D-cache matches on physical tags when
- * no virtual tag matches.  So we create an alias for the original page
- * and purge through that.  (Alternatively, we could have done this by
- * switching ASID to match the original mapping and purged through that,
- * but that involves ASID switching cost + probably a TLBMISS + refill
- * anyway.)
- */
-static void sh64_dcache_purge_coloured_phy_page(unsigned long paddr,
-						unsigned long eaddr)
-{
-	unsigned long long magic_page_start;
-	unsigned long long magic_eaddr, magic_eaddr_end;
-
-	magic_page_start = MAGIC_PAGE0_START + (eaddr & CACHE_OC_SYN_MASK);
-
-	/* As long as the kernel is not pre-emptible, this doesn't need to be
-	   under cli/sti. */
-	sh64_setup_dtlb_cache_slot(magic_page_start, get_asid(), paddr);
-
-	magic_eaddr = magic_page_start;
-	magic_eaddr_end = magic_eaddr + PAGE_SIZE;
-
-	while (magic_eaddr < magic_eaddr_end) {
-		/* Little point in unrolling this loop - the OCBPs are blocking
-		   and won't go any quicker (i.e. the loop overhead is parallel
-		   to part of the OCBP execution.) */
-		__asm__ __volatile__ ("ocbp %0, 0" : : "r" (magic_eaddr));
-		magic_eaddr += L1_CACHE_BYTES;
-	}
-
-	sh64_teardown_dtlb_cache_slot();
-}
-
-/*
- * Purge a page given its physical start address, by creating a temporary
- * 1 page mapping and purging across that.  Even if we know the virtual
- * address (& vma or mm) of the page, the method here is more elegant
- * because it avoids issues of coping with page faults on the purge
- * instructions (i.e. no special-case code required in the critical path
- * in the TLB miss handling).
- */
-static void sh64_dcache_purge_phy_page(unsigned long paddr)
-{
-	unsigned long long eaddr_start, eaddr, eaddr_end;
-	int i;
-
-	/* As long as the kernel is not pre-emptible, this doesn't need to be
-	   under cli/sti. */
-	eaddr_start = MAGIC_PAGE0_START;
-	for (i = 0; i < (1 << CACHE_OC_N_SYNBITS); i++) {
-		sh64_setup_dtlb_cache_slot(eaddr_start, get_asid(), paddr);
-
-		eaddr = eaddr_start;
-		eaddr_end = eaddr + PAGE_SIZE;
-		while (eaddr < eaddr_end) {
-			__asm__ __volatile__ ("ocbp %0, 0" : : "r" (eaddr));
-			eaddr += L1_CACHE_BYTES;
-		}
-
-		sh64_teardown_dtlb_cache_slot();
-		eaddr_start += PAGE_SIZE;
-	}
-}
-
-static void sh64_dcache_purge_user_pages(struct mm_struct *mm,
-				unsigned long addr, unsigned long end)
-{
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-	spinlock_t *ptl;
-	unsigned long paddr;
-
-	if (!mm)
-		return; /* No way to find physical address of page */
-
-	pgd = pgd_offset(mm, addr);
-	if (pgd_bad(*pgd))
-		return;
-
-	p4d = p4d_offset(pgd, addr);
-	if (p4d_none(*p4d) || p4d_bad(*p4d))
-		return;
-
-	pud = pud_offset(p4d, addr);
-	if (pud_none(*pud) || pud_bad(*pud))
-		return;
-
-	pmd = pmd_offset(pud, addr);
-	if (pmd_none(*pmd) || pmd_bad(*pmd))
-		return;
-
-	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-	do {
-		entry = *pte;
-		if (pte_none(entry) || !pte_present(entry))
-			continue;
-		paddr = pte_val(entry) & PAGE_MASK;
-		sh64_dcache_purge_coloured_phy_page(paddr, addr);
-	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap_unlock(pte - 1, ptl);
-}
-
-/*
- * There are at least 5 choices for the implementation of this, with
- * pros (+), cons(-), comments(*):
- *
- * 1. ocbp each line in the range through the original user's ASID
- *    + no lines spuriously evicted
- *    - tlbmiss handling (must either handle faults on demand => extra
- *	special-case code in tlbmiss critical path), or map the page in
- *	advance (=> flush_tlb_range in advance to avoid multiple hits)
- *    - ASID switching
- *    - expensive for large ranges
- *
- * 2. temporarily map each page in the range to a special effective
- *    address and ocbp through the temporary mapping; relies on the
- *    fact that SH-5 OCB* always do TLB lookup and match on ptags (they
- *    never look at the etags)
- *    + no spurious evictions
- *    - expensive for large ranges
- *    * surely cheaper than (1)
- *
- * 3. walk all the lines in the cache, check the tags, if a match
- *    occurs create a page mapping to ocbp the line through
- *    + no spurious evictions
- *    - tag inspection overhead
- *    - (especially for small ranges)
- *    - potential cost of setting up/tearing down page mapping for
- *      every line that matches the range
- *    * cost partly independent of range size
- *
- * 4. walk all the lines in the cache, check the tags, if a match
- *    occurs use 4 * alloco to purge the line (+3 other probably
- *    innocent victims) by natural eviction
- *    + no tlb mapping overheads
- *    - spurious evictions
- *    - tag inspection overhead
- *
- * 5. implement like flush_cache_all
- *    + no tag inspection overhead
- *    - spurious evictions
- *    - bad for small ranges
- *
- * (1) can be ruled out as more expensive than (2).  (2) appears best
- * for small ranges.  The choice between (3), (4) and (5) for large
- * ranges and the range size for the large/small boundary need
- * benchmarking to determine.
- *
- * For now use approach (2) for small ranges and (5) for large ones.
- */
-static void sh64_dcache_purge_user_range(struct mm_struct *mm,
-			unsigned long start, unsigned long end)
-{
-	int n_pages = ((end - start) >> PAGE_SHIFT);
-
-	if (n_pages >= 64 || ((start ^ (end - 1)) & PMD_MASK)) {
-		sh64_dcache_purge_all();
-	} else {
-		/* Small range, covered by a single page table page */
-		start &= PAGE_MASK;	/* should already be so */
-		end = PAGE_ALIGN(end);	/* should already be so */
-		sh64_dcache_purge_user_pages(mm, start, end);
-	}
-}
-
-/*
- * Invalidate the entire contents of both caches, after writing back to
- * memory any dirty data from the D-cache.
- */
-static void sh5_flush_cache_all(void *unused)
-{
-	sh64_dcache_purge_all();
-	sh64_icache_inv_all();
-}
-
-/*
- * Invalidate an entire user-address space from both caches, after
- * writing back dirty data (e.g. for shared mmap etc).
- *
- * This could be coded selectively by inspecting all the tags then
- * doing 4*alloco on any set containing a match (as for
- * flush_cache_range), but fork/exit/execve (where this is called from)
- * are expensive anyway.
- *
- * Have to do a purge here, despite the comments re I-cache below.
- * There could be odd-coloured dirty data associated with the mm still
- * in the cache - if this gets written out through natural eviction
- * after the kernel has reused the page there will be chaos.
- *
- * The mm being torn down won't ever be active again, so any Icache
- * lines tagged with its ASID won't be visible for the rest of the
- * lifetime of this ASID cycle.  Before the ASID gets reused, there
- * will be a flush_cache_all.  Hence we don't need to touch the
- * I-cache.  This is similar to the lack of action needed in
- * flush_tlb_mm - see fault.c.
- */
-static void sh5_flush_cache_mm(void *unused)
-{
-	sh64_dcache_purge_all();
-}
-
-/*
- * Invalidate (from both caches) the range [start,end) of virtual
- * addresses from the user address space specified by mm, after writing
- * back any dirty data.
- *
- * Note, 'end' is 1 byte beyond the end of the range to flush.
- */
-static void sh5_flush_cache_range(void *args)
-{
-	struct flusher_data *data = args;
-	struct vm_area_struct *vma;
-	unsigned long start, end;
-
-	vma = data->vma;
-	start = data->addr1;
-	end = data->addr2;
-
-	sh64_dcache_purge_user_range(vma->vm_mm, start, end);
-	sh64_icache_inv_user_page_range(vma->vm_mm, start, end);
-}
-
-/*
- * Invalidate any entries in either cache for the vma within the user
- * address space vma->vm_mm for the page starting at virtual address
- * 'eaddr'.  This seems to be used primarily in breaking COW.  Note,
- * the I-cache must be searched too in case the page in question is
- * both writable and being executed from (e.g. stack trampolines.)
- *
- * Note, this is called with pte lock held.
- */
-static void sh5_flush_cache_page(void *args)
-{
-	struct flusher_data *data = args;
-	struct vm_area_struct *vma;
-	unsigned long eaddr, pfn;
-
-	vma = data->vma;
-	eaddr = data->addr1;
-	pfn = data->addr2;
-
-	sh64_dcache_purge_phy_page(pfn << PAGE_SHIFT);
-
-	if (vma->vm_flags & VM_EXEC)
-		sh64_icache_inv_user_page(vma, eaddr);
-}
-
-static void sh5_flush_dcache_page(void *page)
-{
-	sh64_dcache_purge_phy_page(page_to_phys((struct page *)page));
-	wmb();
-}
-
-/*
- * Flush the range [start,end] of kernel virtual address space from
- * the I-cache.  The corresponding range must be purged from the
- * D-cache also because the SH-5 doesn't have cache snooping between
- * the caches.  The addresses will be visible through the superpage
- * mapping, therefore it's guaranteed that there are no cache entries
- * for the range in cache sets of the wrong colour.
- */
-static void sh5_flush_icache_range(void *args)
-{
-	struct flusher_data *data = args;
-	unsigned long start, end;
-
-	start = data->addr1;
-	end = data->addr2;
-
-	__flush_purge_region((void *)start, end);
-	wmb();
-	sh64_icache_inv_kernel_range(start, end);
-}
-
-/*
- * For the address range [start,end), write back the data from the
- * D-cache and invalidate the corresponding region of the I-cache for the
- * current process.  Used to flush signal trampolines on the stack to
- * make them executable.
- */
-static void sh5_flush_cache_sigtramp(void *vaddr)
-{
-	unsigned long end = (unsigned long)vaddr + L1_CACHE_BYTES;
-
-	__flush_wback_region(vaddr, L1_CACHE_BYTES);
-	wmb();
-	sh64_icache_inv_current_user_range((unsigned long)vaddr, end);
-}
-
-void __init sh5_cache_init(void)
-{
-	local_flush_cache_all		= sh5_flush_cache_all;
-	local_flush_cache_mm		= sh5_flush_cache_mm;
-	local_flush_cache_dup_mm	= sh5_flush_cache_mm;
-	local_flush_cache_page		= sh5_flush_cache_page;
-	local_flush_cache_range		= sh5_flush_cache_range;
-	local_flush_dcache_page		= sh5_flush_dcache_page;
-	local_flush_icache_range	= sh5_flush_icache_range;
-	local_flush_cache_sigtramp	= sh5_flush_cache_sigtramp;
-
-	/* Reserve a slot for dcache colouring in the DTLB */
-	dtlb_cache_slot	= sh64_get_wired_dtlb_entry();
-
-	sh4__flush_region_init();
-}
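For readers skimming the removed cache-sh5.c: its central trick is purging the D-cache by natural eviction rather than by per-line address operations. A portable sketch of that idea, with made-up geometry constants and plain loads standing in for SH-5's alloco instruction:

/*
 * Sketch of purge-by-natural-eviction. Touching one address per cache
 * line in a buffer that covers every (set, way) forces the previous
 * contents out. LINE_SIZE/CACHE_SIZE are hypothetical stand-ins for
 * the cpu_data->dcache geometry the removed code reads at run time.
 */
#define LINE_SIZE	32
#define CACHE_SIZE	(32 * 1024)	/* ways * sets * line size */

static unsigned char dummy_area[CACHE_SIZE]
		__attribute__((aligned(LINE_SIZE)));

static void purge_dcache_by_eviction(void)
{
	volatile unsigned char *p = dummy_area;
	unsigned long i;

	/* One access per line: every set/way refills from dummy_area,
	 * so previously cached (possibly dirty) lines are written back
	 * and evicted by the hardware's normal replacement. */
	for (i = 0; i < CACHE_SIZE; i += LINE_SIZE)
		(void)p[i];
}

The removed sh64_dcache_purge_sets() refines this: alloco claims lines without generating memory traffic on write-back configurations, real loads are used only in write-through mode, and the dummy buffer is deliberately oversized so wrap-around sets are still covered.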
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 464f160a9576..3aef78ceb820 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -355,12 +355,6 @@ void __init cpu_cache_init(void)
 		}
 	}
 
-	if (boot_cpu_data.family == CPU_FAMILY_SH5) {
-		extern void __weak sh5_cache_init(void);
-
-		sh5_cache_init();
-	}
-
 skip:
 	emit_cache_params();
 }
diff --git a/arch/sh/mm/extable_64.c b/arch/sh/mm/extable_64.c
deleted file mode 100644
index 7a3b4d33d2e7..000000000000
--- a/arch/sh/mm/extable_64.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * arch/sh/mm/extable_64.c
- *
- * Copyright (C) 2003 Richard Curnow
- * Copyright (C) 2003, 2004  Paul Mundt
- *
- * Cloned from the 2.5 SH version..
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/bsearch.h>
-#include <linux/rwsem.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-extern unsigned long copy_user_memcpy, copy_user_memcpy_end;
-extern void __copy_user_fixup(void);
-
-static const struct exception_table_entry __copy_user_fixup_ex = {
-	.fixup = (unsigned long)&__copy_user_fixup,
-};
-
-/*
- * Some functions that may trap due to a bad user-mode address have too
- * many loads and stores in them to make it at all practical to label
- * each one and put them all in the main exception table.
- *
- * In particular, the fast memcpy routine is like this.  Its fixup is
- * just to fall back to a slow byte-at-a-time copy, which is handled the
- * conventional way.  So it's functionally OK to just handle any trap
- * occurring in the fast memcpy with that fixup.
- */
-static const struct exception_table_entry *check_exception_ranges(unsigned long addr)
-{
-	if ((addr >= (unsigned long)&copy_user_memcpy) &&
-	    (addr <= (unsigned long)&copy_user_memcpy_end))
-		return &__copy_user_fixup_ex;
-
-	return NULL;
-}
-
-static int cmp_ex_search(const void *key, const void *elt)
-{
-	const struct exception_table_entry *_elt = elt;
-	unsigned long _key = *(unsigned long *)key;
-
-	/* avoid overflow */
-	if (_key > _elt->insn)
-		return 1;
-	if (_key < _elt->insn)
-		return -1;
-	return 0;
-}
-
-/* Simple binary search */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
-	       const size_t num,
-	       unsigned long value)
-{
-	const struct exception_table_entry *mid;
-
-	mid = check_exception_ranges(value);
-	if (mid)
-		return mid;
-
-	return bsearch(&value, base, num,
-		       sizeof(struct exception_table_entry), cmp_ex_search);
-}
-
-int fixup_exception(struct pt_regs *regs)
-{
-	const struct exception_table_entry *fixup;
-
-	fixup = search_exception_tables(regs->pc);
-	if (fixup) {
-		regs->pc = fixup->fixup;
-		return 1;
-	}
-
-	return 0;
-}
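The removed extable_64.c is mostly the generic binary search; the SH-5 twist is the synthetic range entry that gives the many-instruction fast memcpy a single shared fixup instead of one table entry per faulting load or store. The shape of that lookup in standalone C (symbol names are hypothetical stand-ins for copy_user_memcpy and friends):

#include <stddef.h>

struct extable_entry {
	unsigned long insn;	/* faulting address */
	unsigned long fixup;	/* where to resume */
};

/* Bounds of a routine with too many faulting instructions to label
 * individually; filled in elsewhere in this sketch. */
extern char fast_copy_start[], fast_copy_end[];
static const struct extable_entry fast_copy_fixup = {
	.fixup = 0	/* address of the slow byte-at-a-time fallback */
};

const struct extable_entry *lookup_fixup(const struct extable_entry *base,
					 size_t num, unsigned long addr)
{
	size_t lo = 0, hi = num;

	/* Range check first: any fault inside the fast copy gets one
	 * shared fixup, so none of its instructions need entries. */
	if (addr >= (unsigned long)fast_copy_start &&
	    addr <= (unsigned long)fast_copy_end)
		return &fast_copy_fixup;

	while (lo < hi) {		/* conventional binary search */
		size_t mid = lo + (hi - lo) / 2;
		if (base[mid].insn == addr)
			return &base[mid];
		if (base[mid].insn < addr)
			lo = mid + 1;
		else
			hi = mid;
	}
	return NULL;
}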
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
deleted file mode 100644
index e4bb2a8e0a69..000000000000
--- a/arch/sh/mm/tlb-sh5.c
+++ /dev/null
@@ -1,224 +0,0 @@
-/*
- * arch/sh/mm/tlb-sh5.c
- *
- * Copyright (C) 2003  Paul Mundt <lethal@linux-sh.org>
- * Copyright (C) 2003  Richard Curnow <richard.curnow@superh.com>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/mm.h>
-#include <linux/init.h>
-#include <asm/page.h>
-#include <asm/tlb.h>
-#include <asm/mmu_context.h>
-
-/**
- * sh64_tlb_init - Perform initial setup for the DTLB and ITLB.
- */
-int sh64_tlb_init(void)
-{
-	/* Assign some sane DTLB defaults */
-	cpu_data->dtlb.entries	= 64;
-	cpu_data->dtlb.step	= 0x10;
-
-	cpu_data->dtlb.first	= DTLB_FIXED | cpu_data->dtlb.step;
-	cpu_data->dtlb.next	= cpu_data->dtlb.first;
-
-	cpu_data->dtlb.last	= DTLB_FIXED |
-				  ((cpu_data->dtlb.entries - 1) *
-				   cpu_data->dtlb.step);
-
-	/* And again for the ITLB */
-	cpu_data->itlb.entries	= 64;
-	cpu_data->itlb.step	= 0x10;
-
-	cpu_data->itlb.first	= ITLB_FIXED | cpu_data->itlb.step;
-	cpu_data->itlb.next	= cpu_data->itlb.first;
-	cpu_data->itlb.last	= ITLB_FIXED |
-				  ((cpu_data->itlb.entries - 1) *
-				   cpu_data->itlb.step);
-
-	return 0;
-}
-
-/**
- * sh64_next_free_dtlb_entry - Find the next available DTLB entry
- */
-unsigned long long sh64_next_free_dtlb_entry(void)
-{
-	return cpu_data->dtlb.next;
-}
-
-/**
- * sh64_get_wired_dtlb_entry - Allocate a wired (locked-in) entry in the DTLB
- */
-unsigned long long sh64_get_wired_dtlb_entry(void)
-{
-	unsigned long long entry = sh64_next_free_dtlb_entry();
-
-	cpu_data->dtlb.first += cpu_data->dtlb.step;
-	cpu_data->dtlb.next  += cpu_data->dtlb.step;
-
-	return entry;
-}
-
-/**
- * sh64_put_wired_dtlb_entry - Free a wired (locked-in) entry in the DTLB.
- *
- * @entry:	Address of TLB slot.
- *
- * Works like a stack, last one to allocate must be first one to free.
- */
-int sh64_put_wired_dtlb_entry(unsigned long long entry)
-{
-	__flush_tlb_slot(entry);
-
-	/*
-	 * We don't do any particularly useful tracking of wired entries,
-	 * so this approach works like a stack .. last one to be allocated
-	 * has to be the first one to be freed.
-	 *
-	 * We could potentially load wired entries into a list and work on
-	 * rebalancing the list periodically (which also entails moving the
-	 * contents of a TLB entry) .. though I have a feeling that this is
-	 * more trouble than it's worth.
-	 */
-
-	/*
-	 * Entry must be valid .. we don't want any ITLB addresses!
-	 */
-	if (entry <= DTLB_FIXED)
-		return -EINVAL;
-
-	/*
-	 * Next, check if we're within range to be freed. (ie, must be the
-	 * entry beneath the first 'free' entry!
-	 */
-	if (entry < (cpu_data->dtlb.first - cpu_data->dtlb.step))
-		return -EINVAL;
-
-	/* If we are, then bring this entry back into the list */
-	cpu_data->dtlb.first	-= cpu_data->dtlb.step;
-	cpu_data->dtlb.next	= entry;
-
-	return 0;
-}
-
-/**
- * sh64_setup_tlb_slot - Load up a translation in a wired slot.
- *
- * @config_addr:	Address of TLB slot.
- * @eaddr:		Virtual address.
- * @asid:		Address Space Identifier.
- * @paddr:		Physical address.
- *
- * Load up a virtual<->physical translation for @eaddr<->@paddr in the
- * pre-allocated TLB slot @config_addr (see sh64_get_wired_dtlb_entry).
- */
-void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
-			 unsigned long asid, unsigned long paddr)
-{
-	unsigned long long pteh, ptel;
-
-	pteh = neff_sign_extend(eaddr);
-	pteh &= PAGE_MASK;
-	pteh |= (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
-	ptel = neff_sign_extend(paddr);
-	ptel &= PAGE_MASK;
-	ptel |= (_PAGE_CACHABLE | _PAGE_READ | _PAGE_WRITE);
-
-	asm volatile("putcfg %0, 1, %1\n\t"
-		     "putcfg %0, 0, %2\n"
-		     : : "r" (config_addr), "r" (ptel), "r" (pteh));
-}
-
-/**
- * sh64_teardown_tlb_slot - Teardown a translation.
- *
- * @config_addr:	Address of TLB slot.
- *
- * Teardown any existing mapping in the TLB slot @config_addr.
- */
-void sh64_teardown_tlb_slot(unsigned long long config_addr)
-	__attribute__ ((alias("__flush_tlb_slot")));
-
-static int dtlb_entry;
-static unsigned long long dtlb_entries[64];
-
-void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
-	unsigned long long entry;
-	unsigned long paddr, flags;
-
-	BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
-
-	local_irq_save(flags);
-
-	entry = sh64_get_wired_dtlb_entry();
-	dtlb_entries[dtlb_entry++] = entry;
-
-	paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
-	paddr &= ~PAGE_MASK;
-
-	sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
-
-	local_irq_restore(flags);
-}
-
-void tlb_unwire_entry(void)
-{
-	unsigned long long entry;
-	unsigned long flags;
-
-	BUG_ON(!dtlb_entry);
-
-	local_irq_save(flags);
-	entry = dtlb_entries[dtlb_entry--];
-
-	sh64_teardown_tlb_slot(entry);
-	sh64_put_wired_dtlb_entry(entry);
-
-	local_irq_restore(flags);
-}
-
-void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
-{
-	unsigned long long ptel;
-	unsigned long long pteh=0;
-	struct tlb_info *tlbp;
-	unsigned long long next;
-	unsigned int fault_code = get_thread_fault_code();
-
-	/* Get PTEL first */
-	ptel = pte.pte_low;
-
-	/*
-	 * Set PTEH register
-	 */
-	pteh = neff_sign_extend(address & MMU_VPN_MASK);
-
-	/* Set the ASID. */
-	pteh |= get_asid() << PTEH_ASID_SHIFT;
-	pteh |= PTEH_VALID;
-
-	/* Set PTEL register, set_pte has performed the sign extension */
-	ptel &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
-
-	if (fault_code & FAULT_CODE_ITLB)
-		tlbp = &cpu_data->itlb;
-	else
-		tlbp = &cpu_data->dtlb;
-
-	next = tlbp->next;
-	__flush_tlb_slot(next);
-	asm volatile ("putcfg %0,1,%2\n\n\t"
-		      "putcfg %0,0,%1\n"
-		      : : "r" (next), "r" (pteh), "r" (ptel) );
-
-	next += TLB_STEP;
-	if (next > tlbp->last)
-		next = tlbp->first;
-	tlbp->next = next;
-}
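The wired-entry allocator that tlb-sh5.c implements is a strict LIFO over a contiguous bank of slots, which is why sh64_put_wired_dtlb_entry() documents "last one to allocate must be first one to free": the 'first' field doubles as the stack watermark. The same discipline in miniature (generic C, hypothetical base and step values):

#define SLOT_BASE	0x100	/* first wireable slot, stand-in for DTLB_FIXED */
#define SLOT_STEP	0x10	/* distance between consecutive slots */

static unsigned long next_free = SLOT_BASE + SLOT_STEP;

/* Allocate: hand out the lowest free slot and bump the watermark. */
static unsigned long get_wired_slot(void)
{
	unsigned long slot = next_free;
	next_free += SLOT_STEP;
	return slot;
}

/* Free: only legal for the most recently allocated slot, exactly the
 * constraint sh64_put_wired_dtlb_entry() enforces with range checks. */
static int put_wired_slot(unsigned long slot)
{
	if (slot != next_free - SLOT_STEP)
		return -1;	/* not the top of the stack */
	next_free -= SLOT_STEP;
	return 0;
}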
diff --git a/arch/sh/mm/tlbex_64.c b/arch/sh/mm/tlbex_64.c
deleted file mode 100644
index 0d015f7556fa..000000000000
--- a/arch/sh/mm/tlbex_64.c
+++ /dev/null
@@ -1,171 +0,0 @@
-/*
- * The SH64 TLB miss.
- *
- * Original code from fault.c
- * Copyright (C) 2000, 2001  Paolo Alberelli
- *
- * Fast PTE->TLB refill path
- * Copyright (C) 2003 Richard.Curnow@superh.com
- *
- * IMPORTANT NOTES :
- * The do_fast_page_fault function is called from a context in entry.S
- * where very few registers have been saved.  In particular, the code in
- * this file must be compiled not to use ANY caller-save registers that
- * are not part of the restricted save set.  Also, it means that code in
- * this file must not make calls to functions elsewhere in the kernel, or
- * else the excepting context will see corruption in its caller-save
- * registers.  Plus, the entry.S save area is non-reentrant, so this code
- * has to run with SR.BL==1, i.e. no interrupts taken inside it and panic
- * on any exception.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/interrupt.h>
-#include <linux/kprobes.h>
-#include <asm/tlb.h>
-#include <asm/io.h>
-#include <linux/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-
-static int handle_tlbmiss(unsigned long long protection_flags,
-			  unsigned long address)
-{
-	pgd_t *pgd;
-	p4d_t *p4d;
-	pud_t *pud;
-	pmd_t *pmd;
-	pte_t *pte;
-	pte_t entry;
-
-	if (is_vmalloc_addr((void *)address)) {
-		pgd = pgd_offset_k(address);
-	} else {
-		if (unlikely(address >= TASK_SIZE || !current->mm))
-			return 1;
-
-		pgd = pgd_offset(current->mm, address);
-	}
-
-	p4d = p4d_offset(pgd, address);
-	if (p4d_none(*p4d) || !p4d_present(*p4d))
-		return 1;
-
-	pud = pud_offset(p4d, address);
-	if (pud_none(*pud) || !pud_present(*pud))
-		return 1;
-
-	pmd = pmd_offset(pud, address);
-	if (pmd_none(*pmd) || !pmd_present(*pmd))
-		return 1;
-
-	pte = pte_offset_kernel(pmd, address);
-	entry = *pte;
-	if (pte_none(entry) || !pte_present(entry))
-		return 1;
-
-	/*
-	 * If the page doesn't have sufficient protection bits set to
-	 * service the kind of fault being handled, there's not much
-	 * point doing the TLB refill.  Punt the fault to the general
-	 * handler.
-	 */
-	if ((pte_val(entry) & protection_flags) != protection_flags)
-		return 1;
-
-	update_mmu_cache(NULL, address, pte);
-
-	return 0;
-}
-
-/*
- * Put all this information into one structure so that everything is just
- * arithmetic relative to a single base address.  This reduces the number
- * of movi/shori pairs needed just to load addresses of static data.
- */
-struct expevt_lookup {
-	unsigned short protection_flags[8];
-	unsigned char  is_text_access[8];
-	unsigned char  is_write_access[8];
-};
-
-#define PRU (1<<9)
-#define PRW (1<<8)
-#define PRX (1<<7)
-#define PRR (1<<6)
-
-/* Sized as 8 rather than 4 to allow checking the PTE's PRU bit against whether
-   the fault happened in user mode or privileged mode. */
-static struct expevt_lookup expevt_lookup_table = {
-	.protection_flags = {PRX, PRX, 0, 0, PRR, PRR, PRW, PRW},
-	.is_text_access   = {1,   1,   0, 0, 0,   0,   0,   0}
-};
-
-static inline unsigned int
-expevt_to_fault_code(unsigned long expevt)
-{
-	if (expevt == 0xa40)
-		return FAULT_CODE_ITLB;
-	else if (expevt == 0x060)
-		return FAULT_CODE_WRITE;
-
-	return 0;
-}
-
-/*
-   This routine handles page faults that can be serviced just by refilling a
-   TLB entry from an existing page table entry.  (This case represents a very
-   large majority of page faults.)  Return 1 if the fault was successfully
-   handled.  Return 0 if the fault could not be handled.  (This leads into the
-   general fault handling in fault.c which deals with mapping file-backed
-   pages, stack growth, segmentation faults, swapping etc etc)
- */
-asmlinkage int __kprobes
-do_fast_page_fault(unsigned long long ssr_md, unsigned long long expevt,
-		   unsigned long address)
-{
-	unsigned long long protection_flags;
-	unsigned long long index;
-	unsigned long long expevt4;
-	unsigned int fault_code;
-
-	/* The next few lines implement a way of hashing EXPEVT into a
-	 * small array index which can be used to lookup parameters
-	 * specific to the type of TLBMISS being handled.
-	 *
-	 * Note:
-	 *	ITLBMISS has EXPEVT==0xa40
-	 *	RTLBMISS has EXPEVT==0x040
-	 *	WTLBMISS has EXPEVT==0x060
-	 */
-	expevt4 = (expevt >> 4);
-	/* TODO : xor ssr_md into this expression too.  Then we can check
-	 * that PRU is set when it needs to be. */
-	index = expevt4 ^ (expevt4 >> 5);
-	index &= 7;
-
-	fault_code = expevt_to_fault_code(expevt);
-
-	protection_flags = expevt_lookup_table.protection_flags[index];
-
-	if (expevt_lookup_table.is_text_access[index])
-		fault_code |= FAULT_CODE_ITLB;
-	if (!ssr_md)
-		fault_code |= FAULT_CODE_USER;
-
-	set_thread_fault_code(fault_code);
-
-	return handle_tlbmiss(protection_flags, address);
-}
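The EXPEVT hash in the removed do_fast_page_fault() can be checked by hand: it must send the three miss types (ITLBMISS 0xa40, RTLBMISS 0x040, WTLBMISS 0x060) to distinct slots of the 8-entry lookup table. A self-contained check of that arithmetic:

#include <assert.h>

/* index = h ^ (h >> 5), low 3 bits, where h = EXPEVT >> 4 */
static unsigned int expevt_index(unsigned long expevt)
{
	unsigned long e4 = expevt >> 4;
	return (e4 ^ (e4 >> 5)) & 7;
}

int main(void)
{
	/* ITLBMISS: 0xa40 -> 0xa4 ^ 0x05 = 0xa1 -> index 1 */
	assert(expevt_index(0xa40) == 1);
	/* RTLBMISS: 0x040 -> 0x04 ^ 0x00 = 0x04 -> index 4 */
	assert(expevt_index(0x040) == 4);
	/* WTLBMISS: 0x060 -> 0x06 ^ 0x00 = 0x06 -> index 6 */
	assert(expevt_index(0x060) == 6);
	return 0;
}

Indices 1, 4 and 6 carry PRX (with is_text_access set), PRR and PRW respectively in the removed expevt_lookup_table initializer, so each miss type demands exactly the protection bit it needs.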
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
deleted file mode 100644
index bd0715d5dca4..000000000000
--- a/arch/sh/mm/tlbflush_64.c
+++ /dev/null
@@ -1,172 +0,0 @@
-/*
- * arch/sh/mm/tlb-flush_64.c
- *
- * Copyright (C) 2000, 2001  Paolo Alberelli
- * Copyright (C) 2003  Richard Curnow (/proc/tlb, bug fixes)
- * Copyright (C) 2003 - 2012  Paul Mundt
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#include <linux/signal.h>
-#include <linux/rwsem.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/perf_event.h>
-#include <linux/interrupt.h>
-#include <asm/io.h>
-#include <asm/tlb.h>
-#include <linux/uaccess.h>
-#include <asm/pgalloc.h>
-#include <asm/mmu_context.h>
-
-void local_flush_tlb_one(unsigned long asid, unsigned long page)
-{
-	unsigned long long match, pteh=0, lpage;
-	unsigned long tlb;
-
-	/*
-	 * Sign-extend based on neff.
-	 */
-	lpage = neff_sign_extend(page);
-	match = (asid << PTEH_ASID_SHIFT) | PTEH_VALID;
-	match |= lpage;
-
-	for_each_itlb_entry(tlb) {
-		asm volatile ("getcfg	%1, 0, %0"
-			      : "=r" (pteh)
-			      : "r" (tlb) );
-
-		if (pteh == match) {
-			__flush_tlb_slot(tlb);
-			break;
-		}
-	}
-
-	for_each_dtlb_entry(tlb) {
-		asm volatile ("getcfg	%1, 0, %0"
-			      : "=r" (pteh)
-			      : "r" (tlb) );
-
-		if (pteh == match) {
-			__flush_tlb_slot(tlb);
-			break;
-		}
-
-	}
-}
-
-void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
-{
-	unsigned long flags;
-
-	if (vma->vm_mm) {
-		page &= PAGE_MASK;
-		local_irq_save(flags);
-		local_flush_tlb_one(get_asid(), page);
-		local_irq_restore(flags);
-	}
-}
-
-void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
-			   unsigned long end)
-{
-	unsigned long flags;
-	unsigned long long match, pteh=0, pteh_epn, pteh_low;
-	unsigned long tlb;
-	unsigned int cpu = smp_processor_id();
-	struct mm_struct *mm;
-
-	mm = vma->vm_mm;
-	if (cpu_context(cpu, mm) == NO_CONTEXT)
-		return;
-
-	local_irq_save(flags);
-
-	start &= PAGE_MASK;
-	end &= PAGE_MASK;
-
-	match = (cpu_asid(cpu, mm) << PTEH_ASID_SHIFT) | PTEH_VALID;
-
-	/* Flush ITLB */
-	for_each_itlb_entry(tlb) {
-		asm volatile ("getcfg	%1, 0, %0"
-			      : "=r" (pteh)
-			      : "r" (tlb) );
-
-		pteh_epn = pteh & PAGE_MASK;
-		pteh_low = pteh & ~PAGE_MASK;
-
-		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
-			__flush_tlb_slot(tlb);
-	}
-
-	/* Flush DTLB */
-	for_each_dtlb_entry(tlb) {
-		asm volatile ("getcfg	%1, 0, %0"
-			      : "=r" (pteh)
-			      : "r" (tlb) );
-
-		pteh_epn = pteh & PAGE_MASK;
-		pteh_low = pteh & ~PAGE_MASK;
-
-		if (pteh_low == match && pteh_epn >= start && pteh_epn <= end)
-			__flush_tlb_slot(tlb);
-	}
-
-	local_irq_restore(flags);
-}
-
-void local_flush_tlb_mm(struct mm_struct *mm)
-{
-	unsigned long flags;
-	unsigned int cpu = smp_processor_id();
-
-	if (cpu_context(cpu, mm) == NO_CONTEXT)
-		return;
-
-	local_irq_save(flags);
-
-	cpu_context(cpu, mm) = NO_CONTEXT;
-	if (mm == current->mm)
-		activate_context(mm, cpu);
-
-	local_irq_restore(flags);
-}
-
-void local_flush_tlb_all(void)
-{
-	/* Invalidate all, including shared pages, excluding fixed TLBs */
-	unsigned long flags, tlb;
-
-	local_irq_save(flags);
-
-	/* Flush each ITLB entry */
-	for_each_itlb_entry(tlb)
-		__flush_tlb_slot(tlb);
-
-	/* Flush each DTLB entry */
-	for_each_dtlb_entry(tlb)
-		__flush_tlb_slot(tlb);
-
-	local_irq_restore(flags);
-}
-
-void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
-	/* FIXME: Optimize this later.. */
-	flush_tlb_all();
-}
-
-void __flush_tlb_global(void)
-{
-	flush_tlb_all();
-}