Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/Kconfig          | 37
-rw-r--r-- | arch/arm/mm/cache-l2x0.c     | 12
-rw-r--r-- | arch/arm/mm/cache-v7.S       |  4
-rw-r--r-- | arch/arm/mm/context.c        |  6
-rw-r--r-- | arch/arm/mm/dma-mapping.c    |  7
-rw-r--r-- | arch/arm/mm/hugetlbpage.c    | 43
-rw-r--r-- | arch/arm/mm/init.c           |  5
-rw-r--r-- | arch/arm/mm/mmu.c            | 61
-rw-r--r-- | arch/arm/mm/nommu.c          |  2
-rw-r--r-- | arch/arm/mm/proc-feroceon.S  | 26
-rw-r--r-- | arch/arm/mm/proc-v7-2level.S |  2
-rw-r--r-- | arch/arm/mm/proc-v7-3level.S |  2
-rw-r--r-- | arch/arm/mm/proc-v7.S        | 27
-rw-r--r-- | arch/arm/mm/tlb-v7.S         |  8
14 files changed, 159 insertions, 83 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 6cacdc8dd654..cd2c88e7a8f7 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -421,24 +421,28 @@ config CPU_32v3
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v4T
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v5
 	bool
 	select CPU_USE_DOMAINS if MMU
 	select NEEDS_SYSCALL_FOR_CMPXCHG if SMP
 	select TLS_REG_EMUL if SMP || !MMU
+	select NEED_KUSER_HELPERS
 
 config CPU_32v6
 	bool
@@ -776,6 +780,7 @@ config CPU_BPREDICT_DISABLE
 
 config TLS_REG_EMUL
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  An SMP system using a pre-ARMv6 processor (there are apparently
 	  a few prototypes like that in existence) and therefore access to
@@ -783,11 +788,43 @@ config TLS_REG_EMUL
 
 config NEEDS_SYSCALL_FOR_CMPXCHG
 	bool
+	select NEED_KUSER_HELPERS
 	help
 	  SMP on a pre-ARMv6 processor?  Well OK then.
 	  Forget about fast user space cmpxchg support.
 	  It is just not possible.
 
+config NEED_KUSER_HELPERS
+	bool
+
+config KUSER_HELPERS
+	bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+	default y
+	help
+	  Warning: disabling this option may break user programs.
+
+	  Provide kuser helpers in the vector page.  The kernel provides
+	  helper code to userspace in read only form at a fixed location
+	  in the high vector page to allow userspace to be independent of
+	  the CPU type fitted to the system.  This permits binaries to be
+	  run on ARMv4 through to ARMv7 without modification.
+
+	  See Documentation/arm/kernel_user_helpers.txt for details.
+
+	  However, the fixed address nature of these helpers can be used
+	  by ROP (return orientated programming) authors when creating
+	  exploits.
+
+	  If all of the binaries and libraries which run on your platform
+	  are built specifically for your platform, and make no use of
+	  these helpers, then you can turn this option off to hinder
+	  such exploits. However, in that case, if a binary or library
+	  relying on those helpers is run, it will receive a SIGILL signal,
+	  which will terminate the program.
+
+	  Say N here only if you are absolutely certain that you do not
+	  need these helpers; otherwise, the safe option is to say Y.
+
 config DMA_CACHE_RWFO
 	bool "Enable read/write for ownership DMA cache maintenance"
 	depends on CPU_V6K && SMP
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index d70e0aba0c9d..447da6ffadd5 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -290,7 +290,7 @@ static void l2x0_disable(void)
 	raw_spin_lock_irqsave(&l2x0_lock, flags);
 	__l2x0_flush_all();
 	writel_relaxed(0, l2x0_base + L2X0_CTRL);
-	dsb();
+	dsb(st);
 	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
 }
 
@@ -417,9 +417,9 @@ void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
 		outer_cache.disable = l2x0_disable;
 	}
 
-	printk(KERN_INFO "%s cache controller enabled\n", type);
-	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
-			ways, cache_id, aux, l2x0_size);
+	pr_info("%s cache controller enabled\n", type);
+	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
+		ways, cache_id, aux, l2x0_size >> 10);
 }
 
 #ifdef CONFIG_OF
@@ -929,7 +929,9 @@ static const struct of_device_id l2x0_ids[] __initconst = {
 	  .data = (void *)&aurora_no_outer_data},
 	{ .compatible = "marvell,aurora-outer-cache",
 	  .data = (void *)&aurora_with_outer_data},
-	{ .compatible = "bcm,bcm11351-a2-pl310-cache",
+	{ .compatible = "brcm,bcm11351-a2-pl310-cache",
+	  .data = (void *)&bcm_l2x0_data},
+	{ .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */
 	  .data = (void *)&bcm_l2x0_data},
 	{}
 };
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index 515b00064da8..b5c467a65c27 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -282,7 +282,7 @@ ENTRY(v7_coherent_user_range)
 	add	r12, r12, r2
 	cmp	r12, r1
 	blo	1b
-	dsb
+	dsb	ishst
 	icache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r12, r0, r3
@@ -294,7 +294,7 @@ ENTRY(v7_coherent_user_range)
 	mov	r0, #0
 	ALT_SMP(mcr	p15, 0, r0, c7, c1, 6)	@ invalidate BTB Inner Shareable
 	ALT_UP(mcr	p15, 0, r0, c7, c5, 6)	@ invalidate BTB
-	dsb
+	dsb	ishst
 	isb
 	mov	pc, lr
 
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index b55b1015724b..84e6f772e204 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -162,10 +162,7 @@ static void flush_context(unsigned int cpu)
 	}
 
 	/* Queue a TLB invalidate and flush the I-cache if necessary. */
-	if (!tlb_ops_need_broadcast())
-		cpumask_set_cpu(cpu, &tlb_flush_pending);
-	else
-		cpumask_setall(&tlb_flush_pending);
+	cpumask_setall(&tlb_flush_pending);
 
 	if (icache_is_vivt_asid_tagged())
 		__flush_icache_all();
@@ -245,7 +242,6 @@ void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
 		local_flush_bp_all();
 		local_flush_tlb_all();
-		dummy_flush_tlb_a15_erratum();
 	}
 
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7f9b1798c6cf..f5e1a8471714 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -358,7 +358,7 @@ static int __init atomic_pool_init(void)
 	if (!pages)
 		goto no_pages;
 
-	if (IS_ENABLED(CONFIG_CMA))
+	if (IS_ENABLED(CONFIG_DMA_CMA))
 		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
 					      atomic_pool_init);
 	else
@@ -455,7 +455,6 @@ static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
 	unsigned end = start + size;
 
 	apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
-	dsb();
 	flush_tlb_kernel_range(start, end);
 }
 
@@ -670,7 +669,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 		addr = __alloc_simple_buffer(dev, size, gfp, &page);
 	else if (!(gfp & __GFP_WAIT))
 		addr = __alloc_from_pool(size, &page);
-	else if (!IS_ENABLED(CONFIG_CMA))
+	else if (!IS_ENABLED(CONFIG_DMA_CMA))
 		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
 	else
 		addr = __alloc_from_contiguous(dev, size, prot, &page, caller);
@@ -759,7 +758,7 @@ static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		__dma_free_buffer(page, size);
 	} else if (__free_from_pool(cpu_addr, size)) {
 		return;
-	} else if (!IS_ENABLED(CONFIG_CMA)) {
+	} else if (!IS_ENABLED(CONFIG_DMA_CMA)) {
 		__dma_free_remap(cpu_addr, size);
 		__dma_free_buffer(page, size);
 	} else {
diff --git a/arch/arm/mm/hugetlbpage.c b/arch/arm/mm/hugetlbpage.c
index 3d1e4a205b0b..66781bf34077 100644
--- a/arch/arm/mm/hugetlbpage.c
+++ b/arch/arm/mm/hugetlbpage.c
@@ -36,22 +36,6 @@
  * of type casting from pmd_t * to pte_t *.
  */
 
-pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pmd_t *pmd = NULL;
-
-	pgd = pgd_offset(mm, addr);
-	if (pgd_present(*pgd)) {
-		pud = pud_offset(pgd, addr);
-		if (pud_present(*pud))
-			pmd = pmd_offset(pud, addr);
-	}
-
-	return (pte_t *)pmd;
-}
-
 struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
 			      int write)
 {
@@ -68,33 +52,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
 	return 0;
 }
 
-pte_t *huge_pte_alloc(struct mm_struct *mm,
-			unsigned long addr, unsigned long sz)
-{
-	pgd_t *pgd;
-	pud_t *pud;
-	pte_t *pte = NULL;
-
-	pgd = pgd_offset(mm, addr);
-	pud = pud_alloc(mm, pgd, addr);
-	if (pud)
-		pte = (pte_t *)pmd_alloc(mm, pud, addr);
-
-	return pte;
-}
-
-struct page *
-follow_huge_pmd(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd, int write)
-{
-	struct page *page;
-
-	page = pte_page(*(pte_t *)pmd);
-	if (page)
-		page += ((address & ~PMD_MASK) >> PAGE_SHIFT);
-	return page;
-}
-
 int pmd_huge(pmd_t pmd)
 {
 	return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 15225d829d71..2958e74fc42c 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -231,7 +231,7 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
 }
 #endif
 
-void __init setup_dma_zone(struct machine_desc *mdesc)
+void __init setup_dma_zone(const struct machine_desc *mdesc)
 {
 #ifdef CONFIG_ZONE_DMA
 	if (mdesc->dma_zone_size) {
@@ -335,7 +335,8 @@ phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
 	return phys;
 }
 
-void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
+void __init arm_memblock_init(struct meminfo *mi,
+			      const struct machine_desc *mdesc)
 {
 	int i;
 
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f56617a2392..b1d17eeb59b8 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -989,6 +989,7 @@ phys_addr_t arm_lowmem_limit __initdata = 0;
 
 void __init sanity_check_meminfo(void)
 {
+	phys_addr_t memblock_limit = 0;
 	int i, j, highmem = 0;
 	phys_addr_t vmalloc_limit = __pa(vmalloc_min - 1) + 1;
 
@@ -1052,9 +1053,32 @@ void __init sanity_check_meminfo(void)
 			bank->size = size_limit;
 		}
 #endif
-		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
-			arm_lowmem_limit = bank->start + bank->size;
+		if (!bank->highmem) {
+			phys_addr_t bank_end = bank->start + bank->size;
+			if (bank_end > arm_lowmem_limit)
+				arm_lowmem_limit = bank_end;
+
+			/*
+			 * Find the first non-section-aligned page, and point
+			 * memblock_limit at it. This relies on rounding the
+			 * limit down to be section-aligned, which happens at
+			 * the end of this function.
+			 *
+			 * With this algorithm, the start or end of almost any
+			 * bank can be non-section-aligned. The only exception
+			 * is that the start of the bank 0 must be section-
+			 * aligned, since otherwise memory would need to be
+			 * allocated when mapping the start of bank 0, which
+			 * occurs before any free memory is mapped.
+			 */
+			if (!memblock_limit) {
+				if (!IS_ALIGNED(bank->start, SECTION_SIZE))
+					memblock_limit = bank->start;
+				else if (!IS_ALIGNED(bank_end, SECTION_SIZE))
+					memblock_limit = bank_end;
+			}
+		}
 
 		j++;
 	}
 #ifdef CONFIG_HIGHMEM
@@ -1079,7 +1103,18 @@ void __init sanity_check_meminfo(void)
 #endif
 	meminfo.nr_banks = j;
 	high_memory = __va(arm_lowmem_limit - 1) + 1;
-	memblock_set_current_limit(arm_lowmem_limit);
+
+	/*
+	 * Round the memblock limit down to a section size.  This
+	 * helps to ensure that we will allocate memory from the
+	 * last full section, which should be mapped.
+	 */
+	if (memblock_limit)
+		memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+	if (!memblock_limit)
+		memblock_limit = arm_lowmem_limit;
+
+	memblock_set_current_limit(memblock_limit);
 }
 
 static inline void prepare_page_table(void)
@@ -1151,7 +1186,7 @@ void __init arm_mm_memblock_reserve(void)
  * called function.  This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
  */
-static void __init devicemaps_init(struct machine_desc *mdesc)
+static void __init devicemaps_init(const struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
@@ -1160,7 +1195,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = early_alloc(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE * 2);
 
 	early_trap_init(vectors);
 
@@ -1205,15 +1240,27 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
+#ifdef CONFIG_KUSER_HELPERS
 	map.type = MT_HIGH_VECTORS;
+#else
+	map.type = MT_LOW_VECTORS;
+#endif
 	create_mapping(&map);
 
 	if (!vectors_high()) {
 		map.virtual = 0;
+		map.length = PAGE_SIZE * 2;
 		map.type = MT_LOW_VECTORS;
 		create_mapping(&map);
 	}
 
+	/* Now create a kernel read-only mapping */
+	map.pfn += 1;
+	map.virtual = 0xffff0000 + PAGE_SIZE;
+	map.length = PAGE_SIZE;
+	map.type = MT_LOW_VECTORS;
+	create_mapping(&map);
+
 	/*
 	 * Ask the machine support to map in the statically mapped devices.
 	 */
@@ -1272,12 +1319,10 @@ static void __init map_lowmem(void)
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
-void __init paging_init(struct machine_desc *mdesc)
+void __init paging_init(const struct machine_desc *mdesc)
 {
 	void *zero_page;
 
-	memblock_set_current_limit(arm_lowmem_limit);
-
 	build_mem_type_table();
 	prepare_page_table();
 	map_lowmem();
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 1fa50100ab6a..34d4ab217bab 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -299,7 +299,7 @@ void __init sanity_check_meminfo(void)
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
-void __init paging_init(struct machine_desc *mdesc)
+void __init paging_init(const struct machine_desc *mdesc)
 {
 	early_trap_init((void *)CONFIG_VECTORS_BASE);
 	mpu_setup();
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index d5146b98c8d1..db79b62c92fb 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -514,6 +514,32 @@ ENTRY(cpu_feroceon_set_pte_ext)
 #endif
 	mov	pc, lr
 
+/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
+.globl	cpu_feroceon_suspend_size
+.equ	cpu_feroceon_suspend_size, 4 * 3
+#ifdef CONFIG_ARM_CPU_SUSPEND
+ENTRY(cpu_feroceon_do_suspend)
+	stmfd	sp!, {r4 - r6, lr}
+	mrc	p15, 0, r4, c13, c0, 0	@ PID
+	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
+	mrc	p15, 0, r6, c1, c0, 0	@ Control register
+	stmia	r0, {r4 - r6}
+	ldmfd	sp!, {r4 - r6, pc}
+ENDPROC(cpu_feroceon_do_suspend)
+
+ENTRY(cpu_feroceon_do_resume)
+	mov	ip, #0
+	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
+	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
+	ldmia	r0, {r4 - r6}
+	mcr	p15, 0, r4, c13, c0, 0	@ PID
+	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
+	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
+	mov	r0, r6			@ control register
+	b	cpu_resume_mmu
+ENDPROC(cpu_feroceon_do_resume)
+#endif
+
 	.type	__feroceon_setup, #function
 __feroceon_setup:
 	mov	r0, #0
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index f64afb9f1bd5..bdd3be4be77a 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -110,7 +110,7 @@ ENTRY(cpu_v7_set_pte_ext)
 ARM(	str	r3, [r0, #2048]! )
 THUMB(	add	r0, r0, #2048 )
 THUMB(	str	r3, [r0] )
-	ALT_SMP(mov	pc,lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S
index c36ac69488c8..01a719e18bb0 100644
--- a/arch/arm/mm/proc-v7-3level.S
+++ b/arch/arm/mm/proc-v7-3level.S
@@ -81,7 +81,7 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r3, #1 << (55 - 32)		@ L_PTE_DIRTY
 	orreq	r2, #L_PTE_RDONLY
 1:	strd	r2, r3, [r0]
-	ALT_SMP(mov	pc, lr)
+	ALT_SMP(W(nop))
 	ALT_UP (mcr	p15, 0, r0, c7, c10, 1)		@ flush_pte
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 5c6d5a3050ea..c63d9bdee51e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -75,14 +75,15 @@ ENTRY(cpu_v7_do_idle)
 ENDPROC(cpu_v7_do_idle)
 
 ENTRY(cpu_v7_dcache_clean_area)
-	ALT_SMP(mov	pc, lr)			@ MP extensions imply L1 PTW
-	ALT_UP(W(nop))
-	dcache_line_size r2, r3
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
+	ALT_SMP(W(nop))			@ MP extensions imply L1 PTW
+	ALT_UP_B(1f)
+	mov	pc, lr
+1:	dcache_line_size r2, r3
+2:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
 	add	r0, r0, r2
 	subs	r1, r1, r2
-	bhi	1b
-	dsb
+	bhi	2b
+	dsb	ishst
 	mov	pc, lr
 ENDPROC(cpu_v7_dcache_clean_area)
 
@@ -329,7 +330,19 @@ __v7_setup:
 1:
 #endif
 
-3:	mov	r10, #0
+	/* Cortex-A15 Errata */
+3:	ldr	r10, =0x00000c0f		@ Cortex-A15 primary part number
+	teq	r0, r10
+	bne	4f
+
+#ifdef CONFIG_ARM_ERRATA_773022
+	cmp	r6, #0x4			@ only present up to r0p4
+	mrcle	p15, 0, r10, c1, c0, 1		@ read aux control register
+	orrle	r10, r10, #1 << 1		@ disable loop buffer
+	mcrle	p15, 0, r10, c1, c0, 1		@ write aux control register
+#endif
+
+4:	mov	r10, #0
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
 	dsb
 #ifdef CONFIG_MMU
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index ea94765acf9a..355308767bae 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -35,7 +35,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	vma_vm_mm r3, r2			@ get vma->vm_mm
 	mmid	r3, r3				@ get vm_mm->context.id
-	dsb
+	dsb	ish
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
 	asid	r3, r3				@ mask ASID
@@ -56,7 +56,7 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
-	dsb
+	dsb	ish
 	mov	pc, lr
 ENDPROC(v7wbi_flush_user_tlb_range)
 
@@ -69,7 +69,7 @@ ENDPROC(v7wbi_flush_user_tlb_range)
 *	- end   - end address (exclusive, may not be aligned)
 */
 ENTRY(v7wbi_flush_kern_tlb_range)
-	dsb
+	dsb	ish
 	mov	r0, r0, lsr #PAGE_SHIFT		@ align address
 	mov	r1, r1, lsr #PAGE_SHIFT
 	mov	r0, r0, lsl #PAGE_SHIFT
@@ -84,7 +84,7 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
-	dsb
+	dsb	ish
 	isb
 	mov	pc, lr
 ENDPROC(v7wbi_flush_kern_tlb_range)
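
For context (not part of the patch itself): the kuser helpers governed by the new KUSER_HELPERS option are entry points at fixed, documented addresses in the high vector page (Documentation/arm/kernel_user_helpers.txt). The hedged sketch below shows the usual way a 32-bit ARM Linux userspace program reaches them; the addresses and calling convention come from that document, and on a kernel built with CONFIG_KUSER_HELPERS=n such a program is expected to be killed, as the help text above warns.

/*
 * Sketch only: calling ARM kuser helpers at their documented fixed
 * addresses (see Documentation/arm/kernel_user_helpers.txt).  This only
 * builds and runs on 32-bit ARM Linux; with CONFIG_KUSER_HELPERS=n the
 * helpers are not provided and a program relying on them is terminated
 * (SIGILL per the Kconfig help text above).
 */
#include <stdio.h>

/* Helper version word published by the kernel in the vector page. */
#define kuser_helper_version	(*(const int *)0xffff0ffc)

/* __kuser_get_tls: returns the TLS value, callable like a C function. */
typedef void *(*kuser_get_tls_fn)(void);
#define kuser_get_tls		((kuser_get_tls_fn)0xffff0fe0)

int main(void)
{
	printf("kuser helper version: %d\n", kuser_helper_version);
	printf("TLS value: %p\n", kuser_get_tls());
	return 0;
}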