Diffstat (limited to 'arch/arm/mm')
 arch/arm/mm/Kconfig      |  23
 arch/arm/mm/abort-ev6.S  |   3
 arch/arm/mm/cache-v6.S   |  33
 arch/arm/mm/flush.c      |  23
 arch/arm/mm/ioremap.c    |   6
 arch/arm/mm/mmu.c        |  15
 arch/arm/mm/proc-v6.S    |   6
 arch/arm/mm/proc-v7.S    |  97
 arch/arm/mm/tlb-v6.S     |   3
 arch/arm/mm/tlb-v7.S     |  20
 10 files changed, 199 insertions, 30 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 20979564e7ee..83c025e72ceb 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -391,7 +391,7 @@ config CPU_FEROCEON_OLD_ID
 
 # ARMv6
 config CPU_V6
-	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
+	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
 	select CPU_32v6
 	select CPU_ABRT_EV6
 	select CPU_PABRT_NOIFAR
@@ -416,7 +416,7 @@ config CPU_32v6K
 
 # ARMv7
 config CPU_V7
-	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
+	bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
 	select CPU_32v6K
 	select CPU_32v7
 	select CPU_ABRT_EV7
@@ -639,10 +639,23 @@ config CPU_BIG_ENDIAN
 	  port must properly enable any big-endian related features
 	  of your chipset/board/processor.
 
+config CPU_ENDIAN_BE8
+	bool
+	depends on CPU_BIG_ENDIAN
+	default CPU_V6 || CPU_V7
+	help
+	  Support for the BE-8 (big-endian) mode on ARMv6 and ARMv7 processors.
+
+config CPU_ENDIAN_BE32
+	bool
+	depends on CPU_BIG_ENDIAN
+	default !CPU_ENDIAN_BE8
+	help
+	  Support for the BE-32 (big-endian) mode on pre-ARMv6 processors.
+
 config CPU_HIGH_VECTOR
 	depends on !MMU && CPU_CP15 && !CPU_ARM740T
 	bool "Select the High exception vector"
-	default n
 	help
 	  Say Y here to select high exception vector(0xFFFF0000~).
 	  The exception vector can be vary depending on the platform
@@ -726,7 +739,6 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
 
 config OUTER_CACHE
 	bool
-	default n
 
 config CACHE_FEROCEON_L2
 	bool "Enable the Feroceon L2 cache controller"
@@ -739,7 +751,6 @@ config CACHE_FEROCEON_L2
 config CACHE_FEROCEON_L2_WRITETHROUGH
 	bool "Force Feroceon L2 cache write through"
 	depends on CACHE_FEROCEON_L2
-	default n
 	help
 	  Say Y here to use the Feroceon L2 cache in writethrough mode.
 	  Unless you specifically require this, say N for writeback mode.
@@ -747,7 +758,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX
 	default y
 	select OUTER_CACHE
 	help
diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S
index 6f7e70907e44..f332df7f0d37 100644
--- a/arch/arm/mm/abort-ev6.S
+++ b/arch/arm/mm/abort-ev6.S
@@ -37,6 +37,9 @@ ENTRY(v6_early_abort)
 	movne	pc, lr
 	do_thumb_abort
 	ldreq	r3, [r2]			@ read aborted ARM instruction
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	reveq	r3, r3
+#endif
 	do_ldrd_abort
 	tst	r3, #1 << 20			@ L = 0 -> write
 	orreq	r1, r1, #1 << 11		@ yes.
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 2c6c2a7c05a0..8f5c13f4c936 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -20,6 +20,31 @@
 #define D_CACHE_LINE_SIZE	32
 #define BTB_FLUSH_SIZE		8
 
+#ifdef CONFIG_ARM_ERRATA_411920
+/*
+ * Invalidate the entire I cache (this code is a workaround for the ARM1136
+ * erratum 411920 - Invalidate Instruction Cache operation can fail. This
+ * erratum is present in 1136, 1156 and 1176. It does not affect the MPCore.
+ *
+ * Registers:
+ *   r0 - set to 0
+ *   r1 - corrupted
+ */
+ENTRY(v6_icache_inval_all)
+	mov	r0, #0
+	mrs	r1, cpsr
+	cpsid	ifa				@ disable interrupts
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
+	mcr	p15, 0, r0, c7, c5, 0		@ invalidate entire I-cache
+	msr	cpsr_cx, r1			@ restore interrupts
+	.rept	11				@ ARM Ltd recommends at least
+	nop					@ 11 NOPs
+	.endr
+	mov	pc, lr
+#endif
+
 /*
  *	v6_flush_cache_all()
  *
@@ -31,8 +56,12 @@ ENTRY(v6_flush_kern_cache_all)
 	mov	r0, #0
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c14, 0		@ D cache clean+invalidate
+#ifndef CONFIG_ARM_ERRATA_411920
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
 #else
+	b	v6_icache_inval_all
+#endif
+#else
 	mcr	p15, 0, r0, c7, c15, 0		@ Cache clean+invalidate
 #endif
 	mov	pc, lr
@@ -103,8 +132,12 @@ ENTRY(v6_coherent_user_range)
 	mov	r0, #0
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
+#ifndef CONFIG_ARM_ERRATA_411920
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
 #else
+	b	v6_icache_inval_all
+#endif
+#else
 	mcr	p15, 0, r0, c7, c5, 6		@ invalidate BTB
 #endif
 	mov	pc, lr
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 4e283481cee1..c07222eb5ce0 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -18,6 +18,10 @@
 
 #include "mm.h"
 
+#ifdef CONFIG_ARM_ERRATA_411920
+extern void v6_icache_inval_all(void);
+#endif
+
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 #define ALIAS_FLUSH_START	0xffff4000
@@ -32,10 +36,15 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
 	"	mcr	p15, 0, %2, c7, c10, 4\n"
+#ifndef CONFIG_ARM_ERRATA_411920
 	"	mcr	p15, 0, %2, c7, c5, 0\n"
+#endif
 	    :
 	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
 	    : "cc");
+#ifdef CONFIG_ARM_ERRATA_411920
+	v6_icache_inval_all();
+#endif
 }
 
 void flush_cache_mm(struct mm_struct *mm)
@@ -48,11 +57,16 @@ void flush_cache_mm(struct mm_struct *mm)
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
+		"	mcr	p15, 0, %0, c7, c10, 4\n"
+#ifndef CONFIG_ARM_ERRATA_411920
 		"	mcr	p15, 0, %0, c7, c5, 0\n"
-		"	mcr	p15, 0, %0, c7, c10, 4"
+#endif
 		    :
 		    : "r" (0)
 		    : "cc");
+#ifdef CONFIG_ARM_ERRATA_411920
+		v6_icache_inval_all();
+#endif
 	}
 }
 
@@ -67,11 +81,16 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
+		"	mcr	p15, 0, %0, c7, c10, 4\n"
+#ifndef CONFIG_ARM_ERRATA_411920
 		"	mcr	p15, 0, %0, c7, c5, 0\n"
-		"	mcr	p15, 0, %0, c7, c10, 4"
+#endif
 		    :
 		    : "r" (0)
 		    : "cc");
+#ifdef CONFIG_ARM_ERRATA_411920
+		v6_icache_inval_all();
+#endif
 	}
 }
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 9f88dd3be601..0ab75c60f7cf 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -110,6 +110,12 @@ static int remap_area_pages(unsigned long start, unsigned long pfn,
 	return err;
 }
 
+int ioremap_page(unsigned long virt, unsigned long phys,
+		 const struct mem_type *mtype)
+{
+	return remap_area_pages(virt, __phys_to_pfn(phys), PAGE_SIZE, mtype);
+}
+EXPORT_SYMBOL(ioremap_page);
 
 void __check_kvm_seq(struct mm_struct *mm)
 {
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index e6344ece00ce..fdaa9bb87c16 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -255,6 +255,7 @@ const struct mem_type *get_mem_type(unsigned int type)
 {
 	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
 }
+EXPORT_SYMBOL(get_mem_type);
 
 /*
  * Adjust the PMD section entries according to the CPU in use.
@@ -839,6 +840,20 @@ void __init reserve_node_zero(pg_data_t *pgdat)
 		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
 				     BOOTMEM_EXCLUSIVE);
 
+	/*
+	 * U300 - This platform family can share physical memory
+	 * between two ARM cpus, one running Linux and the other
+	 * running another OS.
+	 */
+	if (machine_is_u300()) {
+#ifdef CONFIG_MACH_U300_SINGLE_RAM
+#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) &&	\
+	CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
+		res_size = 0x00100000;
+#endif
+#endif
+	}
+
 #ifdef CONFIG_SA1111
 	/*
 	 * Because of the SA1111 DMA bug, we want to preserve our
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index f0cc599facb7..524ddae92595 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -10,6 +10,7 @@
  *
  *  This is the "shell" of the ARMv6 processor support.
  */
+#include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
@@ -132,7 +133,7 @@ cpu_v6_name:
 	.asciz	"ARMv6-compatible processor"
 	.align
 
-	.section ".text.init", #alloc, #execinstr
+	__INIT
 
 /*
  *	__v6_setup
@@ -169,6 +170,9 @@ __v6_setup:
 #endif /* CONFIG_MMU */
 	adr	r5, v6_crval
 	ldmia	r5, {r5, r6}
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	orr	r6, r6, #1 << 25		@ big-endian page tables
+#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index d1ebec42521d..180a08d03a03 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -9,6 +9,7 @@
  *
  *  This is the "shell" of the ARMv7 processor support.
  */
+#include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 #include <asm/asm-offsets.h>
@@ -18,17 +19,23 @@
 
 #include "proc-macros.S"
 
-#define TTB_C		(1 << 0)
 #define TTB_S		(1 << 1)
 #define TTB_RGN_NC	(0 << 3)
 #define TTB_RGN_OC_WBWA	(1 << 3)
 #define TTB_RGN_OC_WT	(2 << 3)
 #define TTB_RGN_OC_WB	(3 << 3)
+#define TTB_NOS		(1 << 5)
+#define TTB_IRGN_NC	((0 << 0) | (0 << 6))
+#define TTB_IRGN_WBWA	((0 << 0) | (1 << 6))
+#define TTB_IRGN_WT	((1 << 0) | (0 << 6))
+#define TTB_IRGN_WB	((1 << 0) | (1 << 6))
 
 #ifndef CONFIG_SMP
-#define TTB_FLAGS	TTB_C|TTB_RGN_OC_WB		@ mark PTWs cacheable, outer WB
+/* PTWs cacheable, inner WB not shareable, outer WB not shareable */
+#define TTB_FLAGS	TTB_IRGN_WB|TTB_RGN_OC_WB
 #else
-#define TTB_FLAGS	TTB_C|TTB_S|TTB_RGN_OC_WBWA	@ mark PTWs cacheable and shared, outer WBWA
+/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
+#define TTB_FLAGS	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
 #endif
 
 ENTRY(cpu_v7_proc_init)
@@ -95,6 +102,9 @@ ENTRY(cpu_v7_switch_mm)
 	mov	r2, #0
 	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
 	orr	r0, r0, #TTB_FLAGS
+#ifdef CONFIG_ARM_ERRATA_430973
+	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
+#endif
 	mcr	p15, 0, r2, c13, c0, 1		@ set reserved context ID
 	isb
 1:	mcr	p15, 0, r0, c2, c0, 0		@ set TTB 0
@@ -153,7 +163,7 @@ cpu_v7_name:
 	.ascii	"ARMv7 Processor"
 	.align
 
-	.section ".text.init", #alloc, #execinstr
+	__INIT
 
 /*
  *	__v7_setup
@@ -172,15 +182,45 @@
  */
 __v7_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode
-	orr	r0, r0, #(0x1 << 6)
+	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode and
+	orr	r0, r0, #(1 << 6) | (1 << 0)	@ TLB ops broadcasting
 	mcr	p15, 0, r0, c1, c0, 1
 #endif
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
 	bl	v7_flush_dcache_all
 	ldmia	r12, {r0-r5, r7, r9, r11, lr}
-	mov	r10, #0
+
+	mrc	p15, 0, r0, c0, c0, 0		@ read main ID register
+	and	r10, r0, #0xff000000		@ ARM?
+	teq	r10, #0x41000000
+	bne	2f
+	and	r5, r0, #0x00f00000		@ variant
+	and	r6, r0, #0x0000000f		@ revision
+	orr	r0, r6, r5, lsr #20-4		@ combine variant and revision
+
+#ifdef CONFIG_ARM_ERRATA_430973
+	teq	r5, #0x00100000			@ only present in r1p*
+	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
+	orreq	r10, r10, #(1 << 6)		@ set IBE to 1
+	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
+#endif
+#ifdef CONFIG_ARM_ERRATA_458693
+	teq	r0, #0x20			@ only present in r2p0
+	mrceq	p15, 0, r10, c1, c0, 1		@ read aux control register
+	orreq	r10, r10, #(1 << 5)		@ set L1NEON to 1
+	orreq	r10, r10, #(1 << 9)		@ set PLDNOP to 1
+	mcreq	p15, 0, r10, c1, c0, 1		@ write aux control register
+#endif
+#ifdef CONFIG_ARM_ERRATA_460075
+	teq	r0, #0x20			@ only present in r2p0
+	mrceq	p15, 1, r10, c9, c0, 2		@ read L2 cache aux ctrl register
+	tsteq	r10, #1 << 22
+	orreq	r10, r10, #(1 << 22)		@ set the Write Allocate disable bit
+	mcreq	p15, 1, r10, c9, c0, 2		@ write the L2 cache aux ctrl register
+#endif
+
+2:	mov	r10, #0
 #ifdef HARVARD_CACHE
 	mcr	p15, 0, r10, c7, c5, 0		@ I+BTB cache invalidate
 #endif
@@ -193,12 +233,43 @@ __v7_setup:
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
 #endif
-	ldr	r5, =0xff0aa1a8
-	ldr	r6, =0x40e040e0
+	/*
+	 * Memory region attributes with SCTLR.TRE=1
+	 *
+	 *   n = TEX[0],C,B
+	 *   TR = PRRR[2n+1:2n]		- memory type
+	 *   IR = NMRR[2n+1:2n]		- inner cacheable property
+	 *   OR = NMRR[2n+17:2n+16]	- outer cacheable property
+	 *
+	 *			n	TR	IR	OR
+	 *   UNCACHED		000	00
+	 *   BUFFERABLE		001	10	00	00
+	 *   WRITETHROUGH	010	10	10	10
+	 *   WRITEBACK		011	10	11	11
+	 *   reserved		110
+	 *   WRITEALLOC		111	10	01	01
+	 *   DEV_SHARED		100	01
+	 *   DEV_NONSHARED	100	01
+	 *   DEV_WC		001	10
+	 *   DEV_CACHED		011	10
+	 *
+	 * Other attributes:
+	 *
+	 *   DS0 = PRRR[16] = 0		- device shareable property
+	 *   DS1 = PRRR[17] = 1		- device shareable property
+	 *   NS0 = PRRR[18] = 0		- normal shareable property
+	 *   NS1 = PRRR[19] = 1		- normal shareable property
+	 *   NOS = PRRR[24+n] = 1	- not outer shareable
+	 */
+	ldr	r5, =0xff0a81a8			@ PRRR
+	ldr	r6, =0x40e040e0			@ NMRR
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
 	adr	r5, v7_crval
 	ldmia	r5, {r5, r6}
+#ifdef CONFIG_CPU_ENDIAN_BE8
+	orr	r6, r6, #1 << 25		@ big-endian page tables
+#endif
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
@@ -206,14 +277,14 @@
 ENDPROC(__v7_setup)
 
 	/*   AT
 	 *  TFR   EV X F   I D LR    S
 	 * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM
 	 * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx	< forced
-	 *    1    0 110       0011 1.00 .111 1101	< we want
+	 *    1    0 110       0011 1100 .111 1101	< we want
 	 */
 	.type	v7_crval, #object
 v7_crval:
-	crval	clear=0x0120c302, mmuset=0x10c0387d, ucset=0x00c0187c
+	crval	clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c
 
 __v7_setup_stack:
 	.space	4 * 11				@ 11 registers
diff --git a/arch/arm/mm/tlb-v6.S b/arch/arm/mm/tlb-v6.S
index 20f84bbaa9bb..73d7d89b04c4 100644
--- a/arch/arm/mm/tlb-v6.S
+++ b/arch/arm/mm/tlb-v6.S
@@ -10,6 +10,7 @@
  *  ARM architecture version 6 TLB handling functions.
  *  These assume a split I/D TLB.
  */
+#include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
@@ -87,7 +88,7 @@ ENTRY(v6wbi_flush_kern_tlb_range)
 	mcr	p15, 0, r2, c7, c5, 4		@ prefetch flush
 	mov	pc, lr
 
-	.section ".text.init", #alloc, #execinstr
+	__INIT
 
 	.type	v6wbi_tlb_fns, #object
 ENTRY(v6wbi_tlb_fns)
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 24ba5109f2e7..a26a605b73bd 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -11,6 +11,7 @@
  *  ARM architecture version 6 TLB handling functions.
  *  These assume a split I/D TLB.
  */
+#include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
@@ -41,9 +42,11 @@ ENTRY(v7wbi_flush_user_tlb_range)
 	mov	r1, r1, lsl #PAGE_SHIFT
 	vma_vm_flags r2, r2			@ get vma->vm_flags
 1:
-	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA (was 1)
-	tst	r2, #VM_EXEC			@ Executable area ?
-	mcrne	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA (was 1)
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
+#else
+	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
+#endif
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
@@ -68,8 +71,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	mov	r0, r0, lsl #PAGE_SHIFT
 	mov	r1, r1, lsl #PAGE_SHIFT
 1:
-	mcr	p15, 0, r0, c8, c6, 1		@ TLB invalidate D MVA
-	mcr	p15, 0, r0, c8, c5, 1		@ TLB invalidate I MVA
+#ifdef CONFIG_SMP
+	mcr	p15, 0, r0, c8, c3, 1		@ TLB invalidate U MVA (shareable)
+#else
+	mcr	p15, 0, r0, c8, c7, 1		@ TLB invalidate U MVA
+#endif
 	add	r0, r0, #PAGE_SZ
 	cmp	r0, r1
 	blo	1b
@@ -80,11 +86,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
 	mov	pc, lr
 ENDPROC(v7wbi_flush_kern_tlb_range)
 
-	.section ".text.init", #alloc, #execinstr
+	__INIT
 
 	.type	v7wbi_tlb_fns, #object
 ENTRY(v7wbi_tlb_fns)
 	.long	v7wbi_flush_user_tlb_range
 	.long	v7wbi_flush_kern_tlb_range
-	.long	v6wbi_tlb_flags
+	.long	v7wbi_tlb_flags
 	.size	v7wbi_tlb_fns, . - v7wbi_tlb_fns