From 31aa8fd6fd30b0f36416df7d09619768d26b4332 Mon Sep 17 00:00:00 2001 From: Russell King Date: Fri, 18 Dec 2009 11:10:03 +0000 Subject: ARM: Add caller information to ioremap This allows the procfs vmallocinfo file to show who created the ioremap regions. Note: __builtin_return_address(0) doesn't do what's expected if it's used in an inline function, so we leave __arm_ioremap callers in such places alone. Signed-off-by: Russell King --- arch/arm/mm/ioremap.c | 57 ++++++++++++++++++++++++++++++--------------------- arch/arm/mm/nommu.c | 12 +++++++++++ 2 files changed, 46 insertions(+), 23 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index 0ab75c60f7cf..28c8b950ef04 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -139,8 +139,8 @@ void __check_kvm_seq(struct mm_struct *mm) * which requires the new ioremap'd region to be referenced, the CPU will * reference the _old_ region. * - * Note that get_vm_area() allocates a guard 4K page, so we need to mask - * the size back to 1MB aligned or we will overflow in the loop below. + * Note that get_vm_area_caller() allocates a guard 4K page, so we need to + * mask the size back to 1MB aligned or we will overflow in the loop below. */ static void unmap_area_sections(unsigned long virt, unsigned long size) { @@ -254,22 +254,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, } #endif - -/* - * Remap an arbitrary physical address space into the kernel virtual - * address space. Needed when the kernel wants to access high addresses - * directly. - * - * NOTE! We need to allow non-page-aligned mappings too: we will obviously - * have to convert them into an offset in a page-aligned mapping, but the - * caller shouldn't need to know that small detail. - * - * 'flags' are the extra L_PTE_ flags that you want to specify for this - * mapping. See for more information. - */ -void __iomem * -__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, - unsigned int mtype) +void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, + unsigned long offset, size_t size, unsigned int mtype, void *caller) { const struct mem_type *type; int err; @@ -291,7 +277,7 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, */ size = PAGE_ALIGN(offset + size); - area = get_vm_area(size, VM_IOREMAP); + area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; addr = (unsigned long)area->addr; @@ -318,10 +304,9 @@ __arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, flush_cache_vmap(addr, addr + size); return (void __iomem *) (offset + addr); } -EXPORT_SYMBOL(__arm_ioremap_pfn); -void __iomem * -__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) +void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, + unsigned int mtype, void *caller) { unsigned long last_addr; unsigned long offset = phys_addr & ~PAGE_MASK; @@ -334,7 +319,33 @@ __arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) if (!size || last_addr < phys_addr) return NULL; - return __arm_ioremap_pfn(pfn, offset, size, mtype); + return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, + caller); +} + +/* + * Remap an arbitrary physical address space into the kernel virtual + * address space. Needed when the kernel wants to access high addresses + * directly. + * + * NOTE!
We need to allow non-page-aligned mappings too: we will obviously + * have to convert them into an offset in a page-aligned mapping, but the + * caller shouldn't need to know that small detail. + */ +void __iomem * +__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size, + unsigned int mtype) +{ + return __arm_ioremap_pfn_caller(pfn, offset, size, mtype, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(__arm_ioremap_pfn); + +void __iomem * +__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) +{ + return __arm_ioremap_caller(phys_addr, size, mtype, + __builtin_return_address(0)); } EXPORT_SYMBOL(__arm_ioremap); diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 374a8311bc84..9bfeb6b9509a 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -74,6 +74,12 @@ void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, } EXPORT_SYMBOL(__arm_ioremap_pfn); +void __iomem *__arm_ioremap_pfn_caller(unsigned long pfn, unsigned long offset, + size_t size, unsigned int mtype, void *caller) +{ + return __arm_ioremap_pfn(pfn, offset, size, mtype); +} + void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype) { @@ -81,6 +87,12 @@ void __iomem *__arm_ioremap(unsigned long phys_addr, size_t size, } EXPORT_SYMBOL(__arm_ioremap); +void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size, + unsigned int mtype, void *caller) +{ + return __arm_ioremap(phys_addr, size, mtype); +} + void __iounmap(volatile void __iomem *addr) { } -- cgit v1.2.3 From e119bfff1f102f8d1505910cd6c09df55c776b43 Mon Sep 17 00:00:00 2001 From: Russell King Date: Sun, 10 Jan 2010 17:23:29 +0000 Subject: ARM: Move creation of /proc/cpu out of alignment.c Always creating this directory avoids other users having to jump through silly hoops when they want to share this directory.
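As a rough sketch of what this enables (illustrative only, using the procfs API of this kernel generation; the entry name and read handler below are hypothetical, not part of the patch), any later initcall can now attach a file under the shared directory by path instead of creating or probing for /proc/cpu itself:

/* needs <linux/proc_fs.h>; device_initcall runs after the fs_initcall
 * that creates /proc/cpu in setup.c */
static int my_cpu_info_read(char *page, char **start, off_t off,
			    int count, int *eof, void *data)
{
	*eof = 1;
	return sprintf(page, "example\n");	/* hypothetical contents */
}

static int __init my_cpu_info_init(void)
{
	struct proc_dir_entry *res;

	/* "cpu/" resolves against the directory made by proc_cpu_init() */
	res = create_proc_entry("cpu/myinfo", S_IRUGO, NULL);
	if (!res)
		return -ENOMEM;
	res->read_proc = my_cpu_info_read;
	return 0;
}
device_initcall(my_cpu_info_init);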
Signed-off-by: Russell King --- arch/arm/Kconfig | 4 ++++ arch/arm/kernel/setup.c | 15 ++++++++++++++- arch/arm/mm/alignment.c | 6 +----- 3 files changed, 19 insertions(+), 6 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index c2238cd474c7..b224216c11db 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -52,6 +52,9 @@ config HAVE_TCM bool select GENERIC_ALLOCATOR +config HAVE_PROC_CPU + bool + config NO_IOPORT bool @@ -1229,6 +1232,7 @@ config ALIGNMENT_TRAP bool depends on CPU_CP15_MMU default y if !ARCH_EBSA110 + select HAVE_PROC_CPU if PROC_FS help ARM processors cannot fetch/store information which is not naturally aligned on the bus, i.e., a 4 byte fetch must start at an diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index c6c57b640b6b..5357e48f2c39 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -782,9 +783,21 @@ static int __init topology_init(void) return 0; } - subsys_initcall(topology_init); +#ifdef CONFIG_HAVE_PROC_CPU +static int __init proc_cpu_init(void) +{ + struct proc_dir_entry *res; + + res = proc_mkdir("cpu", NULL); + if (!res) + return -ENOMEM; + return 0; +} +fs_initcall(proc_cpu_init); +#endif + static const char *hwcap_str[] = { "swp", "half", diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index b270d6228fe2..0c5eb6983cef 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -898,11 +898,7 @@ static int __init alignment_init(void) #ifdef CONFIG_PROC_FS struct proc_dir_entry *res; - res = proc_mkdir("cpu", NULL); - if (!res) - return -ENOMEM; - - res = create_proc_entry("alignment", S_IWUSR | S_IRUGO, res); + res = create_proc_entry("cpu/alignment", S_IWUSR | S_IRUGO, NULL); if (!res) return -ENOMEM; -- cgit v1.2.3 From 2b0d8c251b8876d530a6bf671eb5425838fa698a Mon Sep 17 00:00:00 2001 From: Jeremy Kerr Date: Mon, 11 Jan 2010 23:17:34 +0100 Subject: ARM: 5880/1: arm: use generic infrastructure for early params The ARM setup code includes its own parser for early params; there's also one in the generic init code. This patch removes __early_param (and related code) from arch/arm/kernel/setup.c, and changes users to the generic early_param macro instead. The generic macro takes a char * argument, rather than char **, so we need to update the parser functions a little. Signed-off-by: Jeremy Kerr Signed-off-by: Russell King --- arch/arm/include/asm/setup.h | 12 -------- arch/arm/kernel/setup.c | 62 ++++++++++----------------------------- arch/arm/kernel/vmlinux.lds.S | 4 --- arch/arm/mach-footbridge/common.c | 7 +++-- arch/arm/mm/init.c | 12 ++++---- arch/arm/mm/mmu.c | 41 +++++++++++++------------- 6 files changed, 48 insertions(+), 90 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/setup.h b/arch/arm/include/asm/setup.h index 5ccce0a9b03c..f392fb4437af 100644 --- a/arch/arm/include/asm/setup.h +++ b/arch/arm/include/asm/setup.h @@ -223,18 +223,6 @@ extern struct meminfo meminfo; #define bank_phys_end(bank) ((bank)->start + (bank)->size) #define bank_phys_size(bank) (bank)->size -/* - * Early command line parameters.
- */ -struct early_params { - const char *arg; - void (*fn)(char **p); -}; - -#define __early_param(name,fn) \ -static struct early_params __early_##fn __used \ -__attribute__((__section__(".early_param.init"))) = { name, fn } - #endif /* __KERNEL__ */ #endif diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 5357e48f2c39..b01a56a03ed8 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -418,10 +418,11 @@ static int __init arm_add_memory(unsigned long start, unsigned long size) * Pick out the memory size. We look for mem=size@start, * where start and size are "size[KkMm]" */ -static void __init early_mem(char **p) +static int __init early_mem(char *p) { static int usermem __initdata = 0; unsigned long size, start; + char *endp; /* * If the user specifies memory size, we @@ -434,52 +435,15 @@ static void __init early_mem(char **p) } start = PHYS_OFFSET; - size = memparse(*p, p); - if (**p == '@') - start = memparse(*p + 1, p); + size = memparse(p, &endp); + if (*endp == '@') + start = memparse(endp + 1, NULL); arm_add_memory(start, size); -} -__early_param("mem=", early_mem); -/* - * Initial parsing of the command line. - */ -static void __init parse_cmdline(char **cmdline_p, char *from) -{ - char c = ' ', *to = command_line; - int len = 0; - - for (;;) { - if (c == ' ') { - extern struct early_params __early_begin, __early_end; - struct early_params *p; - - for (p = &__early_begin; p < &__early_end; p++) { - int arglen = strlen(p->arg); - - if (memcmp(from, p->arg, arglen) == 0) { - if (to != command_line) - to -= 1; - from += arglen; - p->fn(&from); - - while (*from != ' ' && *from != '\0') - from++; - break; - } - } - } - c = *from++; - if (!c) - break; - if (COMMAND_LINE_SIZE <= ++len) - break; - *to++ = c; - } - *to = '\0'; - *cmdline_p = command_line; + return 0; } +early_param("mem", early_mem); static void __init setup_ramdisk(int doload, int prompt, int image_start, unsigned int rd_sz) @@ -740,9 +704,15 @@ void __init setup_arch(char **cmdline_p) init_mm.end_data = (unsigned long) _edata; init_mm.brk = (unsigned long) _end; - memcpy(boot_command_line, from, COMMAND_LINE_SIZE); - boot_command_line[COMMAND_LINE_SIZE-1] = '\0'; - parse_cmdline(cmdline_p, from); + /* parse_early_param needs a boot_command_line */ + strlcpy(boot_command_line, from, COMMAND_LINE_SIZE); + + /* populate command_line too for later use, preserving boot_command_line */ + strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); + *cmdline_p = command_line; + + parse_early_param(); + paging_init(mdesc); request_standard_resources(&meminfo, mdesc); diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 4957e13ef55b..b16c07914b55 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -43,10 +43,6 @@ SECTIONS INIT_SETUP(16) - __early_begin = .; - *(.early_param.init) - __early_end = .; - INIT_CALLS CON_INITCALL SECURITY_INITCALL diff --git a/arch/arm/mach-footbridge/common.c b/arch/arm/mach-footbridge/common.c index 41febc796b1c..e3bc3f6f6b10 100644 --- a/arch/arm/mach-footbridge/common.c +++ b/arch/arm/mach-footbridge/common.c @@ -32,12 +32,13 @@ unsigned int mem_fclk_21285 = 50000000; EXPORT_SYMBOL(mem_fclk_21285); -static void __init early_fclk(char **arg) +static int __init early_fclk(char *arg) { - mem_fclk_21285 = simple_strtoul(*arg, arg, 0); + mem_fclk_21285 = simple_strtoul(arg, NULL, 0); + return 0; } -__early_param("mem_fclk_21285=", early_fclk); +early_param("mem_fclk_21285", early_fclk); static int __init 
parse_tag_memclk(const struct tag *tag) { diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index a04ffbbbe253..a340569b991e 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -32,19 +32,21 @@ static unsigned long phys_initrd_start __initdata = 0; static unsigned long phys_initrd_size __initdata = 0; -static void __init early_initrd(char **p) +static int __init early_initrd(char *p) { unsigned long start, size; + char *endp; - start = memparse(*p, p); - if (**p == ',') { - size = memparse((*p) + 1, p); + start = memparse(p, &endp); + if (*endp == ',') { + size = memparse(endp + 1, NULL); phys_initrd_start = start; phys_initrd_size = size; } + return 0; } -__early_param("initrd=", early_initrd); +early_param("initrd", early_initrd); static int __init parse_tag_initrd(const struct tag *tag) { diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 1708da82da96..88f5d71248d9 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -100,18 +100,17 @@ static struct cachepolicy cache_policies[] __initdata = { * writebuffer to be turned off. (Note: the write * buffer should not be on and the cache off). */ -static void __init early_cachepolicy(char **p) +static int __init early_cachepolicy(char *p) { int i; for (i = 0; i < ARRAY_SIZE(cache_policies); i++) { int len = strlen(cache_policies[i].policy); - if (memcmp(*p, cache_policies[i].policy, len) == 0) { + if (memcmp(p, cache_policies[i].policy, len) == 0) { cachepolicy = i; cr_alignment &= ~cache_policies[i].cr_mask; cr_no_alignment &= ~cache_policies[i].cr_mask; - *p += len; break; } } @@ -130,36 +129,37 @@ static void __init early_cachepolicy(char **p) } flush_cache_all(); set_cr(cr_alignment); + return 0; } -__early_param("cachepolicy=", early_cachepolicy); +early_param("cachepolicy", early_cachepolicy); -static void __init early_nocache(char **__unused) +static int __init early_nocache(char *__unused) { char *p = "buffered"; printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); - early_cachepolicy(&p); + early_cachepolicy(p); + return 0; } -__early_param("nocache", early_nocache); +early_param("nocache", early_nocache); -static void __init early_nowrite(char **__unused) +static int __init early_nowrite(char *__unused) { char *p = "uncached"; printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); - early_cachepolicy(&p); + early_cachepolicy(p); + return 0; } -__early_param("nowb", early_nowrite); +early_param("nowb", early_nowrite); -static void __init early_ecc(char **p) +static int __init early_ecc(char *p) { - if (memcmp(*p, "on", 2) == 0) { + if (memcmp(p, "on", 2) == 0) ecc_mask = PMD_PROTECTION; - *p += 2; - } else if (memcmp(*p, "off", 3) == 0) { + else if (memcmp(p, "off", 3) == 0) ecc_mask = 0; - *p += 3; - } + return 0; } -__early_param("ecc=", early_ecc); +early_param("ecc", early_ecc); static int __init noalign_setup(char *__unused) { @@ -670,9 +670,9 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M; * bytes. This can be used to increase (or decrease) the vmalloc * area - the default is 128m. 
*/ -static void __init early_vmalloc(char **arg) +static int __init early_vmalloc(char *arg) { - vmalloc_reserve = memparse(*arg, arg); + vmalloc_reserve = memparse(arg, NULL); if (vmalloc_reserve < SZ_16M) { vmalloc_reserve = SZ_16M; @@ -687,8 +687,9 @@ static void __init early_vmalloc(char **arg) "vmalloc area is too big, limiting to %luMB\n", vmalloc_reserve >> 20); } + return 0; } -__early_param("vmalloc=", early_vmalloc); +early_param("vmalloc", early_vmalloc); #define VMALLOC_MIN (void *)(VMALLOC_END - vmalloc_reserve) -- cgit v1.2.3 From 11805bcfa411c816b7c76fc40724be6733c74ffc Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 26 Jan 2010 19:09:42 +0100 Subject: ARM: 5905/1: ARM: Global ASID allocation on SMP The current ASID allocation algorithm doesn't ensure the notification of the other CPUs when the ASID rolls over. This may lead to two processes using the same ASID (but different generation) or multiple threads of the same process using different ASIDs. This patch adds the broadcasting of the ASID rollover event to the other CPUs. To avoid a race on multiple CPUs modifying "cpu_last_asid" during the handling of the broadcast, the ASID numbering now starts at "smp_processor_id() + 1". At rollover, the cpu_last_asid will be set to NR_CPUS. Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- arch/arm/include/asm/mmu.h | 1 + arch/arm/include/asm/mmu_context.h | 15 +++++ arch/arm/mm/context.c | 124 ++++++++++++++++++++++++++++++++----- 3 files changed, 126 insertions(+), 14 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/mmu.h b/arch/arm/include/asm/mmu.h index b561584d04a1..68870c776671 100644 --- a/arch/arm/include/asm/mmu.h +++ b/arch/arm/include/asm/mmu.h @@ -6,6 +6,7 @@ typedef struct { #ifdef CONFIG_CPU_HAS_ASID unsigned int id; + spinlock_t id_lock; #endif unsigned int kvm_seq; } mm_context_t; diff --git a/arch/arm/include/asm/mmu_context.h b/arch/arm/include/asm/mmu_context.h index de6cefb329dd..a0b3cac0547c 100644 --- a/arch/arm/include/asm/mmu_context.h +++ b/arch/arm/include/asm/mmu_context.h @@ -43,12 +43,23 @@ void __check_kvm_seq(struct mm_struct *mm); #define ASID_FIRST_VERSION (1 << ASID_BITS) extern unsigned int cpu_last_asid; +#ifdef CONFIG_SMP +DECLARE_PER_CPU(struct mm_struct *, current_mm); +#endif void __init_new_context(struct task_struct *tsk, struct mm_struct *mm); void __new_context(struct mm_struct *mm); static inline void check_context(struct mm_struct *mm) { + /* + * This code is executed with interrupts enabled. Therefore, + * mm->context.id cannot be updated to the latest ASID version + * on a different CPU (and condition below not triggered) + * without first getting an IPI to reset the context. The + * alternative is to take a read_lock on mm->context.id_lock + * (after changing its type to rwlock_t). 
+ */ if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) __new_context(mm); @@ -108,6 +119,10 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next, __flush_icache_all(); #endif if (!cpumask_test_and_set_cpu(cpu, mm_cpumask(next)) || prev != next) { +#ifdef CONFIG_SMP + struct mm_struct **crt_mm = &per_cpu(current_mm, cpu); + *crt_mm = next; +#endif check_context(next); cpu_switch_mm(next->pgd, next); if (cache_is_vivt()) diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index a9e22e31eaa1..b0ee9ba3cfab 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -10,12 +10,17 @@ #include #include #include +#include +#include #include #include static DEFINE_SPINLOCK(cpu_asid_lock); unsigned int cpu_last_asid = ASID_FIRST_VERSION; +#ifdef CONFIG_SMP +DEFINE_PER_CPU(struct mm_struct *, current_mm); +#endif /* * We fork()ed a process, and we need a new context for the child @@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION; void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) { mm->context.id = 0; + spin_lock_init(&mm->context.id_lock); } +static void flush_context(void) +{ + /* set the reserved ASID before flushing the TLB */ + asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); + isb(); + local_flush_tlb_all(); + if (icache_is_vivt_asid_tagged()) { + __flush_icache_all(); + dsb(); + } +} + +#ifdef CONFIG_SMP + +static void set_mm_context(struct mm_struct *mm, unsigned int asid) +{ + unsigned long flags; + + /* + * Locking needed for multi-threaded applications where the + * same mm->context.id could be set from different CPUs during + * the broadcast. This function is also called via IPI so the + * mm->context.id_lock has to be IRQ-safe. + */ + spin_lock_irqsave(&mm->context.id_lock, flags); + if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) { + /* + * Old version of ASID found. Set the new one and + * reset mm_cpumask(mm). + */ + mm->context.id = asid; + cpumask_clear(mm_cpumask(mm)); + } + spin_unlock_irqrestore(&mm->context.id_lock, flags); + + /* + * Set the mm_cpumask(mm) bit for the current CPU. + */ + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); +} + +/* + * Reset the ASID on the current CPU. This function call is broadcast + * from the CPU handling the ASID rollover and holding cpu_asid_lock. + */ +static void reset_context(void *info) +{ + unsigned int asid; + unsigned int cpu = smp_processor_id(); + struct mm_struct *mm = per_cpu(current_mm, cpu); + + /* + * Check if a current_mm was set on this CPU as it might still + * be in the early booting stages and using the reserved ASID. + */ + if (!mm) + return; + + smp_rmb(); + asid = cpu_last_asid + cpu + 1; + + flush_context(); + set_mm_context(mm, asid); + + /* set the new ASID */ + asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id)); + isb(); +} + +#else + +static inline void set_mm_context(struct mm_struct *mm, unsigned int asid) +{ + mm->context.id = asid; + cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); +} + +#endif + void __new_context(struct mm_struct *mm) { unsigned int asid; spin_lock(&cpu_asid_lock); +#ifdef CONFIG_SMP + /* + * Check the ASID again, in case the change was broadcast from + * another CPU before we acquired the lock. 
*/ + if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) { + cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm)); + spin_unlock(&cpu_asid_lock); + return; + } +#endif + /* + * At this point, it is guaranteed that the current mm (with + * an old ASID) isn't active on any other CPU since the ASIDs + * are changed simultaneously via IPI. + */ asid = ++cpu_last_asid; if (asid == 0) asid = cpu_last_asid = ASID_FIRST_VERSION; @@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm) * to start a new version and flush the TLB. */ if (unlikely((asid & ~ASID_MASK) == 0)) { - asid = ++cpu_last_asid; - /* set the reserved ASID before flushing the TLB */ - asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n" - : - : "r" (0)); - isb(); - flush_tlb_all(); - if (icache_is_vivt_asid_tagged()) { - __flush_icache_all(); - dsb(); - } + asid = cpu_last_asid + smp_processor_id() + 1; + flush_context(); +#ifdef CONFIG_SMP + smp_wmb(); + smp_call_function(reset_context, NULL, 1); +#endif + cpu_last_asid += NR_CPUS; } - spin_unlock(&cpu_asid_lock); - cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id())); - mm->context.id = asid; + set_mm_context(mm, asid); + spin_unlock(&cpu_asid_lock); } -- cgit v1.2.3 From 1a28e3d977860dc760909083df625b300f695680 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Mon, 1 Feb 2010 23:30:26 +0100 Subject: ARM: 5911/1: ARM: Select CPU_32v6K for CPU_V7 only if ARCH_OMAP2 is not selected Otherwise the kernel built with both CPU_V6 and CPU_V7 will not boot on omap2. Signed-off-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/Kconfig | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index baf638487a2d..4c2e90dc549c 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -399,7 +399,7 @@ config CPU_V6 config CPU_32v6K bool "Support ARM V6K processor extensions" if !SMP depends on CPU_V6 - default y if SMP && !ARCH_MX3 + default y if SMP && !(ARCH_MX3 || ARCH_OMAP2) help Say Y here if your ARMv6 processor supports the 'K' extension. This enables the kernel to use some instructions not present @@ -410,7 +410,7 @@ config CPU_32v6K # ARMv7 config CPU_V7 bool "Support ARM V7 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX - select CPU_32v6K + select CPU_32v6K if !ARCH_OMAP2 select CPU_32v7 select CPU_ABRT_EV7 select CPU_PABRT_V7 -- cgit v1.2.3 From 424d6b145f863d012c540082d0c1afb5bb4dea48 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 4 Feb 2010 19:35:06 +0100 Subject: ARM: 5916/1: ARM: L2 : Add maintenance by line helper functions This patch adds the cache maintenance by line helper functions.
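The value of the refactoring is that every range loop now funnels through a single helper per operation type, so a later change (such as the 588369 errata workaround further below) only has to replace one function. A condensed sketch of the pattern, not the exact kernel code:

/* one tiny choke point per maintenance operation: wait until the
 * controller has retired the previous background operation on this
 * register, then queue the next line (cache_wait() + writel() pair) */
static inline void l2x0_line_op(void __iomem *reg, unsigned long addr)
{
	while (readl(reg) & 1)
		cpu_relax();
	writel(addr, reg);
}

/* callers collapse from wait+write pairs to, e.g.:
 *	l2x0_line_op(base + L2X0_CLEAN_LINE_PA, start);
 */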
Signed-off-by: Santosh Shilimkar Acked-by: Catalin Marinas Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/cache-l2x0.c | 36 ++++++++++++++++++++++++++---------- 1 file changed, 26 insertions(+), 10 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index cb8fc6573b1b..1a14d18e5713 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -42,6 +42,27 @@ static inline void cache_sync(void) cache_wait(base + L2X0_CACHE_SYNC, 1); } +static inline void l2x0_clean_line(unsigned long addr) +{ + void __iomem *base = l2x0_base; + cache_wait(base + L2X0_CLEAN_LINE_PA, 1); + writel(addr, base + L2X0_CLEAN_LINE_PA); +} + +static inline void l2x0_inv_line(unsigned long addr) +{ + void __iomem *base = l2x0_base; + cache_wait(base + L2X0_INV_LINE_PA, 1); + writel(addr, base + L2X0_INV_LINE_PA); +} + +static inline void l2x0_flush_line(unsigned long addr) +{ + void __iomem *base = l2x0_base; + cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); + writel(addr, base + L2X0_CLEAN_INV_LINE_PA); +} + static inline void l2x0_inv_all(void) { unsigned long flags; @@ -62,23 +83,20 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) spin_lock_irqsave(&l2x0_lock, flags); if (start & (CACHE_LINE_SIZE - 1)) { start &= ~(CACHE_LINE_SIZE - 1); - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); - writel(start, base + L2X0_CLEAN_INV_LINE_PA); + l2x0_flush_line(start); start += CACHE_LINE_SIZE; } if (end & (CACHE_LINE_SIZE - 1)) { end &= ~(CACHE_LINE_SIZE - 1); - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); - writel(end, base + L2X0_CLEAN_INV_LINE_PA); + l2x0_flush_line(end); } while (start < end) { unsigned long blk_end = start + min(end - start, 4096UL); while (start < blk_end) { - cache_wait(base + L2X0_INV_LINE_PA, 1); - writel(start, base + L2X0_INV_LINE_PA); + l2x0_inv_line(start); start += CACHE_LINE_SIZE; } @@ -103,8 +121,7 @@ static void l2x0_clean_range(unsigned long start, unsigned long end) unsigned long blk_end = start + min(end - start, 4096UL); while (start < blk_end) { - cache_wait(base + L2X0_CLEAN_LINE_PA, 1); - writel(start, base + L2X0_CLEAN_LINE_PA); + l2x0_clean_line(start); start += CACHE_LINE_SIZE; } @@ -129,8 +146,7 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) unsigned long blk_end = start + min(end - start, 4096UL); while (start < blk_end) { - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); - writel(start, base + L2X0_CLEAN_INV_LINE_PA); + l2x0_flush_line(start); start += CACHE_LINE_SIZE; } -- cgit v1.2.3 From d309427e792ea750cdd312e7a92cf6047ae44962 Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 4 Feb 2010 19:37:09 +0100 Subject: ARM: 5917/1: OMAP4: Add L2 Cache support This patch adds L2 Cache support for OMAP4. 
External L2 cache is used in OMAP4 CC: Catalin Marinas Signed-off-by: Santosh Shilimkar Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mach-omap2/board-4430sdp.c | 54 ++++++++++++++++++++++++++++++ arch/arm/mm/Kconfig | 2 +- arch/arm/plat-omap/include/plat/omap44xx.h | 1 + 3 files changed, 56 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index 0c6be6b4a7e2..8ba8fb5b2514 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c @@ -28,6 +28,7 @@ #include #include #include +#include static struct platform_device sdp4430_lcd_device = { .name = "sdp4430_lcd", @@ -50,6 +51,59 @@ static struct omap_board_config_kernel sdp4430_config[] __initdata = { { OMAP_TAG_LCD, &sdp4430_lcd_config }, }; +#ifdef CONFIG_CACHE_L2X0 +noinline void omap_smc1(u32 fn, u32 arg) +{ + register u32 r12 asm("r12") = fn; + register u32 r0 asm("r0") = arg; + + /* This is common routine cache secure monitor API used to + * modify the PL310 secure registers. + * r0 contains the value to be modified and "r12" contains + * the monitor API number. It uses few CPU registers + * internally and hence they need be backed up including + * link register "lr". + * Explicitly save r11 and r12 the compiler generated code + * won't save it. + */ + asm volatile( + "stmfd r13!, {r11,r12}\n" + "dsb\n" + "smc\n" + "ldmfd r13!, {r11,r12}\n" + : "+r" (r0), "+r" (r12) + : + : "r4", "r5", "r10", "lr", "cc"); +} +EXPORT_SYMBOL(omap_smc1); + +static int __init omap_l2_cache_init(void) +{ + void __iomem *l2cache_base; + + /* To avoid code running on other OMAPs in + * multi-omap builds + */ + if (!cpu_is_omap44xx()) + return -ENODEV; + + /* Static mapping, never released */ + l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K); + BUG_ON(!l2cache_base); + + /* Enable PL310 L2 Cache controller */ + omap_smc1(0x102, 0x1); + + /* 32KB way size, 16-way associativity, + * parity disabled + */ + l2x0_init(l2cache_base, 0x0e050000, 0xc0000fff); + + return 0; +} +early_initcall(omap_l2_cache_init); +#endif + static void __init gic_init_irq(void) { void __iomem *base; diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 4c2e90dc549c..e859743024a0 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -754,7 +754,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH config CACHE_L2X0 bool "Enable the L2x0 outer cache controller" depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \ - REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK + REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4 default y select OUTER_CACHE help diff --git a/arch/arm/plat-omap/include/plat/omap44xx.h b/arch/arm/plat-omap/include/plat/omap44xx.h index ef870de43c29..c7d628ecb467 100644 --- a/arch/arm/plat-omap/include/plat/omap44xx.h +++ b/arch/arm/plat-omap/include/plat/omap44xx.h @@ -40,6 +40,7 @@ #define OMAP44XX_GIC_CPU_BASE 0x48240100 #define OMAP44XX_SCU_BASE 0x48240000 #define OMAP44XX_LOCAL_TWD_BASE 0x48240600 +#define OMAP44XX_L2CACHE_BASE 0x48242000 #define OMAP44XX_WKUPGEN_BASE 0x48281000 #define OMAP44XX_MAILBOX_BASE (L4_44XX_BASE + 0xF4000) -- cgit v1.2.3 From 9e65582a8e8715f883a34eea66e0643778ce878d Mon Sep 17 00:00:00 2001 From: Santosh Shilimkar Date: Thu, 4 Feb 2010 19:42:42 +0100 Subject: ARM: 5919/1: ARM: L2 : Errata 588369: Clean & Invalidate do not invalidate clean lines This patch implements the work-around for the 
erratum 588369. The secure API is used to alter the L2 debug register because of trust-zone. This version was updated with comments from Russell and Catalin and generated against the 2.6.33-rc6 mainline kernel. Detailed comments can be found at: http://www.spinics.net/lists/linux-omap/msg23431.html Signed-off-by: Woodruff Richard Signed-off-by: Santosh Shilimkar Acked-by: Catalin Marinas Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/Kconfig | 13 +++++++++++++ arch/arm/mm/cache-l2x0.c | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 762ae536f909..0f1ad743ccdd 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -924,6 +924,19 @@ config ARM_ERRATA_460075 ACTLR register. Note that setting specific bits in the ACTLR register may not be available in non-secure mode. +config PL310_ERRATA_588369 + bool "Clean & Invalidate maintenance operations do not invalidate clean lines" + depends on CACHE_L2X0 && ARCH_OMAP4 + help + The PL310 L2 cache controller implements three types of Clean & + Invalidate maintenance operations: by Physical Address + (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC). + They are architecturally defined to behave as the execution of a + clean operation followed immediately by an invalidate operation, + both performing to the same memory location. This functionality + is not correctly implemented in PL310 as clean lines are not + invalidated as a result of these operations. Note that this errata + uses Texas Instrument's secure monitor api. endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 1a14d18e5713..07334632d3e2 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -56,12 +56,42 @@ static inline void l2x0_inv_line(unsigned long addr) writel(addr, base + L2X0_INV_LINE_PA); } +#ifdef CONFIG_PL310_ERRATA_588369 +static void debug_writel(unsigned long val) +{ + extern void omap_smc1(u32 fn, u32 arg); + + /* + * Texas Instrument secure monitor api to modify the + * PL310 Debug Control Register.
+ */ + omap_smc1(0x100, val); +} + +static inline void l2x0_flush_line(unsigned long addr) +{ + void __iomem *base = l2x0_base; + + /* Clean by PA followed by Invalidate by PA */ + cache_wait(base + L2X0_CLEAN_LINE_PA, 1); + writel(addr, base + L2X0_CLEAN_LINE_PA); + cache_wait(base + L2X0_INV_LINE_PA, 1); + writel(addr, base + L2X0_INV_LINE_PA); +} +#else + +/* Optimised out for non-errata case */ +static inline void debug_writel(unsigned long val) +{ +} + static inline void l2x0_flush_line(unsigned long addr) { void __iomem *base = l2x0_base; cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); writel(addr, base + L2X0_CLEAN_INV_LINE_PA); } +#endif static inline void l2x0_inv_all(void) { @@ -83,13 +113,17 @@ static void l2x0_inv_range(unsigned long start, unsigned long end) spin_lock_irqsave(&l2x0_lock, flags); if (start & (CACHE_LINE_SIZE - 1)) { start &= ~(CACHE_LINE_SIZE - 1); + debug_writel(0x03); l2x0_flush_line(start); + debug_writel(0x00); start += CACHE_LINE_SIZE; } if (end & (CACHE_LINE_SIZE - 1)) { end &= ~(CACHE_LINE_SIZE - 1); + debug_writel(0x03); l2x0_flush_line(end); + debug_writel(0x00); } while (start < end) { @@ -145,10 +179,12 @@ static void l2x0_flush_range(unsigned long start, unsigned long end) while (start < end) { unsigned long blk_end = start + min(end - start, 4096UL); + debug_writel(0x03); while (start < blk_end) { l2x0_flush_line(start); start += CACHE_LINE_SIZE; } + debug_writel(0x00); if (blk_end < end) { spin_unlock_irqrestore(&l2x0_lock, flags); -- cgit v1.2.3 From db9ef1af4879c121c354ad2f653f185f1d50fd89 Mon Sep 17 00:00:00 2001 From: Fenkart/Bostandzhyan Date: Sun, 7 Feb 2010 21:45:47 +0100 Subject: ARM: 5926/1: Add "Virtual kernel memory..." printout. Code based on parisc and x86_32. Tested-by: H Hartley Sweeten Signed-off-by: Andreas Fenkart Signed-off-by: Russell King --- arch/arm/mm/init.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 69 insertions(+), 9 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index a340569b991e..e8e3a74ac5b5 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include @@ -562,7 +563,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi) */ void __init mem_init(void) { - unsigned int codesize, datasize, initsize; + unsigned long reserved_pages, free_pages; int i, node; #ifndef CONFIG_DISCONTIGMEM @@ -598,6 +599,33 @@ void __init mem_init(void) totalram_pages += totalhigh_pages; #endif + reserved_pages = free_pages = 0; + + for_each_online_node(node) { + pg_data_t *n = NODE_DATA(node); + struct page *map = pgdat_page_nr(n, 0) - n->node_start_pfn; + + for_each_nodebank(i, &meminfo, node) { + struct membank *bank = &meminfo.bank[i]; + unsigned int pfn1, pfn2; + struct page *page, *end; + + pfn1 = bank_pfn_start(bank); + pfn2 = bank_pfn_end(bank); + + page = map + pfn1; + end = map + pfn2; + + do { + if (PageReserved(page)) + reserved_pages++; + else if (!page_count(page)) + free_pages++; + page++; + } while (page < end); + } + } + /* * Since our memory may not be contiguous, calculate the * real number of pages we have in this system @@ -610,16 +638,48 @@ void __init mem_init(void) } printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT)); - codesize = _etext - _text; - datasize = _end - _data; - initsize = __init_end - __init_begin; - - printk(KERN_NOTICE "Memory: %luKB available (%dK code, " - "%dK data, %dK init, %luK highmem)\n", - nr_free_pages() << 
(PAGE_SHIFT-10), codesize >> 10, - datasize >> 10, initsize >> 10, + printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n", + nr_free_pages() << (PAGE_SHIFT-10), + free_pages << (PAGE_SHIFT-10), + reserved_pages << (PAGE_SHIFT-10), totalhigh_pages << (PAGE_SHIFT-10)); +#define MLK(b, t) b, t, ((t) - (b)) >> 10 +#define MLM(b, t) b, t, ((t) - (b)) >> 20 +#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) + + printk(KERN_NOTICE "Virtual kernel memory layout:\n" + " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" + " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" + " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" + " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" +#ifdef CONFIG_HIGHMEM + " pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n" +#endif + " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" + " .init : 0x%p" " - 0x%p" " (%4d kB)\n" + " .text : 0x%p" " - 0x%p" " (%4d kB)\n" + " .data : 0x%p" " - 0x%p" " (%4d kB)\n", + + MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + + (PAGE_SIZE)), + MLK(FIXADDR_START, FIXADDR_TOP), + MLM(VMALLOC_START, (unsigned long)VMALLOC_END), + MLM(PAGE_OFFSET, (unsigned long)high_memory), +#ifdef CONFIG_HIGHMEM + MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * + (PAGE_SIZE)), +#endif + MLM(MODULES_VADDR, MODULES_END), + + MLK_ROUNDUP(__init_begin, __init_end), + MLK_ROUNDUP(_text, _etext), + MLK_ROUNDUP(_data, _edata)); + +#undef MLK +#undef MLM +#undef MLK_ROUNDUP + if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; /* -- cgit v1.2.3 From a7bd08c82e4f74387a39eeebb942712f23967420 Mon Sep 17 00:00:00 2001 From: Fenkart/Bostandzhyan Date: Sun, 7 Feb 2010 21:46:33 +0100 Subject: ARM: 5927/1: Make delimiters of DMA area globally visible. Adds DMA area to 'virtual memory map' startup message. Tested-by: H Hartley Sweeten Signed-off-by: Andreas Fenkart Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 19 +++++++++++-------- arch/arm/mm/dma-mapping.c | 3 --- arch/arm/mm/init.c | 6 ++++++ 3 files changed, 17 insertions(+), 11 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 5421d82a2572..f5e693b8bab3 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -76,6 +76,17 @@ */ #define IOREMAP_MAX_ORDER 24 +/* + * Size of DMA-consistent memory region. Must be multiple of 2M, + * between 2MB and 14MB inclusive. + */ +#ifndef CONSISTENT_DMA_SIZE +#define CONSISTENT_DMA_SIZE SZ_2M +#endif + +#define CONSISTENT_END (0xffe00000UL) +#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) + #else /* CONFIG_MMU */ /* @@ -112,14 +123,6 @@ #endif /* !CONFIG_MMU */ -/* - * Size of DMA-consistent memory region. Must be multiple of 2M, - * between 2MB and 14MB inclusive. - */ -#ifndef CONSISTENT_DMA_SIZE -#define CONSISTENT_DMA_SIZE SZ_2M -#endif - /* * Physical vs virtual RAM address space conversion.
These are * private definitions which should NOT be used outside memory.h diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 26325cb5d368..48eedab1609b 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -29,9 +29,6 @@ #error "CONSISTENT_DMA_SIZE must be multiple of 2MiB" #endif -#define CONSISTENT_END (0xffe00000) -#define CONSISTENT_BASE (CONSISTENT_END - CONSISTENT_DMA_SIZE) - #define CONSISTENT_OFFSET(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT) #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT) #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT) diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index e8e3a74ac5b5..bda481e6bc0f 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -651,6 +651,9 @@ void __init mem_init(void) printk(KERN_NOTICE "Virtual kernel memory layout:\n" " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" +#ifdef CONFIG_MMU + " DMA : 0x%08lx - 0x%08lx (%4ld MB)\n" +#endif " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" #ifdef CONFIG_HIGHMEM @@ -664,6 +667,9 @@ void __init mem_init(void) MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + (PAGE_SIZE)), MLK(FIXADDR_START, FIXADDR_TOP), +#ifdef CONFIG_MMU + MLM(CONSISTENT_BASE, CONSISTENT_END), +#endif MLM(VMALLOC_START, (unsigned long)VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), #ifdef CONFIG_HIGHMEM -- cgit v1.2.3 From c931b4f655a1b86c929384e674eb8c31795f3bd7 Mon Sep 17 00:00:00 2001 From: Fenkart/Bostandzhyan Date: Sun, 7 Feb 2010 21:47:17 +0100 Subject: ARM: 5928/1: Change type of VMALLOC_END to unsigned long. Makes it consistent with VMALLOC_START Tested-by: H Hartley Sweeten Signed-off-by: Andreas Fenkart Signed-off-by: Russell King --- arch/arm/include/asm/memory.h | 4 ++-- arch/arm/include/asm/pgtable-nommu.h | 4 ++-- arch/arm/mach-davinci/include/mach/hardware.h | 2 +- arch/arm/mach-dove/include/mach/vmalloc.h | 2 +- arch/arm/mach-ep93xx/include/mach/vmalloc.h | 2 +- arch/arm/mach-gemini/include/mach/vmalloc.h | 2 +- arch/arm/mach-iop32x/include/mach/vmalloc.h | 2 +- arch/arm/mach-iop33x/include/mach/vmalloc.h | 2 +- arch/arm/mach-ixp2000/include/mach/vmalloc.h | 2 +- arch/arm/mach-ixp23xx/include/mach/vmalloc.h | 2 +- arch/arm/mach-ixp4xx/include/mach/vmalloc.h | 2 +- arch/arm/mach-kirkwood/include/mach/vmalloc.h | 2 +- arch/arm/mach-lh7a40x/include/mach/vmalloc.h | 2 +- arch/arm/mach-loki/include/mach/vmalloc.h | 2 +- arch/arm/mach-mmp/include/mach/vmalloc.h | 2 +- arch/arm/mach-mv78xx0/include/mach/vmalloc.h | 2 +- arch/arm/mach-nomadik/include/mach/vmalloc.h | 2 +- arch/arm/mach-ns9xxx/include/mach/vmalloc.h | 2 +- arch/arm/mach-orion5x/include/mach/vmalloc.h | 2 +- arch/arm/mach-pxa/include/mach/vmalloc.h | 2 +- arch/arm/mach-realview/include/mach/vmalloc.h | 2 +- arch/arm/mach-s3c24a0/include/mach/vmalloc.h | 2 +- arch/arm/mach-sa1100/include/mach/vmalloc.h | 2 +- arch/arm/mach-u300/include/mach/vmalloc.h | 2 +- arch/arm/mach-ux500/include/mach/vmalloc.h | 2 +- arch/arm/mach-w90x900/include/mach/vmalloc.h | 2 +- arch/arm/mm/init.c | 2 +- arch/arm/plat-mxc/include/mach/vmalloc.h | 2 +- arch/arm/plat-s3c/include/mach/vmalloc.h | 2 +- arch/arm/plat-stmp3xxx/include/mach/vmalloc.h | 2 +- 30 files changed, 32 insertions(+), 32 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index f5e693b8bab3..4312ee5e3d0b 100644 --- 
a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -104,11 +104,11 @@ #endif #ifndef PHYS_OFFSET -#define PHYS_OFFSET (CONFIG_DRAM_BASE) +#define PHYS_OFFSET UL(CONFIG_DRAM_BASE) #endif #ifndef END_MEM -#define END_MEM (CONFIG_DRAM_BASE + CONFIG_DRAM_SIZE) +#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif #ifndef PAGE_OFFSET diff --git a/arch/arm/include/asm/pgtable-nommu.h b/arch/arm/include/asm/pgtable-nommu.h index b011f2e939aa..013cfcdc4839 100644 --- a/arch/arm/include/asm/pgtable-nommu.h +++ b/arch/arm/include/asm/pgtable-nommu.h @@ -86,8 +86,8 @@ extern unsigned int kobjsize(const void *objp); * All 32bit addresses are effectively valid for vmalloc... * Sort of meaningless for non-VM targets. */ -#define VMALLOC_START 0 -#define VMALLOC_END 0xffffffff +#define VMALLOC_START 0UL +#define VMALLOC_END 0xffffffffUL #define FIRST_USER_ADDRESS (0) diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h index 41c89386e39b..c45ba1f62a11 100644 --- a/arch/arm/mach-davinci/include/mach/hardware.h +++ b/arch/arm/mach-davinci/include/mach/hardware.h @@ -27,7 +27,7 @@ /* * I/O mapping */ -#define IO_PHYS 0x01c00000 +#define IO_PHYS 0x01c00000UL #define IO_OFFSET 0xfd000000 /* Virtual IO = 0xfec00000 */ #define IO_SIZE 0x00400000 #define IO_VIRT (IO_PHYS + IO_OFFSET) diff --git a/arch/arm/mach-dove/include/mach/vmalloc.h b/arch/arm/mach-dove/include/mach/vmalloc.h index 8b2c974755c6..a28792cf761e 100644 --- a/arch/arm/mach-dove/include/mach/vmalloc.h +++ b/arch/arm/mach-dove/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-dove/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfd800000 +#define VMALLOC_END 0xfd800000UL diff --git a/arch/arm/mach-ep93xx/include/mach/vmalloc.h b/arch/arm/mach-ep93xx/include/mach/vmalloc.h index aed21cd3fe2d..1b3f25d03d39 100644 --- a/arch/arm/mach-ep93xx/include/mach/vmalloc.h +++ b/arch/arm/mach-ep93xx/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-ep93xx/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe800000 +#define VMALLOC_END 0xfe800000UL diff --git a/arch/arm/mach-gemini/include/mach/vmalloc.h b/arch/arm/mach-gemini/include/mach/vmalloc.h index 83e536d9436c..45371eb86fcb 100644 --- a/arch/arm/mach-gemini/include/mach/vmalloc.h +++ b/arch/arm/mach-gemini/include/mach/vmalloc.h @@ -7,4 +7,4 @@ * (at your option) any later version. 
*/ -#define VMALLOC_END 0xF0000000 +#define VMALLOC_END 0xf0000000UL diff --git a/arch/arm/mach-iop32x/include/mach/vmalloc.h b/arch/arm/mach-iop32x/include/mach/vmalloc.h index 85ceb09d85f0..c4862d48e583 100644 --- a/arch/arm/mach-iop32x/include/mach/vmalloc.h +++ b/arch/arm/mach-iop32x/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-iop32x/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe000000 +#define VMALLOC_END 0xfe000000UL diff --git a/arch/arm/mach-iop33x/include/mach/vmalloc.h b/arch/arm/mach-iop33x/include/mach/vmalloc.h index f9f99dea9bc4..48331dc23704 100644 --- a/arch/arm/mach-iop33x/include/mach/vmalloc.h +++ b/arch/arm/mach-iop33x/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-iop33x/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe000000 +#define VMALLOC_END 0xfe000000UL diff --git a/arch/arm/mach-ixp2000/include/mach/vmalloc.h b/arch/arm/mach-ixp2000/include/mach/vmalloc.h index d195e35aed3b..61c8dae24f95 100644 --- a/arch/arm/mach-ixp2000/include/mach/vmalloc.h +++ b/arch/arm/mach-ixp2000/include/mach/vmalloc.h @@ -17,4 +17,4 @@ * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) */ -#define VMALLOC_END 0xfb000000 +#define VMALLOC_END 0xfb000000UL diff --git a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h b/arch/arm/mach-ixp23xx/include/mach/vmalloc.h index dd519f678d10..896c56a1c00e 100644 --- a/arch/arm/mach-ixp23xx/include/mach/vmalloc.h +++ b/arch/arm/mach-ixp23xx/include/mach/vmalloc.h @@ -7,4 +7,4 @@ * specific static I/O. */ -#define VMALLOC_END (0xec000000) +#define VMALLOC_END (0xec000000UL) diff --git a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h b/arch/arm/mach-ixp4xx/include/mach/vmalloc.h index 7b3580b53adf..9bcd64d59854 100644 --- a/arch/arm/mach-ixp4xx/include/mach/vmalloc.h +++ b/arch/arm/mach-ixp4xx/include/mach/vmalloc.h @@ -1,5 +1,5 @@ /* * arch/arm/mach-ixp4xx/include/mach/vmalloc.h */ -#define VMALLOC_END (0xFF000000) +#define VMALLOC_END (0xff000000UL) diff --git a/arch/arm/mach-kirkwood/include/mach/vmalloc.h b/arch/arm/mach-kirkwood/include/mach/vmalloc.h index 8f48260dcdad..bf162ca3d2c1 100644 --- a/arch/arm/mach-kirkwood/include/mach/vmalloc.h +++ b/arch/arm/mach-kirkwood/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-kirkwood/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe800000 +#define VMALLOC_END 0xfe800000UL diff --git a/arch/arm/mach-lh7a40x/include/mach/vmalloc.h b/arch/arm/mach-lh7a40x/include/mach/vmalloc.h index 3fbd49490bb9..d62da7358b16 100644 --- a/arch/arm/mach-lh7a40x/include/mach/vmalloc.h +++ b/arch/arm/mach-lh7a40x/include/mach/vmalloc.h @@ -7,4 +7,4 @@ * version 2 as published by the Free Software Foundation. 
* */ -#define VMALLOC_END (0xe8000000) +#define VMALLOC_END (0xe8000000UL) diff --git a/arch/arm/mach-loki/include/mach/vmalloc.h b/arch/arm/mach-loki/include/mach/vmalloc.h index 8dc3bfcbf9f0..5dcbd865443f 100644 --- a/arch/arm/mach-loki/include/mach/vmalloc.h +++ b/arch/arm/mach-loki/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-loki/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe800000 +#define VMALLOC_END 0xfe800000UL diff --git a/arch/arm/mach-mmp/include/mach/vmalloc.h b/arch/arm/mach-mmp/include/mach/vmalloc.h index b60ccaf9fee7..1d0bac003ad0 100644 --- a/arch/arm/mach-mmp/include/mach/vmalloc.h +++ b/arch/arm/mach-mmp/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * linux/arch/arm/mach-mmp/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe000000 +#define VMALLOC_END 0xfe000000UL diff --git a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h b/arch/arm/mach-mv78xx0/include/mach/vmalloc.h index 1c4954386a84..ba26fe98e640 100644 --- a/arch/arm/mach-mv78xx0/include/mach/vmalloc.h +++ b/arch/arm/mach-mv78xx0/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-mv78xx0/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfe000000 +#define VMALLOC_END 0xfe000000UL diff --git a/arch/arm/mach-nomadik/include/mach/vmalloc.h b/arch/arm/mach-nomadik/include/mach/vmalloc.h index be12e31ea528..f83d574d9445 100644 --- a/arch/arm/mach-nomadik/include/mach/vmalloc.h +++ b/arch/arm/mach-nomadik/include/mach/vmalloc.h @@ -1,2 +1,2 @@ -#define VMALLOC_END 0xe8000000 +#define VMALLOC_END 0xe8000000UL diff --git a/arch/arm/mach-ns9xxx/include/mach/vmalloc.h b/arch/arm/mach-ns9xxx/include/mach/vmalloc.h index fe964d3bcc47..c8651974c4b0 100644 --- a/arch/arm/mach-ns9xxx/include/mach/vmalloc.h +++ b/arch/arm/mach-ns9xxx/include/mach/vmalloc.h @@ -11,6 +11,6 @@ #ifndef __ASM_ARCH_VMALLOC_H #define __ASM_ARCH_VMALLOC_H -#define VMALLOC_END (0xf0000000) +#define VMALLOC_END (0xf0000000UL) #endif /* ifndef __ASM_ARCH_VMALLOC_H */ diff --git a/arch/arm/mach-orion5x/include/mach/vmalloc.h b/arch/arm/mach-orion5x/include/mach/vmalloc.h index 7147a297e97f..06b50aeff7b9 100644 --- a/arch/arm/mach-orion5x/include/mach/vmalloc.h +++ b/arch/arm/mach-orion5x/include/mach/vmalloc.h @@ -2,4 +2,4 @@ * arch/arm/mach-orion5x/include/mach/vmalloc.h */ -#define VMALLOC_END 0xfd800000 +#define VMALLOC_END 0xfd800000UL diff --git a/arch/arm/mach-pxa/include/mach/vmalloc.h b/arch/arm/mach-pxa/include/mach/vmalloc.h index e90c5eeb81dd..bfecfbf5f460 100644 --- a/arch/arm/mach-pxa/include/mach/vmalloc.h +++ b/arch/arm/mach-pxa/include/mach/vmalloc.h @@ -8,4 +8,4 @@ * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. 
*/ -#define VMALLOC_END (0xe8000000) +#define VMALLOC_END (0xe8000000UL) diff --git a/arch/arm/mach-realview/include/mach/vmalloc.h b/arch/arm/mach-realview/include/mach/vmalloc.h index fe0de1b507ac..a2a4c6861407 100644 --- a/arch/arm/mach-realview/include/mach/vmalloc.h +++ b/arch/arm/mach-realview/include/mach/vmalloc.h @@ -18,4 +18,4 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define VMALLOC_END 0xf8000000 +#define VMALLOC_END 0xf8000000UL diff --git a/arch/arm/mach-s3c24a0/include/mach/vmalloc.h b/arch/arm/mach-s3c24a0/include/mach/vmalloc.h index 4d4fe4849589..914656820794 100644 --- a/arch/arm/mach-s3c24a0/include/mach/vmalloc.h +++ b/arch/arm/mach-s3c24a0/include/mach/vmalloc.h @@ -12,6 +12,6 @@ #ifndef __ASM_ARCH_VMALLOC_H #define __ASM_ARCH_VMALLOC_H -#define VMALLOC_END (0xE0000000) +#define VMALLOC_END (0xe0000000UL) #endif /* __ASM_ARCH_VMALLOC_H */ diff --git a/arch/arm/mach-sa1100/include/mach/vmalloc.h b/arch/arm/mach-sa1100/include/mach/vmalloc.h index ec8fdc5a3606..b3d002398480 100644 --- a/arch/arm/mach-sa1100/include/mach/vmalloc.h +++ b/arch/arm/mach-sa1100/include/mach/vmalloc.h @@ -1,4 +1,4 @@ /* * arch/arm/mach-sa1100/include/mach/vmalloc.h */ -#define VMALLOC_END (0xe8000000) +#define VMALLOC_END (0xe8000000UL) diff --git a/arch/arm/mach-u300/include/mach/vmalloc.h b/arch/arm/mach-u300/include/mach/vmalloc.h index b00c51a66fbe..ec423b92b81d 100644 --- a/arch/arm/mach-u300/include/mach/vmalloc.h +++ b/arch/arm/mach-u300/include/mach/vmalloc.h @@ -9,4 +9,4 @@ * End must be above the I/O registers and on an even 2MiB boundary. * Author: Linus Walleij */ -#define VMALLOC_END 0xfe800000 +#define VMALLOC_END 0xfe800000UL diff --git a/arch/arm/mach-ux500/include/mach/vmalloc.h b/arch/arm/mach-ux500/include/mach/vmalloc.h index 86cdbbce1842..a4945cb41172 100644 --- a/arch/arm/mach-ux500/include/mach/vmalloc.h +++ b/arch/arm/mach-ux500/include/mach/vmalloc.h @@ -15,4 +15,4 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#define VMALLOC_END 0xf0000000 +#define VMALLOC_END 0xf0000000UL diff --git a/arch/arm/mach-w90x900/include/mach/vmalloc.h b/arch/arm/mach-w90x900/include/mach/vmalloc.h index 2f9dfb928533..b067e44500a4 100644 --- a/arch/arm/mach-w90x900/include/mach/vmalloc.h +++ b/arch/arm/mach-w90x900/include/mach/vmalloc.h @@ -18,6 +18,6 @@ #ifndef __ASM_ARCH_VMALLOC_H #define __ASM_ARCH_VMALLOC_H -#define VMALLOC_END (0xE0000000) +#define VMALLOC_END (0xe0000000UL) #endif /* __ASM_ARCH_VMALLOC_H */ diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index bda481e6bc0f..3a2077239474 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -670,7 +670,7 @@ void __init mem_init(void) #ifdef CONFIG_MMU MLM(CONSISTENT_BASE, CONSISTENT_END), #endif - MLM(VMALLOC_START, (unsigned long)VMALLOC_END), + MLM(VMALLOC_START, VMALLOC_END), MLM(PAGE_OFFSET, (unsigned long)high_memory), #ifdef CONFIG_HIGHMEM MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) * diff --git a/arch/arm/plat-mxc/include/mach/vmalloc.h b/arch/arm/plat-mxc/include/mach/vmalloc.h index 62d97623412f..44243a278434 100644 --- a/arch/arm/plat-mxc/include/mach/vmalloc.h +++ b/arch/arm/plat-mxc/include/mach/vmalloc.h @@ -21,6 +21,6 @@ #define __ASM_ARCH_MXC_VMALLOC_H__ /* vmalloc ending address */ -#define VMALLOC_END 0xF4000000 +#define VMALLOC_END 0xf4000000UL #endif /* __ASM_ARCH_MXC_VMALLOC_H__ */ diff --git 
a/arch/arm/plat-s3c/include/mach/vmalloc.h b/arch/arm/plat-s3c/include/mach/vmalloc.h index bfd2ca6e3074..299d95f365c9 100644 --- a/arch/arm/plat-s3c/include/mach/vmalloc.h +++ b/arch/arm/plat-s3c/include/mach/vmalloc.h @@ -15,6 +15,6 @@ #ifndef __ASM_ARCH_VMALLOC_H #define __ASM_ARCH_VMALLOC_H -#define VMALLOC_END (0xE0000000) +#define VMALLOC_END (0xe0000000UL) #endif /* __ASM_ARCH_VMALLOC_H */ diff --git a/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h b/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h index 541b880c1863..943c1a29d641 100644 --- a/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h +++ b/arch/arm/plat-stmp3xxx/include/mach/vmalloc.h @@ -9,4 +9,4 @@ * http://www.opensource.org/licenses/gpl-license.html * http://www.gnu.org/copyleft/gpl.html */ -#define VMALLOC_END (0xF0000000) +#define VMALLOC_END 0xf0000000UL -- cgit v1.2.3 From a183927213df225bd93d21857b6aaafbb95e590d Mon Sep 17 00:00:00 2001 From: Fenkart/Bostandzhyan Date: Sun, 7 Feb 2010 21:47:58 +0100 Subject: ARM: 5929/1: Add checks to detect overlap of memory regions. Tested-by: H Hartley Sweeten Signed-off-by: Andreas Fenkart Signed-off-by: Russell King --- arch/arm/mm/init.c | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 3a2077239474..7829cb5425f5 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -686,6 +686,23 @@ void __init mem_init(void) #undef MLM #undef MLK_ROUNDUP + /* + * Check boundaries twice: Some fundamental inconsistencies can + * be detected at build time already. + */ +#ifdef CONFIG_MMU + BUILD_BUG_ON(VMALLOC_END > CONSISTENT_BASE); + BUG_ON(VMALLOC_END > CONSISTENT_BASE); + + BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR); + BUG_ON(TASK_SIZE > MODULES_VADDR); +#endif + +#ifdef CONFIG_HIGHMEM + BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); + BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET); +#endif + if (PAGE_SIZE >= 16384 && num_physpages <= 128) { extern int sysctl_overcommit_memory; /* -- cgit v1.2.3 From d6d502fa4be1acd01971476fc732c95a4da16d90 Mon Sep 17 00:00:00 2001 From: Kukjin Kim Date: Mon, 22 Feb 2010 00:02:59 +0100 Subject: ARM: 5952/1: ARM: MM: Add ARM_L1_CACHE_SHIFT_6 to handle this inside each ARCH Kconfig Add ARM_L1_CACHE_SHIFT_6 to arch/arm/Kconfig to allow CPUs with L1 cache lines which are 64 bytes to indicate this without having to alter the arch/arm/mm/Kconfig entry each time. Update the mm Kconfig so that the ARM_L1_CACHE_SHIFT default value uses this and change OMAP3 and S5PC1XX to select ARM_L1_CACHE_SHIFT_6. Acked-by: Ben Dooks Acked-by: Tony Lindgren Signed-off-by: Kukjin Kim Signed-off-by: Russell King --- arch/arm/Kconfig | 6 ++++++ arch/arm/mm/Kconfig | 2 +- arch/arm/plat-omap/Kconfig | 1 + 3 files changed, 8 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0f1ad743ccdd..04247ba31d69 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -165,6 +165,11 @@ config ARCH_MTD_XIP config GENERIC_HARDIRQS_NO__DO_IRQ def_bool y +config ARM_L1_CACHE_SHIFT_6 + bool + help + Setting ARM L1 cache line size to 64 Bytes.
+ if OPROFILE config OPROFILE_ARMV6 @@ -642,6 +647,7 @@ config ARCH_S5PC1XX select GENERIC_GPIO select HAVE_CLK select CPU_V7 + select ARM_L1_CACHE_SHIFT_6 help Samsung S5PC1XX series based systems diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index e859743024a0..c4ed9f93f646 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -779,5 +779,5 @@ config CACHE_XSC3L2 config ARM_L1_CACHE_SHIFT int - default 6 if ARCH_OMAP3 || ARCH_S5PC1XX + default 6 if ARM_L1_CACHE_SHIFT_6 default 5 diff --git a/arch/arm/plat-omap/Kconfig b/arch/arm/plat-omap/Kconfig index e2ea04a4c8a1..2e3eec660864 100644 --- a/arch/arm/plat-omap/Kconfig +++ b/arch/arm/plat-omap/Kconfig @@ -22,6 +22,7 @@ config ARCH_OMAP3 bool "TI OMAP3" select CPU_V7 select COMMON_CLKDEV + select ARM_L1_CACHE_SHIFT_6 config ARCH_OMAP4 bool "TI OMAP4" -- cgit v1.2.3
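For context on where the new symbol ends up, a sketch of how the Kconfig value is consumed, paraphrasing the era's arch/arm/include/asm/cache.h; the structure below is a made-up example, not from the patch:

/*
 * asm/cache.h reduces to:
 *	#define L1_CACHE_SHIFT	CONFIG_ARM_L1_CACHE_SHIFT
 *	#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 * so once a platform selects ARM_L1_CACHE_SHIFT_6, anything declared
 * ____cacheline_aligned is aligned (and its size padded) to 64 bytes
 * rather than 32, keeping hot data on separate cache lines.
 */
struct example_stats {
	unsigned long hits;
	unsigned long misses;
} ____cacheline_aligned;	/* one full line on OMAP3/S5PC1XX */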