From 5aafec15bdc54cf0722696c95091d7bd674bfcad Mon Sep 17 00:00:00 2001 From: Russell King Date: Tue, 1 Nov 2011 10:15:27 +0000 Subject: ARM: restart: remove argument to setup_mm_for_reboot() setup_mm_for_reboot() doesn't make use of its argument, so remove it. Acked-by: Nicolas Pitre Acked-by: Will Deacon Acked-by: H Hartley Sweeten Acked-by: Tony Lindgren Signed-off-by: Russell King --- arch/arm/mm/idmap.c | 2 +- arch/arm/mm/nommu.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 2be9139a4ef3..296ad2eaddb0 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -78,7 +78,7 @@ void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) * the user-mode pages. This will then ensure that we have predictable * results when turning the mmu off */ -void setup_mm_for_reboot(char mode) +void setup_mm_for_reboot(void) { /* * We need to access to user-mode page tables here. For kernel threads diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 941a98c9e8aa..88417514b2c6 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -43,7 +43,7 @@ void __init paging_init(struct machine_desc *mdesc) /* * We don't need to do anything here for nommu machines. */ -void setup_mm_for_reboot(char mode) +void setup_mm_for_reboot(void) { } -- cgit v1.2.3 From 27a3f0e91bed0f4dcf0a363e5f5938126d1ff4e5 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 25 Aug 2011 19:10:29 -0400 Subject: ARM: sort the meminfo array earlier The meminfo array has to be sorted before sanity_check_meminfo() in arch/arm/mm/mmu.c is called for it to work properly. This also allows for a simpler find_limits() in arch/arm/mm/init.c. The sort is moved to arch/arm/kernel/setup.c because that's where the meminfo array is populated. Eventually this should be improved upon to make the memory bank parser a bit more robust against problems such as overlapping memory ranges. Signed-off-by: Nicolas Pitre --- arch/arm/kernel/setup.c | 8 ++++++++ arch/arm/mm/init.c | 38 ++++++++------------------------------ 2 files changed, 16 insertions(+), 30 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index 7e7977ab994f..44510f879312 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -888,6 +889,12 @@ static struct machine_desc * __init setup_machine_tags(unsigned int nr) return mdesc; } +static int __init meminfo_cmp(const void *_a, const void *_b) +{ + const struct membank *a = _a, *b = _b; + long cmp = bank_pfn_start(a) - bank_pfn_start(b); + return cmp < 0 ? -1 : cmp > 0 ? 
1 : 0; +} void __init setup_arch(char **cmdline_p) { @@ -916,6 +923,7 @@ void __init setup_arch(char **cmdline_p) parse_early_param(); + sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); sanity_check_meminfo(); arm_memblock_init(&meminfo, mdesc); diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index fbdd12ea3a58..d366051e14fe 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -20,7 +20,6 @@ #include #include #include -#include #include #include @@ -134,30 +133,18 @@ void show_mem(unsigned int filter) } static void __init find_limits(unsigned long *min, unsigned long *max_low, - unsigned long *max_high) + unsigned long *max_high) { struct meminfo *mi = &meminfo; int i; - *min = -1UL; - *max_low = *max_high = 0; - - for_each_bank (i, mi) { - struct membank *bank = &mi->bank[i]; - unsigned long start, end; - - start = bank_pfn_start(bank); - end = bank_pfn_end(bank); - - if (*min > start) - *min = start; - if (*max_high < end) - *max_high = end; - if (bank->highmem) - continue; - if (*max_low < end) - *max_low = end; - } + /* This assumes the meminfo array is properly sorted */ + *min = bank_pfn_start(&mi->bank[0]); + for_each_bank (i, mi) + if (mi->bank[i].highmem) + break; + *max_low = bank_pfn_end(&mi->bank[i - 1]); + *max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]); } static void __init arm_bootmem_init(unsigned long start_pfn, @@ -319,19 +306,10 @@ static void arm_memory_present(void) } #endif -static int __init meminfo_cmp(const void *_a, const void *_b) -{ - const struct membank *a = _a, *b = _b; - long cmp = bank_pfn_start(a) - bank_pfn_start(b); - return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; -} - void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc) { int i; - sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL); - memblock_init(); for (i = 0; i < mi->nr_banks; i++) memblock_add(mi->bank[i].start, mi->bank[i].size); -- cgit v1.2.3 From 55a8173cfe1c6b489f8f5705282c762aed2e265e Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Sun, 18 Sep 2011 22:40:00 -0400 Subject: ARM: move initialization of the high_memory variable earlier Some upcoming changes must know the VMALLOC_START value, which is based on high_memory, before bootmem_init() is called. The best location to set it is in sanity_check_meminfo() where the needed computation is already done, and in the non MMU case it is trivial to do now that the meminfo array is already sorted at that point. Signed-off-by: Nicolas Pitre --- arch/arm/mm/init.c | 2 -- arch/arm/mm/mmu.c | 1 + arch/arm/mm/nommu.c | 2 ++ 3 files changed, 3 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index d366051e14fe..786adddf1a86 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -381,8 +381,6 @@ void __init bootmem_init(void) */ arm_bootmem_free(min, max_low, max_high); - high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1; - /* * This doesn't seem to be used by the Linux memory manager any * more, but is used by ll_rw_block. 
If we can get rid of it, we diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index dc8c550e6cbd..0aa8b7d5b21d 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -860,6 +860,7 @@ void __init sanity_check_meminfo(void) } #endif meminfo.nr_banks = j; + high_memory = __va(lowmem_limit - 1) + 1; memblock_set_current_limit(lowmem_limit); } diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index 941a98c9e8aa..9348730a6ba5 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c @@ -29,6 +29,8 @@ void __init arm_mm_memblock_reserve(void) void __init sanity_check_meminfo(void) { + phys_addr_t end = bank_phys_end(&meminfo.bank[meminfo.nr_banks - 1]); + high_memory = __va(end - 1) + 1; } /* -- cgit v1.2.3 From 0536bdf33faff4d940ac094c77998cfac368cfff Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 25 Aug 2011 00:35:59 -0400 Subject: ARM: move iotable mappings within the vmalloc region In order to remove the build time variation between different SOCs with regards to VMALLOC_END, the iotable mappings are now allocated inside the vmalloc region. This allows for VMALLOC_END to be identical across all machines. The value for VMALLOC_END is now set to 0xff000000 which is right where the consistent DMA area starts. To accommodate all static mappings on machines with possible highmem usage, the default vmalloc area size is changed to 240 MB so that VMALLOC_START is no higher than 0xf0000000 by default. Signed-off-by: Nicolas Pitre Tested-by: Stephen Warren Tested-by: Kevin Hilman Tested-by: Jamie Iles --- Documentation/arm/memory.txt | 11 +++++----- arch/arm/include/asm/pgtable.h | 8 +------ arch/arm/mm/mmu.c | 49 ++++++++++++++++++++++++++++++------------ 3 files changed, 41 insertions(+), 27 deletions(-) (limited to 'arch/arm/mm') diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt index 771d48d3b335..208a2d465b92 100644 --- a/Documentation/arm/memory.txt +++ b/Documentation/arm/memory.txt @@ -51,15 +51,14 @@ ffc00000 ffefffff DMA memory mapping region. Memory returned ff000000 ffbfffff Reserved for future expansion of DMA mapping region. -VMALLOC_END feffffff Free for platform use, recommended. - VMALLOC_END must be aligned to a 2MB - boundary. - VMALLOC_START VMALLOC_END-1 vmalloc() / ioremap() space. Memory returned by vmalloc/ioremap will be dynamically placed in this region. - VMALLOC_START may be based upon the value - of the high_memory variable. + Machine specific static mappings are also + located here through iotable_init(). + VMALLOC_START is based upon the value + of the high_memory variable, and VMALLOC_END + is equal to 0xff000000. PAGE_OFFSET high_memory-1 Kernel direct-mapped RAM region. This maps the platforms RAM, and typically diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 9451dce3a553..6cdd55cb0b8c 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -21,7 +21,6 @@ #else #include -#include #include #include @@ -33,15 +32,10 @@ * any out-of-bounds memory accesses will hopefully be caught. * The vmalloc() routines leaves a hole of 4kB between each vmalloced * area for the same reason. ;) - * - * Note that platforms may override VMALLOC_START, but they must provide - * VMALLOC_END. VMALLOC_END defines the (exclusive) limit of this space, - * which may not overlap IO space. 
*/ -#ifndef VMALLOC_START #define VMALLOC_OFFSET (8*1024*1024) #define VMALLOC_START (((unsigned long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1)) -#endif +#define VMALLOC_END 0xff000000UL #define LIBRARY_TEXT_START 0x0c000000 diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 0aa8b7d5b21d..c61481577ae1 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -529,13 +530,18 @@ EXPORT_SYMBOL(phys_mem_access_prot); #define vectors_base() (vectors_high() ? 0xffff0000 : 0) -static void __init *early_alloc(unsigned long sz) +static void __init *early_alloc_aligned(unsigned long sz, unsigned long align) { - void *ptr = __va(memblock_alloc(sz, sz)); + void *ptr = __va(memblock_alloc(sz, align)); memset(ptr, 0, sz); return ptr; } +static void __init *early_alloc(unsigned long sz) +{ + return early_alloc_aligned(sz, sz); +} + static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) { if (pmd_none(*pmd)) { @@ -685,9 +691,10 @@ static void __init create_mapping(struct map_desc *md) } if ((md->type == MT_DEVICE || md->type == MT_ROM) && - md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { + md->virtual >= PAGE_OFFSET && + (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { printk(KERN_WARNING "BUG: mapping for 0x%08llx" - " at 0x%08lx overlaps vmalloc space\n", + " at 0x%08lx out of vmalloc space\n", (long long)__pfn_to_phys((u64)md->pfn), md->virtual); } @@ -729,18 +736,32 @@ static void __init create_mapping(struct map_desc *md) */ void __init iotable_init(struct map_desc *io_desc, int nr) { - int i; + struct map_desc *md; + struct vm_struct *vm; + + if (!nr) + return; - for (i = 0; i < nr; i++) - create_mapping(io_desc + i); + vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm)); + + for (md = io_desc; nr; md++, nr--) { + create_mapping(md); + vm->addr = (void *)(md->virtual & PAGE_MASK); + vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); + vm->phys_addr = __pfn_to_phys(md->pfn); + vm->flags = VM_IOREMAP; + vm->caller = iotable_init; + vm_area_add_early(vm++); + } } -static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M); +static void * __initdata vmalloc_min = + (void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET); /* * vmalloc=size forces the vmalloc area to be exactly 'size' * bytes. This can be used to increase (or decrease) the vmalloc - * area - the default is 128m. + * area - the default is 240m. */ static int __init early_vmalloc(char *arg) { @@ -891,10 +912,10 @@ static inline void prepare_page_table(void) /* * Clear out all the kernel space mappings, except for the first - * memory bank, up to the end of the vmalloc region. + * memory bank, up to the vmalloc region. */ for (addr = __phys_to_virt(end); - addr < VMALLOC_END; addr += PMD_SIZE) + addr < VMALLOC_START; addr += PMD_SIZE) pmd_clear(pmd_off_k(addr)); } @@ -921,8 +942,8 @@ void __init arm_mm_memblock_reserve(void) } /* - * Set up device the mappings. Since we clear out the page tables for all - * mappings above VMALLOC_END, we will remove any debug device mappings. + * Set up the device mappings. Since we clear out the page tables for all + * mappings above VMALLOC_START, we will remove any debug device mappings. * This means you have to be careful how you debug this function, or any * called function. This means you can't use any function or debugging * method which may touch any device, otherwise the kernel _will_ crash. 
@@ -937,7 +958,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc) */ vectors_page = early_alloc(PAGE_SIZE); - for (addr = VMALLOC_END; addr; addr += PMD_SIZE) + for (addr = VMALLOC_START; addr; addr += PMD_SIZE) pmd_clear(pmd_off_k(addr)); /* -- cgit v1.2.3 From 6ee723a6570a897208b76ab3e9a495e9106b2f8c Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Thu, 15 Sep 2011 22:12:19 -0400 Subject: ARM: simplify __iounmap() when dealing with section based mapping Firstly, there is no need to have a double pointer here as we're only walking the vmlist and not modifying it. Secondly, for the same reason, we don't need a write lock but only a read lock here, since the lock only protects the coherency of the list nothing else. Lastly, the reason for holding a lock is not what the comment says, so let's remove that misleading piece of information. Signed-off-by: Nicolas Pitre --- arch/arm/mm/ioremap.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index bdb248c4f55c..bc7d9bd766d1 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -314,26 +314,24 @@ void __iounmap(volatile void __iomem *io_addr) { void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); #ifndef CONFIG_SMP - struct vm_struct **p, *tmp; + struct vm_struct *vm; /* * If this is a section based mapping we need to handle it * specially as the VM subsystem does not know how to handle - * such a beast. We need the lock here b/c we need to clear - * all the mappings before the area can be reclaimed - * by someone else. + * such a beast. */ - write_lock(&vmlist_lock); - for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) { - if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) { - if (tmp->flags & VM_ARM_SECTION_MAPPING) { - unmap_area_sections((unsigned long)tmp->addr, - tmp->size); + read_lock(&vmlist_lock); + for (vm = vmlist; vm; vm = vm->next) { + if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) { + if (vm->flags & VM_ARM_SECTION_MAPPING) { + unmap_area_sections((unsigned long)vm->addr, + vm->size); } break; } } - write_unlock(&vmlist_lock); + read_unlock(&vmlist_lock); #endif vunmap(addr); -- cgit v1.2.3 From 576d2f2525612ecb5af029a76f21f22a3b82563d Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Fri, 16 Sep 2011 01:14:23 -0400 Subject: ARM: add generic ioremap optimization by reusing static mappings Now that we have all the static mappings from iotable_init() located in the vmalloc area, it is trivial to optimize ioremap by reusing those static mappings when the requested physical area fits in one of them, and so in a generic way for all platforms. Signed-off-by: Nicolas Pitre Tested-by: Stephen Warren Tested-by: Kevin Hilman Tested-by: Jamie Iles --- arch/arm/mm/ioremap.c | 72 ++++++++++++++++++++++++++++++++++----------------- arch/arm/mm/mm.h | 14 ++++++++++ arch/arm/mm/mmu.c | 3 ++- 3 files changed, 64 insertions(+), 25 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index bc7d9bd766d1..12c7ad215ce7 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -36,12 +36,6 @@ #include #include "mm.h" -/* - * Used by ioremap() and iounmap() code to mark (super)section-mapped - * I/O regions in vm_struct->flags field. 
- */ -#define VM_ARM_SECTION_MAPPING 0x80000000 - int ioremap_page(unsigned long virt, unsigned long phys, const struct mem_type *mtype) { @@ -201,12 +195,6 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) return NULL; - /* - * Don't allow RAM to be mapped - this causes problems with ARMv6+ - */ - if (WARN_ON(pfn_valid(pfn))) - return NULL; - type = get_mem_type(mtype); if (!type) return NULL; @@ -216,6 +204,34 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, */ size = PAGE_ALIGN(offset + size); + /* + * Try to reuse one of the static mapping whenever possible. + */ + read_lock(&vmlist_lock); + for (area = vmlist; area; area = area->next) { + if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) + break; + if (!(area->flags & VM_ARM_STATIC_MAPPING)) + continue; + if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype)) + continue; + if (__phys_to_pfn(area->phys_addr) > pfn || + __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1) + continue; + /* we can drop the lock here as we know *area is static */ + read_unlock(&vmlist_lock); + addr = (unsigned long)area->addr; + addr += __pfn_to_phys(pfn) - area->phys_addr; + return (void __iomem *) (offset + addr); + } + read_unlock(&vmlist_lock); + + /* + * Don't allow RAM to be mapped - this causes problems with ARMv6+ + */ + if (WARN_ON(pfn_valid(pfn))) + return NULL; + area = get_vm_area_caller(size, VM_IOREMAP, caller); if (!area) return NULL; @@ -313,26 +329,34 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) void __iounmap(volatile void __iomem *io_addr) { void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); -#ifndef CONFIG_SMP struct vm_struct *vm; - /* - * If this is a section based mapping we need to handle it - * specially as the VM subsystem does not know how to handle - * such a beast. - */ read_lock(&vmlist_lock); for (vm = vmlist; vm; vm = vm->next) { - if ((vm->flags & VM_IOREMAP) && (vm->addr == addr)) { - if (vm->flags & VM_ARM_SECTION_MAPPING) { - unmap_area_sections((unsigned long)vm->addr, - vm->size); - } + if (vm->addr > addr) break; + if (!(vm->flags & VM_IOREMAP)) + continue; + /* If this is a static mapping we must leave it alone */ + if ((vm->flags & VM_ARM_STATIC_MAPPING) && + (vm->addr <= addr) && (vm->addr + vm->size > addr)) { + read_unlock(&vmlist_lock); + return; } +#ifndef CONFIG_SMP + /* + * If this is a section based mapping we need to handle it + * specially as the VM subsystem does not know how to handle + * such a beast. + */ + if ((vm->addr == addr) && + (vm->flags & VM_ARM_SECTION_MAPPING)) { + unmap_area_sections((unsigned long)vm->addr, vm->size); + break; + } +#endif } read_unlock(&vmlist_lock); -#endif vunmap(addr); } diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index ad7cce3bc431..70f6d3ea4834 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -21,6 +21,20 @@ const struct mem_type *get_mem_type(unsigned int type); extern void __flush_dcache_page(struct address_space *mapping, struct page *page); +/* + * ARM specific vm_struct->flags bits. 
+ */ + +/* (super)section-mapped I/O regions used by ioremap()/iounmap() */ +#define VM_ARM_SECTION_MAPPING 0x80000000 + +/* permanent static mappings from iotable_init() */ +#define VM_ARM_STATIC_MAPPING 0x40000000 + +/* mapping type (attributes) for permanent static mappings */ +#define VM_ARM_MTYPE(mt) ((mt) << 20) +#define VM_ARM_MTYPE_MASK (0x1f << 20) + #endif #ifdef CONFIG_ZONE_DMA diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index c61481577ae1..27e366af67f9 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -749,7 +749,8 @@ void __init iotable_init(struct map_desc *io_desc, int nr) vm->addr = (void *)(md->virtual & PAGE_MASK); vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); vm->phys_addr = __pfn_to_phys(md->pfn); - vm->flags = VM_IOREMAP; + vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING; + vm->flags |= VM_ARM_MTYPE(md->type); vm->caller = iotable_init; vm_area_add_early(vm++); } -- cgit v1.2.3 From 8903826d0cd99aed9267e792d38284cf3092042b Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 30 Sep 2011 11:43:29 +0100 Subject: ARM: idmap: populate identity map pgd at init time using .init.text When disabling and re-enabling the MMU, it is necessary to take out an identity mapping for the code that manipulates the SCTLR in order to avoid it disappearing from under our feet. This is useful when soft rebooting and returning from CPU suspend. This patch allocates a set of page tables during boot and populates them with an identity mapping for the .idmap.text section. This means that users of the identity map do not need to manage their own pgd and can instead annotate their functions with __idmap or, in the case of assembly code, place them in the correct section. Acked-by: Dave Martin Reviewed-by: Catalin Marinas Tested-by: Lorenzo Pieralisi Signed-off-by: Will Deacon --- arch/arm/include/asm/idmap.h | 17 +++++++++++++++++ arch/arm/include/asm/pgtable.h | 3 --- arch/arm/kernel/smp.c | 1 + arch/arm/kernel/vmlinux.lds.S | 7 +++++++ arch/arm/mm/idmap.c | 26 ++++++++++++++++++++++++++ 5 files changed, 51 insertions(+), 3 deletions(-) create mode 100644 arch/arm/include/asm/idmap.h (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h new file mode 100644 index 000000000000..62e3d19c9ad7 --- /dev/null +++ b/arch/arm/include/asm/idmap.h @@ -0,0 +1,17 @@ +#ifndef __ASM_IDMAP_H +#define __ASM_IDMAP_H + +#include +#include + +/* Tag a function as requiring to be executed via an identity mapping. 
*/ +#define __idmap __section(.idmap.text) noinline notrace + +extern pgd_t *idmap_pgd; + +void identity_mapping_add(pgd_t *, unsigned long, unsigned long); +void identity_mapping_del(pgd_t *, unsigned long, unsigned long); + +void setup_mm_for_reboot(void); + +#endif /* __ASM_IDMAP_H */ diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index 9451dce3a553..03893a55e680 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -346,9 +346,6 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) #define pgtable_cache_init() do { } while (0) -void identity_mapping_add(pgd_t *, unsigned long, unsigned long); -void identity_mapping_del(pgd_t *, unsigned long, unsigned long); - #endif /* !__ASSEMBLY__ */ #endif /* CONFIG_MMU */ diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index ef5640b9e218..8afadda37459 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S index 20b3041e0860..f76e75548670 100644 --- a/arch/arm/kernel/vmlinux.lds.S +++ b/arch/arm/kernel/vmlinux.lds.S @@ -13,6 +13,12 @@ *(.proc.info.init) \ VMLINUX_SYMBOL(__proc_info_end) = .; +#define IDMAP_TEXT \ + ALIGN_FUNCTION(); \ + VMLINUX_SYMBOL(__idmap_text_start) = .; \ + *(.idmap.text) \ + VMLINUX_SYMBOL(__idmap_text_end) = .; + #ifdef CONFIG_HOTPLUG_CPU #define ARM_CPU_DISCARD(x) #define ARM_CPU_KEEP(x) x @@ -92,6 +98,7 @@ SECTIONS SCHED_TEXT LOCK_TEXT KPROBES_TEXT + IDMAP_TEXT #ifdef CONFIG_MMU *(.fixup) #endif diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 296ad2eaddb0..cda5ea3157a7 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -1,8 +1,12 @@ #include #include +#include #include #include +#include + +pgd_t *idmap_pgd; static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, unsigned long prot) @@ -73,6 +77,28 @@ void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) } #endif +extern char __idmap_text_start[], __idmap_text_end[]; + +static int __init init_static_idmap(void) +{ + phys_addr_t idmap_start, idmap_end; + + idmap_pgd = pgd_alloc(&init_mm); + if (!idmap_pgd) + return -ENOMEM; + + /* Add an identity mapping for the physical address of the section. */ + idmap_start = virt_to_phys((void *)__idmap_text_start); + idmap_end = virt_to_phys((void *)__idmap_text_end); + + pr_info("Setting up static identity map for 0x%llx - 0x%llx\n", + (long long)idmap_start, (long long)idmap_end); + identity_mapping_add(idmap_pgd, idmap_start, idmap_end); + + return 0; +} +arch_initcall(init_static_idmap); + /* * In order to soft-boot, we need to insert a 1:1 mapping in place of * the user-mode pages. This will then ensure that we have predictable -- cgit v1.2.3 From 1a4baafa7d203da1cceb302c2df38f0fea1c17a1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 15 Nov 2011 13:25:04 +0000 Subject: ARM: proc-*.S: place cpu_reset functions into .idmap.text section The CPU reset functions disable the MMU and therefore must be executed with an identity mapping in place. This patch places the CPU reset functions into the .idmap.text section, causing the idmap code to include them as part of the identity mapping. 
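
Each per-CPU file below gets the same mechanical change. Schematically, the pattern looks like this (an illustrative skeleton only: "cpu_xxx_reset" is a placeholder name, and the cache/MMU disable sequence differs from CPU to CPU):

	.pushsection	.idmap.text, "ax"
ENTRY(cpu_xxx_reset)
	@ CPU-specific sequence: disable the caches and the MMU here
	mov	pc, r0			@ branch to the reset address held in r0
ENDPROC(cpu_xxx_reset)
	.popsection

The .pushsection/.popsection pair places only the reset routine in .idmap.text, leaving the surrounding code in its usual section.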
Acked-by: Dave Martin Signed-off-by: Will Deacon --- arch/arm/mm/proc-arm1020.S | 3 +++ arch/arm/mm/proc-arm1020e.S | 3 +++ arch/arm/mm/proc-arm1022.S | 3 +++ arch/arm/mm/proc-arm1026.S | 3 +++ arch/arm/mm/proc-arm6_7.S | 4 ++++ arch/arm/mm/proc-arm720.S | 3 +++ arch/arm/mm/proc-arm740.S | 3 +++ arch/arm/mm/proc-arm7tdmi.S | 3 +++ arch/arm/mm/proc-arm920.S | 3 +++ arch/arm/mm/proc-arm922.S | 3 +++ arch/arm/mm/proc-arm925.S | 3 +++ arch/arm/mm/proc-arm926.S | 3 +++ arch/arm/mm/proc-arm940.S | 3 +++ arch/arm/mm/proc-arm946.S | 3 +++ arch/arm/mm/proc-arm9tdmi.S | 3 +++ arch/arm/mm/proc-fa526.S | 3 +++ arch/arm/mm/proc-feroceon.S | 3 +++ arch/arm/mm/proc-mohawk.S | 3 +++ arch/arm/mm/proc-sa110.S | 3 +++ arch/arm/mm/proc-sa1100.S | 3 +++ arch/arm/mm/proc-v6.S | 3 +++ arch/arm/mm/proc-v7.S | 2 ++ arch/arm/mm/proc-xsc3.S | 3 +++ arch/arm/mm/proc-xscale.S | 3 +++ 24 files changed, 72 insertions(+) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 67469665d47a..234951345eb3 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -95,6 +95,7 @@ ENTRY(cpu_arm1020_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm1020_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -107,6 +108,8 @@ ENTRY(cpu_arm1020_reset) bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm1020_reset) + .popsection /* * cpu_arm1020_do_idle() diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 4251421c0ed5..c244b06caac9 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -95,6 +95,7 @@ ENTRY(cpu_arm1020e_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm1020e_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -107,6 +108,8 @@ ENTRY(cpu_arm1020e_reset) bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm1020e_reset) + .popsection /* * cpu_arm1020e_do_idle() diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index d283cf3d06e3..38fe22efd18f 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -84,6 +84,7 @@ ENTRY(cpu_arm1022_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm1022_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -96,6 +97,8 @@ ENTRY(cpu_arm1022_reset) bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm1022_reset) + .popsection /* * cpu_arm1022_do_idle() diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 678a1ceafed2..3eb9c3c26c75 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -84,6 +84,7 @@ ENTRY(cpu_arm1026_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm1026_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -96,6 +97,8 @@ ENTRY(cpu_arm1026_reset) bic ip, ip, #0x1100 @ ...i...s........ 
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm1026_reset) + .popsection /* * cpu_arm1026_do_idle() diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index e5b974cddac3..4fbeb5b8e6c2 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S @@ -225,6 +225,7 @@ ENTRY(cpu_arm7_set_pte_ext) * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm6_reset) ENTRY(cpu_arm7_reset) mov r1, #0 @@ -235,6 +236,9 @@ ENTRY(cpu_arm7_reset) mov r1, #0x30 mcr p15, 0, r1, c1, c0, 0 @ turn off MMU etc mov pc, r0 +ENDPROC(cpu_arm6_reset) +ENDPROC(cpu_arm7_reset) + .popsection __CPUINIT diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 55f4e290665a..0ac908c7ade1 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -101,6 +101,7 @@ ENTRY(cpu_arm720_set_pte_ext) * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm720_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate cache @@ -112,6 +113,8 @@ ENTRY(cpu_arm720_reset) bic ip, ip, #0x2100 @ ..v....s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm720_reset) + .popsection __CPUINIT diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 4506be3adda6..dc5de5d53f20 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S @@ -49,6 +49,7 @@ ENTRY(cpu_arm740_proc_fin) * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm740_reset) mov ip, #0 mcr p15, 0, ip, c7, c0, 0 @ invalidate cache @@ -56,6 +57,8 @@ ENTRY(cpu_arm740_reset) bic ip, ip, #0x0000000c @ ............wc.. mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm740_reset) + .popsection __CPUINIT diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 7e0e1fe4ed4d..6ddea3e464bd 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S @@ -45,8 +45,11 @@ ENTRY(cpu_arm7tdmi_proc_fin) * Params : loc(r0) address to jump to * Purpose : Sets up everything for a reset and jump to the location for soft reset. */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm7tdmi_reset) mov pc, r0 +ENDPROC(cpu_arm7tdmi_reset) + .popsection __CPUINIT diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 88fb3d9e0640..cb941ae95f66 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -85,6 +85,7 @@ ENTRY(cpu_arm920_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm920_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -97,6 +98,8 @@ ENTRY(cpu_arm920_reset) bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm920_reset) + .popsection /* * cpu_arm920_do_idle() diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 490e18833857..4ec0e074dd55 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -87,6 +87,7 @@ ENTRY(cpu_arm922_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm922_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -99,6 +100,8 @@ ENTRY(cpu_arm922_reset) bic ip, ip, #0x1100 @ ...i...s........ 
mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm922_reset) + .popsection /* * cpu_arm922_do_idle() diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 51d494be057e..9dccd9a365b3 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -108,6 +108,7 @@ ENTRY(cpu_arm925_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm925_reset) /* Send software reset to MPU and DSP */ mov ip, #0xff000000 @@ -115,6 +116,8 @@ ENTRY(cpu_arm925_reset) orr ip, ip, #0x0000ce00 mov r4, #1 strh r4, [ip, #0x10] +ENDPROC(cpu_arm925_reset) + .popsection mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 9f8fd91f918a..820259b81a1f 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -77,6 +77,7 @@ ENTRY(cpu_arm926_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_arm926_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -89,6 +90,8 @@ ENTRY(cpu_arm926_reset) bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm926_reset) + .popsection /* * cpu_arm926_do_idle() diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index ac750d506153..9fdc0a170974 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S @@ -48,6 +48,7 @@ ENTRY(cpu_arm940_proc_fin) * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm940_reset) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ flush I cache @@ -58,6 +59,8 @@ ENTRY(cpu_arm940_reset) bic ip, ip, #0x00001000 @ i-cache mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm940_reset) + .popsection /* * cpu_arm940_do_idle() diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index 683af3a182b7..f684cfedcca9 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S @@ -55,6 +55,7 @@ ENTRY(cpu_arm946_proc_fin) * Params : r0 = address to jump to * Notes : This sets up everything for a reset */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm946_reset) mov ip, #0 mcr p15, 0, ip, c7, c5, 0 @ flush I cache @@ -65,6 +66,8 @@ ENTRY(cpu_arm946_reset) bic ip, ip, #0x00001000 @ i-cache mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_arm946_reset) + .popsection /* * cpu_arm946_do_idle() diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index 2120f9e2af7f..8881391dfb9e 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S @@ -45,8 +45,11 @@ ENTRY(cpu_arm9tdmi_proc_fin) * Params : loc(r0) address to jump to * Purpose : Sets up everything for a reset and jump to the location for soft reset. */ + .pushsection .idmap.text, "ax" ENTRY(cpu_arm9tdmi_reset) mov pc, r0 +ENDPROC(cpu_arm9tdmi_reset) + .popsection __CPUINIT diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 4c7a5710472b..272558a133a3 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S @@ -57,6 +57,7 @@ ENTRY(cpu_fa526_proc_fin) * loc: location to jump to for soft reset */ .align 4 + .pushsection .idmap.text, "ax" ENTRY(cpu_fa526_reset) /* TODO: Use CP8 if possible... 
*/ mov ip, #0 @@ -73,6 +74,8 @@ ENTRY(cpu_fa526_reset) nop nop mov pc, r0 +ENDPROC(cpu_fa526_reset) + .popsection /* * cpu_fa526_do_idle() diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 8a6c2f78c1c3..ba3c500584ac 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S @@ -98,6 +98,7 @@ ENTRY(cpu_feroceon_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_feroceon_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -110,6 +111,8 @@ ENTRY(cpu_feroceon_reset) bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_feroceon_reset) + .popsection /* * cpu_feroceon_do_idle() diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index db52b0fb14a0..cdfedc5b8ad8 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S @@ -69,6 +69,7 @@ ENTRY(cpu_mohawk_proc_fin) * (same as arm926) */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_mohawk_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -79,6 +80,8 @@ ENTRY(cpu_mohawk_reset) bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_mohawk_reset) + .popsection /* * cpu_mohawk_do_idle() diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index d50ada26edd6..775d70fba937 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S @@ -62,6 +62,7 @@ ENTRY(cpu_sa110_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_sa110_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -74,6 +75,8 @@ ENTRY(cpu_sa110_reset) bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_sa110_reset) + .popsection /* * cpu_sa110_do_idle(type) diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 7d91545d089b..3aa0da11fd84 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -70,6 +70,7 @@ ENTRY(cpu_sa1100_proc_fin) * loc: location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_sa1100_reset) mov ip, #0 mcr p15, 0, ip, c7, c7, 0 @ invalidate I,D caches @@ -82,6 +83,8 @@ ENTRY(cpu_sa1100_reset) bic ip, ip, #0x1100 @ ...i...s........ mcr p15, 0, ip, c1, c0, 0 @ ctrl register mov pc, r0 +ENDPROC(cpu_sa1100_reset) + .popsection /* * cpu_sa1100_do_idle(type) diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index d061d2fa5506..5900cd520e84 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -55,6 +55,7 @@ ENTRY(cpu_v6_proc_fin) * - loc - location to jump to for soft reset */ .align 5 + .pushsection .idmap.text, "ax" ENTRY(cpu_v6_reset) mrc p15, 0, r1, c1, c0, 0 @ ctrl register bic r1, r1, #0x1 @ ...............m @@ -62,6 +63,8 @@ ENTRY(cpu_v6_reset) mov r1, #0 mcr p15, 0, r1, c7, c5, 4 @ ISB mov pc, r0 +ENDPROC(cpu_v6_reset) + .popsection /* * cpu_v6_do_idle() diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 2c559ac38142..66a185f018a0 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -63,6 +63,7 @@ ENDPROC(cpu_v7_proc_fin) * caches disabled. 
 */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_v7_reset)
 	mrc	p15, 0, r1, c1, c0, 0		@ ctrl register
 	bic	r1, r1, #0x1			@ ...............m
@@ -71,6 +72,7 @@ ENTRY(cpu_v7_reset)
 	isb
 	mov	pc, r0
 ENDPROC(cpu_v7_reset)
+	.popsection
 
 /*
  * cpu_v7_do_idle()
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S
index abf0507a08ae..b0d57869da2d 100644
--- a/arch/arm/mm/proc-xsc3.S
+++ b/arch/arm/mm/proc-xsc3.S
@@ -105,6 +105,7 @@ ENTRY(cpu_xsc3_proc_fin)
  * loc: location to jump to for soft reset
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_xsc3_reset)
 	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
 	msr	cpsr_c, r1			@ reset CPSR
@@ -119,6 +120,8 @@ ENTRY(cpu_xsc3_reset)
 	@ already containing those two last instructions to survive.
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I and D TLBs
 	mov	pc, r0
+ENDPROC(cpu_xsc3_reset)
+	.popsection
 
 /*
  * cpu_xsc3_do_idle()
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 3277904bebaf..4ffebaa595ee 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -142,6 +142,7 @@ ENTRY(cpu_xscale_proc_fin)
  * Beware PXA270 erratum E7.
  */
 	.align	5
+	.pushsection	.idmap.text, "ax"
 ENTRY(cpu_xscale_reset)
 	mov	r1, #PSR_F_BIT|PSR_I_BIT|SVC_MODE
 	msr	cpsr_c, r1			@ reset CPSR
@@ -160,6 +161,8 @@ ENTRY(cpu_xscale_reset)
 	@ already containing those two last instructions to survive.
 	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
 	mov	pc, r0
+ENDPROC(cpu_xscale_reset)
+	.popsection
 
 /*
  * cpu_xscale_do_idle()
--
cgit v1.2.3


From 2c8951ab0c337cb198236df07ad55f9dd4892c26 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 8 Jun 2011 15:53:34 +0100
Subject: ARM: idmap: use idmap_pgd when setting up mm for reboot

For soft-rebooting a system, it is necessary to map the MMU-off code
with an identity mapping so that execution can continue safely once the
MMU has been switched off.

Currently, setup_mm_for_reboot takes out a 1:1 mapping from 0x0 to
TASK_SIZE during reboot in the hope that the reset code lives at a
physical address corresponding to a userspace virtual address.

This patch modifies the code so that we switch to the idmap_pgd tables,
which contain a 1:1 mapping of the cpu_reset code. This has the
advantage of only remapping the code that we need and also means we
don't need to worry about allocating a pgd from an atomic context in
the case that the physical address of the cpu_reset code aliases with
the virtual space used by the kernel.

Acked-by: Dave Martin
Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm/mm/idmap.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index cda5ea3157a7..b01760e6da18 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -100,17 +100,18 @@ static int __init init_static_idmap(void)
 arch_initcall(init_static_idmap);
 
 /*
- * In order to soft-boot, we need to insert a 1:1 mapping in place of
- * the user-mode pages. This will then ensure that we have predictable
- * results when turning the mmu off
+ * In order to soft-boot, we need to switch to a 1:1 mapping for the
+ * cpu_reset functions. This will then ensure that we have predictable
+ * results when turning off the mmu.
  */
 void setup_mm_for_reboot(void)
 {
-	/*
-	 * We need to access to user-mode page tables here. For kernel threads
-	 * we don't have any user-mode mappings so we use the context that we
-	 * "borrowed".
-	 */
-	identity_mapping_add(current->active_mm->pgd, 0, TASK_SIZE);
+	/* Clean and invalidate L1. */
+	flush_cache_all();
+
+	/* Switch to the identity mapping. */
+	cpu_switch_mm(idmap_pgd, &init_mm);
+
+	/* Flush the TLB. */
 	local_flush_tlb_all();
 }
--
cgit v1.2.3


From 4e8ee7de227e3ab9a72040b448ad728c5428a042 Mon Sep 17 00:00:00 2001
From: Will Deacon
Date: Wed, 23 Nov 2011 12:26:25 +0000
Subject: ARM: SMP: use idmap_pgd for mapping MMU enable during secondary booting

The ARM SMP booting code allocates a temporary set of page tables
containing an identity mapping of the kernel image and provides this
to secondary CPUs for initial booting.

In reality, we only need to include the __turn_mmu_on function in the
identity mapping since the rest of the kernel is executing from virtual
addresses after this point.

This patch adds __turn_mmu_on to the .idmap.text section, allowing the
SMP booting code to use the idmap_pgd directly and not have to populate
its own set of page tables. As a result of this patch, we can make the
identity_mapping_add function static (since it is only used within
mm/idmap.c) and also remove the identity_mapping_del function. The
identity map population is moved to an early initcall so that it is
set up in time for secondary CPU bringup.

Reviewed-by: Catalin Marinas
Signed-off-by: Will Deacon
---
 arch/arm/include/asm/idmap.h | 3 ---
 arch/arm/kernel/head.S | 4 +++-
 arch/arm/kernel/smp.c | 31 +------------------------------
 arch/arm/mm/idmap.c | 34 ++--------------------------------
 4 files changed, 6 insertions(+), 66 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/include/asm/idmap.h b/arch/arm/include/asm/idmap.h
index 62e3d19c9ad7..bf863edb517d 100644
--- a/arch/arm/include/asm/idmap.h
+++ b/arch/arm/include/asm/idmap.h
@@ -9,9 +9,6 @@
 
 extern pgd_t *idmap_pgd;
 
-void identity_mapping_add(pgd_t *, unsigned long, unsigned long);
-void identity_mapping_del(pgd_t *, unsigned long, unsigned long);
-
 void setup_mm_for_reboot(void);
 
 #endif	/* __ASM_IDMAP_H */
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 43e3aa3b0573..64e9943ea4f0 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -398,7 +398,8 @@ ENDPROC(__enable_mmu)
 * other registers depend on the function called upon completion
 */
 	.align	5
-__turn_mmu_on:
+	.pushsection	.idmap.text, "ax"
+ENTRY(__turn_mmu_on)
 	mov	r0, r0
 	mcr	p15, 0, r0, c1, c0, 0		@ write control reg
 	mrc	p15, 0, r3, c0, c0, 0		@ read id reg
@@ -407,6 +408,7 @@ __turn_mmu_on:
 	mov	pc, r3
 __turn_mmu_on_end:
 ENDPROC(__turn_mmu_on)
+	.popsection
 
 #ifdef CONFIG_SMP_ON_UP
 
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 8afadda37459..76ff28d87bf3 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -62,7 +62,6 @@ int __cpuinit __cpu_up(unsigned int cpu)
 {
 	struct cpuinfo_arm *ci = &per_cpu(cpu_data, cpu);
 	struct task_struct *idle = ci->idle;
-	pgd_t *pgd;
 	int ret;
 
 	/*
@@ -84,30 +83,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
 		init_idle(idle, cpu);
 	}
 
-	/*
-	 * Allocate initial page tables to allow the new CPU to
-	 * enable the MMU safely. This essentially means a set
-	 * of our "standard" page tables, with the addition of
-	 * a 1:1 mapping for the physical address of the kernel.
-	 */
-	pgd = pgd_alloc(&init_mm);
-	if (!pgd)
-		return -ENOMEM;
-
-	if (PHYS_OFFSET != PAGE_OFFSET) {
-#ifndef CONFIG_HOTPLUG_CPU
-		identity_mapping_add(pgd, __pa(__init_begin), __pa(__init_end));
-#endif
-		identity_mapping_add(pgd, __pa(_stext), __pa(_etext));
-		identity_mapping_add(pgd, __pa(_sdata), __pa(_edata));
-	}
-
 	/*
 	 * We need to tell the secondary core where to find
 	 * its stack and the page tables.
*/ secondary_data.stack = task_stack_page(idle) + THREAD_START_SP; - secondary_data.pgdir = virt_to_phys(pgd); + secondary_data.pgdir = virt_to_phys(idmap_pgd); secondary_data.swapper_pg_dir = virt_to_phys(swapper_pg_dir); __cpuc_flush_dcache_area(&secondary_data, sizeof(secondary_data)); outer_clean_range(__pa(&secondary_data), __pa(&secondary_data + 1)); @@ -143,16 +124,6 @@ int __cpuinit __cpu_up(unsigned int cpu) secondary_data.stack = NULL; secondary_data.pgdir = 0; - if (PHYS_OFFSET != PAGE_OFFSET) { -#ifndef CONFIG_HOTPLUG_CPU - identity_mapping_del(pgd, __pa(__init_begin), __pa(__init_end)); -#endif - identity_mapping_del(pgd, __pa(_stext), __pa(_etext)); - identity_mapping_del(pgd, __pa(_sdata), __pa(_edata)); - } - - pgd_free(&init_mm, pgd); - return ret; } diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index b01760e6da18..660f1bc68f99 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -32,7 +32,7 @@ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, } while (pud++, addr = next, addr != end); } -void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) +static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) { unsigned long prot, next; @@ -47,36 +47,6 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) } while (pgd++, addr = next, addr != end); } -#ifdef CONFIG_SMP -static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end) -{ - pmd_t *pmd = pmd_offset(pud, addr); - pmd_clear(pmd); -} - -static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end) -{ - pud_t *pud = pud_offset(pgd, addr); - unsigned long next; - - do { - next = pud_addr_end(addr, end); - idmap_del_pmd(pud, addr, next); - } while (pud++, addr = next, addr != end); -} - -void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) -{ - unsigned long next; - - pgd += pgd_index(addr); - do { - next = pgd_addr_end(addr, end); - idmap_del_pud(pgd, addr, next); - } while (pgd++, addr = next, addr != end); -} -#endif - extern char __idmap_text_start[], __idmap_text_end[]; static int __init init_static_idmap(void) @@ -97,7 +67,7 @@ static int __init init_static_idmap(void) return 0; } -arch_initcall(init_static_idmap); +early_initcall(init_static_idmap); /* * In order to soft-boot, we need to switch to a 1:1 mapping for the -- cgit v1.2.3 From 03a6b8274cc61fb9bb77aaa102e63840461c5f3a Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 22 Nov 2011 17:30:27 +0000 Subject: ARM: pgtable: Fix compiler warning in ioremap.c introduced by nopud With the arch/arm code conversion to pgtable-nopud.h, the section and supersection (un|re)map code triggers compiler warnings on UP systems. This is caused by pmd_offset() being given a pgd_t argument rather than a pud_t one. This patch makes the necessary conversion with the assumption that the pud is folded into the pgd. The page table setting code only loops over the pmd which is enough with the classic page tables. This code is not compiled when LPAE is enabled. 
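
For clarity, the page table walk that each hunk below converges on has this shape (a sketch lifted from the patch context, assuming the pud is folded into the pgd by pgtable-nopud.h):

	pgd_t *pgd = pgd_offset_k(addr);
	pud_t *pud = pud_offset(pgd, addr);	/* effectively a cast when folded */
	pmd_t *pmd = pmd_offset(pud, addr);

	/* each 2MB Linux pmd covers a pair of 1MB hardware section entries */
	addr += PMD_SIZE;
	pmd += 2;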
Signed-off-by: Catalin Marinas --- arch/arm/mm/ioremap.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index bdb248c4f55c..c3fa40da3b75 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -79,13 +79,16 @@ static void unmap_area_sections(unsigned long virt, unsigned long size) { unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1)); pgd_t *pgd; + pud_t *pud; + pmd_t *pmdp; flush_cache_vunmap(addr, end); pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmdp = pmd_offset(pud, addr); do { - pmd_t pmd, *pmdp = pmd_offset(pgd, addr); + pmd_t pmd = *pmdp; - pmd = *pmdp; if (!pmd_none(pmd)) { /* * Clear the PMD from the page table, and @@ -104,8 +107,8 @@ static void unmap_area_sections(unsigned long virt, unsigned long size) pte_free_kernel(&init_mm, pmd_page_vaddr(pmd)); } - addr += PGDIR_SIZE; - pgd++; + addr += PMD_SIZE; + pmdp += 2; } while (addr < end); /* @@ -124,6 +127,8 @@ remap_area_sections(unsigned long virt, unsigned long pfn, { unsigned long addr = virt, end = virt + size; pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; /* * Remove and free any PTE-based mapping, and @@ -132,17 +137,17 @@ remap_area_sections(unsigned long virt, unsigned long pfn, unmap_area_sections(virt, size); pgd = pgd_offset_k(addr); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); do { - pmd_t *pmd = pmd_offset(pgd, addr); - pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); pfn += SZ_1M >> PAGE_SHIFT; pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect); pfn += SZ_1M >> PAGE_SHIFT; flush_pmd_entry(pmd); - addr += PGDIR_SIZE; - pgd++; + addr += PMD_SIZE; + pmd += 2; } while (addr < end); return 0; @@ -154,6 +159,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, { unsigned long addr = virt, end = virt + size; pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; /* * Remove and free any PTE-based mapping, and @@ -162,6 +169,8 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, unmap_area_sections(virt, size); pgd = pgd_offset_k(virt); + pud = pud_offset(pgd, addr); + pmd = pmd_offset(pud, addr); do { unsigned long super_pmd_val, i; @@ -170,14 +179,12 @@ remap_area_supersections(unsigned long virt, unsigned long pfn, super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20; for (i = 0; i < 8; i++) { - pmd_t *pmd = pmd_offset(pgd, addr); - pmd[0] = __pmd(super_pmd_val); pmd[1] = __pmd(super_pmd_val); flush_pmd_entry(pmd); - addr += PGDIR_SIZE; - pgd++; + addr += PMD_SIZE; + pmd += 2; } pfn += SUPERSECTION_SIZE >> PAGE_SHIFT; -- cgit v1.2.3 From 136848d4ca9cf6f08edf6e50cb9bbe19de55c32a Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 22 Nov 2011 17:30:28 +0000 Subject: ARM: LPAE: Move the FSR definitions to separate files The FSR structure is different with LPAE and this patch moves the classic MMU specific definition to a separate fsr-2level.c file that is included in fault.c. It also moves the fsr_fs and FSR bits to the fault.h file. 
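
This also makes the fault-status tables selectable by page table format. A sketch of where this is headed (the CONFIG_ARM_LPAE branch and the fsr-3level.c file are assumptions about a follow-up patch; this patch only adds the unconditional 2-level include):

	/* in arch/arm/mm/fault.c, after the fault handlers are declared */
	#ifdef CONFIG_ARM_LPAE
	#include "fsr-3level.c"	/* hypothetical at this point in the series */
	#else
	#include "fsr-2level.c"
	#endif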
Signed-off-by: Catalin Marinas --- arch/arm/mm/fault.c | 96 ++---------------------------------------------- arch/arm/mm/fault.h | 19 +++++++++- arch/arm/mm/fsr-2level.c | 78 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 100 insertions(+), 93 deletions(-) create mode 100644 arch/arm/mm/fsr-2level.c (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index aa33949fef60..2a0271677725 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -27,19 +27,6 @@ #include "fault.h" -/* - * Fault status register encodings. We steal bit 31 for our own purposes. - */ -#define FSR_LNX_PF (1 << 31) -#define FSR_WRITE (1 << 11) -#define FSR_FS4 (1 << 10) -#define FSR_FS3_0 (15) - -static inline int fsr_fs(unsigned int fsr) -{ - return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; -} - #ifdef CONFIG_MMU #ifdef CONFIG_KPROBES @@ -489,55 +476,16 @@ do_bad(unsigned long addr, unsigned int fsr, struct pt_regs *regs) return 1; } -static struct fsr_info { +struct fsr_info { int (*fn)(unsigned long addr, unsigned int fsr, struct pt_regs *regs); int sig; int code; const char *name; -} fsr_info[] = { - /* - * The following are the standard ARMv3 and ARMv4 aborts. ARMv5 - * defines these to be "precise" aborts. - */ - { do_bad, SIGSEGV, 0, "vector exception" }, - { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, - { do_bad, SIGKILL, 0, "terminal exception" }, - { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, - { do_bad, SIGBUS, 0, "external abort on linefetch" }, - { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, - { do_bad, SIGBUS, 0, "external abort on linefetch" }, - { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, - { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, - { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, - { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, - { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, - { do_bad, SIGBUS, 0, "external abort on translation" }, - { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, - { do_bad, SIGBUS, 0, "external abort on translation" }, - { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, - /* - * The following are "imprecise" aborts, which are signalled by bit - * 10 of the FSR, and may not be recoverable. These are only - * supported if the CPU abort handler supports bit 10. 
- */ - { do_bad, SIGBUS, 0, "unknown 16" }, - { do_bad, SIGBUS, 0, "unknown 17" }, - { do_bad, SIGBUS, 0, "unknown 18" }, - { do_bad, SIGBUS, 0, "unknown 19" }, - { do_bad, SIGBUS, 0, "lock abort" }, /* xscale */ - { do_bad, SIGBUS, 0, "unknown 21" }, - { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */ - { do_bad, SIGBUS, 0, "unknown 23" }, - { do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */ - { do_bad, SIGBUS, 0, "unknown 25" }, - { do_bad, SIGBUS, 0, "unknown 26" }, - { do_bad, SIGBUS, 0, "unknown 27" }, - { do_bad, SIGBUS, 0, "unknown 28" }, - { do_bad, SIGBUS, 0, "unknown 29" }, - { do_bad, SIGBUS, 0, "unknown 30" }, - { do_bad, SIGBUS, 0, "unknown 31" } }; +/* FSR definition */ +#include "fsr-2level.c" + void __init hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), int sig, int code, const char *name) @@ -573,42 +521,6 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs) arm_notify_die("", regs, &info, fsr, 0); } - -static struct fsr_info ifsr_info[] = { - { do_bad, SIGBUS, 0, "unknown 0" }, - { do_bad, SIGBUS, 0, "unknown 1" }, - { do_bad, SIGBUS, 0, "debug event" }, - { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" }, - { do_bad, SIGBUS, 0, "unknown 4" }, - { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, - { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" }, - { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, - { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, - { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, - { do_bad, SIGBUS, 0, "unknown 10" }, - { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, - { do_bad, SIGBUS, 0, "external abort on translation" }, - { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, - { do_bad, SIGBUS, 0, "external abort on translation" }, - { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, - { do_bad, SIGBUS, 0, "unknown 16" }, - { do_bad, SIGBUS, 0, "unknown 17" }, - { do_bad, SIGBUS, 0, "unknown 18" }, - { do_bad, SIGBUS, 0, "unknown 19" }, - { do_bad, SIGBUS, 0, "unknown 20" }, - { do_bad, SIGBUS, 0, "unknown 21" }, - { do_bad, SIGBUS, 0, "unknown 22" }, - { do_bad, SIGBUS, 0, "unknown 23" }, - { do_bad, SIGBUS, 0, "unknown 24" }, - { do_bad, SIGBUS, 0, "unknown 25" }, - { do_bad, SIGBUS, 0, "unknown 26" }, - { do_bad, SIGBUS, 0, "unknown 27" }, - { do_bad, SIGBUS, 0, "unknown 28" }, - { do_bad, SIGBUS, 0, "unknown 29" }, - { do_bad, SIGBUS, 0, "unknown 30" }, - { do_bad, SIGBUS, 0, "unknown 31" }, -}; - void __init hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), int sig, int code, const char *name) diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h index 49e9e3804de4..25b45c105be2 100644 --- a/arch/arm/mm/fault.h +++ b/arch/arm/mm/fault.h @@ -1,3 +1,20 @@ -void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); +#ifndef __ARCH_ARM_FAULT_H +#define __ARCH_ARM_FAULT_H + +/* + * Fault status register encodings. We steal bit 31 for our own purposes. 
+ */ +#define FSR_LNX_PF (1 << 31) +#define FSR_WRITE (1 << 11) +#define FSR_FS4 (1 << 10) +#define FSR_FS3_0 (15) + +static inline int fsr_fs(unsigned int fsr) +{ + return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; +} +void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); unsigned long search_exception_table(unsigned long addr); + +#endif /* __ARCH_ARM_FAULT_H */ diff --git a/arch/arm/mm/fsr-2level.c b/arch/arm/mm/fsr-2level.c new file mode 100644 index 000000000000..18ca74c0f341 --- /dev/null +++ b/arch/arm/mm/fsr-2level.c @@ -0,0 +1,78 @@ +static struct fsr_info fsr_info[] = { + /* + * The following are the standard ARMv3 and ARMv4 aborts. ARMv5 + * defines these to be "precise" aborts. + */ + { do_bad, SIGSEGV, 0, "vector exception" }, + { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, + { do_bad, SIGKILL, 0, "terminal exception" }, + { do_bad, SIGBUS, BUS_ADRALN, "alignment exception" }, + { do_bad, SIGBUS, 0, "external abort on linefetch" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, + { do_bad, SIGBUS, 0, "external abort on linefetch" }, + { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, + { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, + { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, + { do_bad, SIGBUS, 0, "external abort on translation" }, + { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, + { do_bad, SIGBUS, 0, "external abort on translation" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, + /* + * The following are "imprecise" aborts, which are signalled by bit + * 10 of the FSR, and may not be recoverable. These are only + * supported if the CPU abort handler supports bit 10. 
+ */ + { do_bad, SIGBUS, 0, "unknown 16" }, + { do_bad, SIGBUS, 0, "unknown 17" }, + { do_bad, SIGBUS, 0, "unknown 18" }, + { do_bad, SIGBUS, 0, "unknown 19" }, + { do_bad, SIGBUS, 0, "lock abort" }, /* xscale */ + { do_bad, SIGBUS, 0, "unknown 21" }, + { do_bad, SIGBUS, BUS_OBJERR, "imprecise external abort" }, /* xscale */ + { do_bad, SIGBUS, 0, "unknown 23" }, + { do_bad, SIGBUS, 0, "dcache parity error" }, /* xscale */ + { do_bad, SIGBUS, 0, "unknown 25" }, + { do_bad, SIGBUS, 0, "unknown 26" }, + { do_bad, SIGBUS, 0, "unknown 27" }, + { do_bad, SIGBUS, 0, "unknown 28" }, + { do_bad, SIGBUS, 0, "unknown 29" }, + { do_bad, SIGBUS, 0, "unknown 30" }, + { do_bad, SIGBUS, 0, "unknown 31" }, +}; + +static struct fsr_info ifsr_info[] = { + { do_bad, SIGBUS, 0, "unknown 0" }, + { do_bad, SIGBUS, 0, "unknown 1" }, + { do_bad, SIGBUS, 0, "debug event" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "section access flag fault" }, + { do_bad, SIGBUS, 0, "unknown 4" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "section translation fault" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "page access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_MAPERR, "page translation fault" }, + { do_bad, SIGBUS, 0, "external abort on non-linefetch" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "section domain fault" }, + { do_bad, SIGBUS, 0, "unknown 10" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "page domain fault" }, + { do_bad, SIGBUS, 0, "external abort on translation" }, + { do_sect_fault, SIGSEGV, SEGV_ACCERR, "section permission fault" }, + { do_bad, SIGBUS, 0, "external abort on translation" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "page permission fault" }, + { do_bad, SIGBUS, 0, "unknown 16" }, + { do_bad, SIGBUS, 0, "unknown 17" }, + { do_bad, SIGBUS, 0, "unknown 18" }, + { do_bad, SIGBUS, 0, "unknown 19" }, + { do_bad, SIGBUS, 0, "unknown 20" }, + { do_bad, SIGBUS, 0, "unknown 21" }, + { do_bad, SIGBUS, 0, "unknown 22" }, + { do_bad, SIGBUS, 0, "unknown 23" }, + { do_bad, SIGBUS, 0, "unknown 24" }, + { do_bad, SIGBUS, 0, "unknown 25" }, + { do_bad, SIGBUS, 0, "unknown 26" }, + { do_bad, SIGBUS, 0, "unknown 27" }, + { do_bad, SIGBUS, 0, "unknown 28" }, + { do_bad, SIGBUS, 0, "unknown 29" }, + { do_bad, SIGBUS, 0, "unknown 30" }, + { do_bad, SIGBUS, 0, "unknown 31" }, +}; -- cgit v1.2.3 From 8d2cd3a38fd663bd341507f5ac29002ffd81d986 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 22 Nov 2011 17:30:28 +0000 Subject: ARM: LPAE: Factor out classic-MMU specific code into proc-v7-2level.S This patch modifies the proc-v7.S file so that it only contains code shared between classic MMU and LPAE. The non-common code is factored out into a separate file. Signed-off-by: Catalin Marinas --- arch/arm/mm/proc-v7-2level.S | 171 +++++++++++++++++++++++++++++++++++++++++++ arch/arm/mm/proc-v7.S | 152 +------------------------------------- 2 files changed, 174 insertions(+), 149 deletions(-) create mode 100644 arch/arm/mm/proc-v7-2level.S (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S new file mode 100644 index 000000000000..3a4b3e7b888c --- /dev/null +++ b/arch/arm/mm/proc-v7-2level.S @@ -0,0 +1,171 @@ +/* + * arch/arm/mm/proc-v7-2level.S + * + * Copyright (C) 2001 Deep Blue Solutions Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#define TTB_S (1 << 1) +#define TTB_RGN_NC (0 << 3) +#define TTB_RGN_OC_WBWA (1 << 3) +#define TTB_RGN_OC_WT (2 << 3) +#define TTB_RGN_OC_WB (3 << 3) +#define TTB_NOS (1 << 5) +#define TTB_IRGN_NC ((0 << 0) | (0 << 6)) +#define TTB_IRGN_WBWA ((0 << 0) | (1 << 6)) +#define TTB_IRGN_WT ((1 << 0) | (0 << 6)) +#define TTB_IRGN_WB ((1 << 0) | (1 << 6)) + +/* PTWs cacheable, inner WB not shareable, outer WB not shareable */ +#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB +#define PMD_FLAGS_UP PMD_SECT_WB + +/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ +#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA +#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S + +/* + * cpu_v7_switch_mm(pgd_phys, tsk) + * + * Set the translation table base pointer to be pgd_phys + * + * - pgd_phys - physical address of new TTB + * + * It is assumed that: + * - we are not using split page tables + */ +ENTRY(cpu_v7_switch_mm) +#ifdef CONFIG_MMU + mov r2, #0 + ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id + ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) + ALT_UP(orr r0, r0, #TTB_FLAGS_UP) +#ifdef CONFIG_ARM_ERRATA_430973 + mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB +#endif +#ifdef CONFIG_ARM_ERRATA_754322 + dsb +#endif + mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID + isb +1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 + isb +#ifdef CONFIG_ARM_ERRATA_754322 + dsb +#endif + mcr p15, 0, r1, c13, c0, 1 @ set context ID + isb +#endif + mov pc, lr +ENDPROC(cpu_v7_switch_mm) + +/* + * cpu_v7_set_pte_ext(ptep, pte) + * + * Set a level 2 translation table entry. + * + * - ptep - pointer to level 2 translation table entry + * (hardware version is stored at +2048 bytes) + * - pte - PTE value to store + * - ext - value for extended PTE bits + */ +ENTRY(cpu_v7_set_pte_ext) +#ifdef CONFIG_MMU + str r1, [r0] @ linux version + + bic r3, r1, #0x000003f0 + bic r3, r3, #PTE_TYPE_MASK + orr r3, r3, r2 + orr r3, r3, #PTE_EXT_AP0 | 2 + + tst r1, #1 << 4 + orrne r3, r3, #PTE_EXT_TEX(1) + + eor r1, r1, #L_PTE_DIRTY + tst r1, #L_PTE_RDONLY | L_PTE_DIRTY + orrne r3, r3, #PTE_EXT_APX + + tst r1, #L_PTE_USER + orrne r3, r3, #PTE_EXT_AP1 +#ifdef CONFIG_CPU_USE_DOMAINS + @ allow kernel read/write access to read-only user pages + tstne r3, #PTE_EXT_APX + bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 +#endif + + tst r1, #L_PTE_XN + orrne r3, r3, #PTE_EXT_XN + + tst r1, #L_PTE_YOUNG + tstne r1, #L_PTE_PRESENT + moveq r3, #0 + + ARM( str r3, [r0, #2048]! ) + THUMB( add r0, r0, #2048 ) + THUMB( str r3, [r0] ) + mcr p15, 0, r0, c7, c10, 1 @ flush_pte +#endif + mov pc, lr +ENDPROC(cpu_v7_set_pte_ext) + + /* + * Memory region attributes with SCTLR.TRE=1 + * + * n = TEX[0],C,B + * TR = PRRR[2n+1:2n] - memory type + * IR = NMRR[2n+1:2n] - inner cacheable property + * OR = NMRR[2n+17:2n+16] - outer cacheable property + * + * n TR IR OR + * UNCACHED 000 00 + * BUFFERABLE 001 10 00 00 + * WRITETHROUGH 010 10 10 10 + * WRITEBACK 011 10 11 11 + * reserved 110 + * WRITEALLOC 111 10 01 01 + * DEV_SHARED 100 01 + * DEV_NONSHARED 100 01 + * DEV_WC 001 10 + * DEV_CACHED 011 10 + * + * Other attributes: + * + * DS0 = PRRR[16] = 0 - device shareable property + * DS1 = PRRR[17] = 1 - device shareable property + * NS0 = PRRR[18] = 0 - normal shareable property + * NS1 = PRRR[19] = 1 - normal shareable property + * NOS = PRRR[24+n] = 1 - not outer shareable + */ +.equ PRRR, 0xff0a81a8 +.equ NMRR, 0x40e040e0 + + /* + * Macro for setting up the TTBRx and TTBCR registers. + * - \ttb0 and \ttb1 updated with the corresponding flags. 
+ */ + .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp + mcr p15, 0, \zero, c2, c0, 2 @ TTB control register + ALT_SMP(orr \ttbr0, \ttbr0, #TTB_FLAGS_SMP) + ALT_UP(orr \ttbr0, \ttbr0, #TTB_FLAGS_UP) + ALT_SMP(orr \ttbr1, \ttbr1, #TTB_FLAGS_SMP) + ALT_UP(orr \ttbr1, \ttbr1, #TTB_FLAGS_UP) + mcr p15, 0, \ttbr1, c2, c0, 1 @ load TTB1 + .endm + + __CPUINIT + + /* AT + * TFR EV X F I D LR S + * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM + * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced + * 1 0 110 0011 1100 .111 1101 < we want + */ + .align 2 + .type v7_crval, #object +v7_crval: + crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c + + .previous diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 66a185f018a0..ed1a4d115331 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -19,24 +19,7 @@ #include "proc-macros.S" -#define TTB_S (1 << 1) -#define TTB_RGN_NC (0 << 3) -#define TTB_RGN_OC_WBWA (1 << 3) -#define TTB_RGN_OC_WT (2 << 3) -#define TTB_RGN_OC_WB (3 << 3) -#define TTB_NOS (1 << 5) -#define TTB_IRGN_NC ((0 << 0) | (0 << 6)) -#define TTB_IRGN_WBWA ((0 << 0) | (1 << 6)) -#define TTB_IRGN_WT ((1 << 0) | (0 << 6)) -#define TTB_IRGN_WB ((1 << 0) | (1 << 6)) - -/* PTWs cacheable, inner WB not shareable, outer WB not shareable */ -#define TTB_FLAGS_UP TTB_IRGN_WB|TTB_RGN_OC_WB -#define PMD_FLAGS_UP PMD_SECT_WB - -/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ -#define TTB_FLAGS_SMP TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA -#define PMD_FLAGS_SMP PMD_SECT_WBWA|PMD_SECT_S +#include "proc-v7-2level.S" ENTRY(cpu_v7_proc_init) mov pc, lr @@ -99,124 +82,9 @@ ENTRY(cpu_v7_dcache_clean_area) mov pc, lr ENDPROC(cpu_v7_dcache_clean_area) -/* - * cpu_v7_switch_mm(pgd_phys, tsk) - * - * Set the translation table base pointer to be pgd_phys - * - * - pgd_phys - physical address of new TTB - * - * It is assumed that: - * - we are not using split page tables - */ -ENTRY(cpu_v7_switch_mm) -#ifdef CONFIG_MMU - mov r2, #0 - ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id - ALT_SMP(orr r0, r0, #TTB_FLAGS_SMP) - ALT_UP(orr r0, r0, #TTB_FLAGS_UP) -#ifdef CONFIG_ARM_ERRATA_430973 - mcr p15, 0, r2, c7, c5, 6 @ flush BTAC/BTB -#endif -#ifdef CONFIG_ARM_ERRATA_754322 - dsb -#endif - mcr p15, 0, r2, c13, c0, 1 @ set reserved context ID - isb -1: mcr p15, 0, r0, c2, c0, 0 @ set TTB 0 - isb -#ifdef CONFIG_ARM_ERRATA_754322 - dsb -#endif - mcr p15, 0, r1, c13, c0, 1 @ set context ID - isb -#endif - mov pc, lr -ENDPROC(cpu_v7_switch_mm) - -/* - * cpu_v7_set_pte_ext(ptep, pte) - * - * Set a level 2 translation table entry. - * - * - ptep - pointer to level 2 translation table entry - * (hardware version is stored at +2048 bytes) - * - pte - PTE value to store - * - ext - value for extended PTE bits - */ -ENTRY(cpu_v7_set_pte_ext) -#ifdef CONFIG_MMU - str r1, [r0] @ linux version - - bic r3, r1, #0x000003f0 - bic r3, r3, #PTE_TYPE_MASK - orr r3, r3, r2 - orr r3, r3, #PTE_EXT_AP0 | 2 - - tst r1, #1 << 4 - orrne r3, r3, #PTE_EXT_TEX(1) - - eor r1, r1, #L_PTE_DIRTY - tst r1, #L_PTE_RDONLY | L_PTE_DIRTY - orrne r3, r3, #PTE_EXT_APX - - tst r1, #L_PTE_USER - orrne r3, r3, #PTE_EXT_AP1 -#ifdef CONFIG_CPU_USE_DOMAINS - @ allow kernel read/write access to read-only user pages - tstne r3, #PTE_EXT_APX - bicne r3, r3, #PTE_EXT_APX | PTE_EXT_AP0 -#endif - - tst r1, #L_PTE_XN - orrne r3, r3, #PTE_EXT_XN - - tst r1, #L_PTE_YOUNG - tstne r1, #L_PTE_PRESENT - moveq r3, #0 - - ARM( str r3, [r0, #2048]! 
) - THUMB( add r0, r0, #2048 ) - THUMB( str r3, [r0] ) - mcr p15, 0, r0, c7, c10, 1 @ flush_pte -#endif - mov pc, lr -ENDPROC(cpu_v7_set_pte_ext) - string cpu_v7_name, "ARMv7 Processor" .align - /* - * Memory region attributes with SCTLR.TRE=1 - * - * n = TEX[0],C,B - * TR = PRRR[2n+1:2n] - memory type - * IR = NMRR[2n+1:2n] - inner cacheable property - * OR = NMRR[2n+17:2n+16] - outer cacheable property - * - * n TR IR OR - * UNCACHED 000 00 - * BUFFERABLE 001 10 00 00 - * WRITETHROUGH 010 10 10 10 - * WRITEBACK 011 10 11 11 - * reserved 110 - * WRITEALLOC 111 10 01 01 - * DEV_SHARED 100 01 - * DEV_NONSHARED 100 01 - * DEV_WC 001 10 - * DEV_CACHED 011 10 - * - * Other attributes: - * - * DS0 = PRRR[16] = 0 - device shareable property - * DS1 = PRRR[17] = 1 - device shareable property - * NS0 = PRRR[18] = 0 - normal shareable property - * NS1 = PRRR[19] = 1 - normal shareable property - * NOS = PRRR[24+n] = 1 - not outer shareable - */ -.equ PRRR, 0xff0a81a8 -.equ NMRR, 0x40e040e0 - /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ .globl cpu_v7_suspend_size .equ cpu_v7_suspend_size, 4 * 7 @@ -379,12 +247,7 @@ __v7_setup: dsb #ifdef CONFIG_MMU mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs - mcr p15, 0, r10, c2, c0, 2 @ TTB control register - ALT_SMP(orr r4, r4, #TTB_FLAGS_SMP) - ALT_UP(orr r4, r4, #TTB_FLAGS_UP) - ALT_SMP(orr r8, r8, #TTB_FLAGS_SMP) - ALT_UP(orr r8, r8, #TTB_FLAGS_UP) - mcr p15, 0, r8, c2, c0, 1 @ load TTB1 + v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup ldr r5, =PRRR @ PRRR ldr r6, =NMRR @ NMRR mcr p15, 0, r5, c10, c2, 0 @ write PRRR @@ -406,16 +269,7 @@ __v7_setup: mov pc, lr @ return to head.S:__ret ENDPROC(__v7_setup) - /* AT - * TFR EV X F I D LR S - * .EEE ..EE PUI. .T.T 4RVI ZWRS BLDP WCAM - * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced - * 1 0 110 0011 1100 .111 1101 < we want - */ - .type v7_crval, #object -v7_crval: - crval clear=0x0120c302, mmuset=0x10c03c7d, ucset=0x00c01c7c - + .align 2 __v7_setup_stack: .space 4 * 11 @ 11 registers -- cgit v1.2.3 From da02877987e6e173ebba137d4e1e155e1f1151cd Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 22 Nov 2011 17:30:29 +0000 Subject: ARM: LPAE: Page table maintenance for the 3-level format This patch modifies the pgd/pmd/pte manipulation functions to support the 3-level page table format. Since there is no need for an 'ext' argument to cpu_set_pte_ext(), this patch conditionally defines a different prototype for this function when CONFIG_ARM_LPAE. The patch also introduces the L_PGD_SWAPPER flag to mark pgd entries pointing to pmd tables pre-allocated in the swapper_pg_dir and avoid trying to free them at run-time. This flag is 0 with the classic page table format. 
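To make the flag's mechanics concrete, here is a minimal standalone C sketch (illustrative only, not code from this patch; the 64-bit pgdval_t width and the bit position are taken from the pgtable-3level.h hunk below, and the entry value in main() is hypothetical):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdbool.h>

    typedef uint64_t pgdval_t;                 /* 64-bit LPAE first-level descriptor */

    #define L_PGD_SWAPPER ((pgdval_t)1 << 55)  /* software-only marker bit */

    /* Entries carrying the flag point at pmd tables pre-allocated in
       swapper_pg_dir, so a teardown path must skip freeing them. */
    static bool pgd_points_to_swapper_pmd(pgdval_t pgd)
    {
        return (pgd & L_PGD_SWAPPER) != 0;
    }

    int main(void)
    {
        pgdval_t entry = 0x40000003ull | L_PGD_SWAPPER;   /* hypothetical table entry */
        printf("%d\n", pgd_points_to_swapper_pmd(entry)); /* prints 1 */
        return 0;
    }

Bit 55 lies in a region of the table descriptor that the LPAE hardware walker ignores, which is what makes a software-only marker safe there.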
Signed-off-by: Catalin Marinas --- arch/arm/include/asm/pgalloc.h | 24 ++++++++++++++++ arch/arm/include/asm/pgtable-3level.h | 53 +++++++++++++++++++++++++++++++++++ arch/arm/include/asm/proc-fns.h | 21 ++++++++++++++ arch/arm/mm/ioremap.c | 8 ++++-- arch/arm/mm/pgd.c | 51 ++++++++++++++++++++++++++++++--- 5 files changed, 150 insertions(+), 7 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h index 7418894a737f..943504f53f57 100644 --- a/arch/arm/include/asm/pgalloc.h +++ b/arch/arm/include/asm/pgalloc.h @@ -25,6 +25,26 @@ #define _PAGE_USER_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_USER)) #define _PAGE_KERNEL_TABLE (PMD_TYPE_TABLE | PMD_BIT4 | PMD_DOMAIN(DOMAIN_KERNEL)) +#ifdef CONFIG_ARM_LPAE + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return (pmd_t *)get_zeroed_page(GFP_KERNEL | __GFP_REPEAT); +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + BUG_ON((unsigned long)pmd & (PAGE_SIZE-1)); + free_page((unsigned long)pmd); +} + +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE)); +} + +#else /* !CONFIG_ARM_LPAE */ + /* * Since we have only two-level page tables, these are trivial */ @@ -32,6 +52,8 @@ #define pmd_free(mm, pmd) do { } while (0) #define pud_populate(mm,pmd,pte) BUG() +#endif /* CONFIG_ARM_LPAE */ + extern pgd_t *pgd_alloc(struct mm_struct *mm); extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); @@ -109,7 +131,9 @@ static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte, { pmdval_t pmdval = (pte + PTE_HWTABLE_OFF) | prot; pmdp[0] = __pmd(pmdval); +#ifndef CONFIG_ARM_LPAE pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t)); +#endif flush_pmd_entry(pmdp); } diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h index 79bf0acc40db..759af70f9a0a 100644 --- a/arch/arm/include/asm/pgtable-3level.h +++ b/arch/arm/include/asm/pgtable-3level.h @@ -99,4 +99,57 @@ #define L_PTE_MT_DEV_CACHED (_AT(pteval_t, 3) << 2) /* normal inner write-back */ #define L_PTE_MT_MASK (_AT(pteval_t, 7) << 2) +/* + * Software PGD flags. + */ +#define L_PGD_SWAPPER (_AT(pgdval_t, 1) << 55) /* swapper_pg_dir entry */ + +#ifndef __ASSEMBLY__ + +#define pud_none(pud) (!pud_val(pud)) +#define pud_bad(pud) (!(pud_val(pud) & 2)) +#define pud_present(pud) (pud_val(pud)) + +#define pud_clear(pudp) \ + do { \ + *pudp = __pud(0); \ + clean_pmd_entry(pudp); \ + } while (0) + +#define set_pud(pudp, pud) \ + do { \ + *pudp = pud; \ + flush_pmd_entry(pudp); \ + } while (0) + +static inline pmd_t *pud_page_vaddr(pud_t pud) +{ + return __va(pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK); +} + +/* Find an entry in the second-level page table.. 
*/ +#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) +static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr) +{ + return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr); +} + +#define pmd_bad(pmd) (!(pmd_val(pmd) & 2)) + +#define copy_pmd(pmdpd,pmdps) \ + do { \ + *pmdpd = *pmdps; \ + flush_pmd_entry(pmdpd); \ + } while (0) + +#define pmd_clear(pmdp) \ + do { \ + *pmdp = __pmd(0); \ + clean_pmd_entry(pmdp); \ + } while (0) + +#define set_pte_ext(ptep,pte,ext) cpu_set_pte_ext(ptep,__pte(pte_val(pte)|(ext))) + +#endif /* __ASSEMBLY__ */ + #endif /* _ASM_PGTABLE_3LEVEL_H */ diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h index 9e92cb205e65..f3628fb3d2b3 100644 --- a/arch/arm/include/asm/proc-fns.h +++ b/arch/arm/include/asm/proc-fns.h @@ -65,7 +65,11 @@ extern struct processor { * Set a possibly extended PTE. Non-extended PTEs should * ignore 'ext'. */ +#ifdef CONFIG_ARM_LPAE + void (*set_pte_ext)(pte_t *ptep, pte_t pte); +#else void (*set_pte_ext)(pte_t *ptep, pte_t pte, unsigned int ext); +#endif /* Suspend/resume */ unsigned int suspend_size; @@ -79,7 +83,11 @@ extern void cpu_proc_fin(void); extern int cpu_do_idle(void); extern void cpu_dcache_clean_area(void *, int); extern void cpu_do_switch_mm(unsigned long pgd_phys, struct mm_struct *mm); +#ifdef CONFIG_ARM_LPAE +extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte); +#else extern void cpu_set_pte_ext(pte_t *ptep, pte_t pte, unsigned int ext); +#endif extern void cpu_reset(unsigned long addr) __attribute__((noreturn)); /* These three are private to arch/arm/kernel/suspend.c */ @@ -107,6 +115,18 @@ extern void cpu_resume(void); #define cpu_switch_mm(pgd,mm) cpu_do_switch_mm(virt_to_phys(pgd),mm) +#ifdef CONFIG_ARM_LPAE +#define cpu_get_pgd() \ + ({ \ + unsigned long pg, pg2; \ + __asm__("mrrc p15, 0, %0, %1, c2" \ + : "=r" (pg), "=r" (pg2) \ + : \ + : "cc"); \ + pg &= ~(PTRS_PER_PGD*sizeof(pgd_t)-1); \ + (pgd_t *)phys_to_virt(pg); \ + }) +#else #define cpu_get_pgd() \ ({ \ unsigned long pg; \ @@ -115,6 +135,7 @@ extern void cpu_resume(void); pg &= ~0x3fff; \ (pgd_t *)phys_to_virt(pg); \ }) +#endif #endif diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c index c3fa40da3b75..d1f78bacb015 100644 --- a/arch/arm/mm/ioremap.c +++ b/arch/arm/mm/ioremap.c @@ -64,7 +64,7 @@ void __check_kvm_seq(struct mm_struct *mm) } while (seq != init_mm.context.kvm_seq); } -#ifndef CONFIG_SMP +#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) /* * Section support is unsafe on SMP - If you iounmap and ioremap a region, * the other CPUs will not see this change until their next context switch. 
@@ -202,11 +202,13 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, unsigned long addr; struct vm_struct * area; +#ifndef CONFIG_ARM_LPAE /* * High mappings must be supersection aligned */ if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK)) return NULL; +#endif /* * Don't allow RAM to be mapped - this causes problems with ARMv6+ @@ -228,7 +230,7 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn, return NULL; addr = (unsigned long)area->addr; -#ifndef CONFIG_SMP +#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) if (DOMAIN_IO == 0 && (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) || cpu_is_xsc3()) && pfn >= 0x100000 && @@ -320,7 +322,7 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached) void __iounmap(volatile void __iomem *io_addr) { void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr); -#ifndef CONFIG_SMP +#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE) struct vm_struct **p, *tmp; /* diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index b2027c154b2a..a3e78ccabd65 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -10,6 +10,7 @@ #include #include #include +#include #include #include @@ -17,6 +18,14 @@ #include "mm.h" +#ifdef CONFIG_ARM_LPAE +#define __pgd_alloc() kmalloc(PTRS_PER_PGD * sizeof(pgd_t), GFP_KERNEL) +#define __pgd_free(pgd) kfree(pgd) +#else +#define __pgd_alloc() (pgd_t *)__get_free_pages(GFP_KERNEL, 2) +#define __pgd_free(pgd) free_pages((unsigned long)pgd, 2) +#endif + /* * need to get a 16k page for level 1 */ @@ -27,7 +36,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) pmd_t *new_pmd, *init_pmd; pte_t *new_pte, *init_pte; - new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2); + new_pgd = __pgd_alloc(); if (!new_pgd) goto no_pgd; @@ -42,10 +51,25 @@ pgd_t *pgd_alloc(struct mm_struct *mm) clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t)); +#ifdef CONFIG_ARM_LPAE + /* + * Allocate PMD table for modules and pkmap mappings. + */ + new_pud = pud_alloc(mm, new_pgd + pgd_index(MODULES_VADDR), + MODULES_VADDR); + if (!new_pud) + goto no_pud; + + new_pmd = pmd_alloc(mm, new_pud, 0); + if (!new_pmd) + goto no_pmd; +#endif + if (!vectors_high()) { /* * On ARM, first page must always be allocated since it - * contains the machine vectors. + * contains the machine vectors. The vectors are always high + * with LPAE. */ new_pud = pud_alloc(mm, new_pgd, 0); if (!new_pud) @@ -74,7 +98,7 @@ no_pte: no_pmd: pud_free(mm, new_pud); no_pud: - free_pages((unsigned long)new_pgd, 2); + __pgd_free(new_pgd); no_pgd: return NULL; } @@ -111,5 +135,24 @@ no_pud: pgd_clear(pgd); pud_free(mm, pud); no_pgd: - free_pages((unsigned long) pgd_base, 2); +#ifdef CONFIG_ARM_LPAE + /* + * Free modules/pkmap or identity pmd tables. + */ + for (pgd = pgd_base; pgd < pgd_base + PTRS_PER_PGD; pgd++) { + if (pgd_none_or_clear_bad(pgd)) + continue; + if (pgd_val(*pgd) & L_PGD_SWAPPER) + continue; + pud = pud_offset(pgd, 0); + if (pud_none_or_clear_bad(pud)) + continue; + pmd = pmd_offset(pud, 0); + pud_clear(pud); + pmd_free(mm, pmd); + pgd_clear(pgd); + pud_free(mm, pud); + } +#endif + __pgd_free(pgd_base); } -- cgit v1.2.3 From 1b6ba46b7efa31055eb993a6f2c6bbcb8b35b001 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 22 Nov 2011 17:30:29 +0000 Subject: ARM: LPAE: MMU setup for the 3-level page table format This patch adds the MMU initialisation for the LPAE page table format. The swapper_pg_dir size with LPAE is 5 rather than 4 pages. 
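A quick arithmetic check of that size (an editorial sketch, assuming 4KB pages; the PTRS_PER_PGD value of 4 and the 0x5000 total mirror the head.S hunk that follows):

    #include <assert.h>
    #include <stdint.h>

    #define PAGE_SIZE    4096u
    #define PTRS_PER_PGD 4u     /* four 1GB entries cover the 32-bit VA space */

    int main(void)
    {
        /* one PGD page (only 4 x 8 bytes used) + one 512-entry PMD page per entry */
        uint32_t pg_dir_size = PAGE_SIZE + PTRS_PER_PGD * PAGE_SIZE;
        assert(pg_dir_size == 0x5000);  /* PG_DIR_SIZE with CONFIG_ARM_LPAE */
        return 0;
    }

The extra page is the PGD level itself; for the static swapper_pg_dir it occupies a full page even though only 32 bytes of it hold entries.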
A new proc-v7-3level.S file contains the TTB initialisation, context switch and PTE setting code with the LPAE. The TTBRx split is based on the PAGE_OFFSET with TTBR1 used for the kernel mappings. The 36-bit mappings (supersections) and a few other memory types in mmu.c are conditionally compiled. Signed-off-by: Catalin Marinas --- arch/arm/kernel/head.S | 45 ++++++++++++- arch/arm/mm/mmu.c | 30 +++++++++ arch/arm/mm/proc-macros.S | 5 +- arch/arm/mm/proc-v7-3level.S | 150 +++++++++++++++++++++++++++++++++++++++++++ arch/arm/mm/proc-v7.S | 25 +++++--- 5 files changed, 243 insertions(+), 12 deletions(-) create mode 100644 arch/arm/mm/proc-v7-3level.S (limited to 'arch/arm/mm') diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 54eb94aff6cd..c8e797baaf55 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -39,8 +39,14 @@ #error KERNEL_RAM_VADDR must start at 0xXXXX8000 #endif +#ifdef CONFIG_ARM_LPAE + /* LPAE requires an additional page for the PGD */ +#define PG_DIR_SIZE 0x5000 +#define PMD_ORDER 3 +#else #define PG_DIR_SIZE 0x4000 #define PMD_ORDER 2 +#endif .globl swapper_pg_dir .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE @@ -164,6 +170,25 @@ __create_page_tables: teq r0, r6 bne 1b +#ifdef CONFIG_ARM_LPAE + /* + * Build the PGD table (first level) to point to the PMD table. A PGD + * entry is 64-bit wide. + */ + mov r0, r4 + add r3, r4, #0x1000 @ first PMD table address + orr r3, r3, #3 @ PGD block type + mov r6, #4 @ PTRS_PER_PGD + mov r7, #1 << (55 - 32) @ L_PGD_SWAPPER +1: str r3, [r0], #4 @ set bottom PGD entry bits + str r7, [r0], #4 @ set top PGD entry bits + add r3, r3, #0x1000 @ next PMD table + subs r6, r6, #1 + bne 1b + + add r4, r4, #0x1000 @ point to the PMD tables +#endif + ldr r7, [r10, #PROCINFO_MM_MMUFLAGS] @ mm_mmuflags /* @@ -219,8 +244,8 @@ __create_page_tables: #endif /* - * Then map boot params address in r2 or - * the first 1MB of ram if boot params address is not specified. + * Then map boot params address in r2 or the first 1MB (2MB with LPAE) + * of ram if boot params address is not specified. 
*/ mov r0, r2, lsr #SECTION_SHIFT movs r0, r0, lsl #SECTION_SHIFT @@ -251,7 +276,15 @@ __create_page_tables: mov r3, r7, lsr #SECTION_SHIFT ldr r7, [r10, #PROCINFO_IO_MMUFLAGS] @ io_mmuflags orr r3, r7, r3, lsl #SECTION_SHIFT +#ifdef CONFIG_ARM_LPAE + mov r7, #1 << (54 - 32) @ XN +#else + orr r3, r3, #PMD_SECT_XN +#endif 1: str r3, [r0], #4 +#ifdef CONFIG_ARM_LPAE + str r7, [r0], #4 +#endif add r3, r3, #1 << SECTION_SHIFT cmp r0, r6 blo 1b @@ -282,6 +315,9 @@ __create_page_tables: add r0, r4, #0xd8000000 >> (SECTION_SHIFT - PMD_ORDER) str r3, [r0] #endif +#endif +#ifdef CONFIG_ARM_LPAE + sub r4, r4, #0x1000 @ point to the PGD table #endif mov pc, lr ENDPROC(__create_page_tables) @@ -374,12 +410,17 @@ __enable_mmu: #ifdef CONFIG_CPU_ICACHE_DISABLE bic r0, r0, #CR_I #endif +#ifdef CONFIG_ARM_LPAE + mov r5, #0 + mcrr p15, 0, r4, r5, c2 @ load TTBR0 +#else mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \ domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \ domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \ domain_val(DOMAIN_IO, DOMAIN_CLIENT)) mcr p15, 0, r5, c3, c0, 0 @ load domain access register mcr p15, 0, r4, c2, c0, 0 @ load page table pointer +#endif b __turn_mmu_on ENDPROC(__enable_mmu) diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index dc8c550e6cbd..1935311e17fc 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -150,6 +150,7 @@ static int __init early_nowrite(char *__unused) } early_param("nowb", early_nowrite); +#ifndef CONFIG_ARM_LPAE static int __init early_ecc(char *p) { if (memcmp(p, "on", 2) == 0) @@ -159,6 +160,7 @@ static int __init early_ecc(char *p) return 0; } early_param("ecc", early_ecc); +#endif static int __init noalign_setup(char *__unused) { @@ -228,10 +230,12 @@ static struct mem_type mem_types[] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN, .domain = DOMAIN_KERNEL, }, +#ifndef CONFIG_ARM_LPAE [MT_MINICLEAN] = { .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE, .domain = DOMAIN_KERNEL, }, +#endif [MT_LOW_VECTORS] = { .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY, @@ -429,6 +433,7 @@ static void __init build_mem_type_table(void) * ARMv6 and above have extended page tables. */ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) { +#ifndef CONFIG_ARM_LPAE /* * Mark cache clean areas and XIP ROM read only * from SVC mode and no access from userspace. @@ -436,6 +441,7 @@ static void __init build_mem_type_table(void) mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE; +#endif if (is_smp()) { /* @@ -474,6 +480,18 @@ static void __init build_mem_type_table(void) mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE; } +#ifdef CONFIG_ARM_LPAE + /* + * Do not generate access flag faults for the kernel mappings. 
+ */ + for (i = 0; i < ARRAY_SIZE(mem_types); i++) { + mem_types[i].prot_pte |= PTE_EXT_AF; + mem_types[i].prot_sect |= PMD_SECT_AF; + } + kern_pgprot |= PTE_EXT_AF; + vecs_pgprot |= PTE_EXT_AF; +#endif + for (i = 0; i < 16; i++) { unsigned long v = pgprot_val(protection_map[i]); protection_map[i] = __pgprot(v | user_pgprot); @@ -572,8 +590,10 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr, if (((addr | end | phys) & ~SECTION_MASK) == 0) { pmd_t *p = pmd; +#ifndef CONFIG_ARM_LPAE if (addr & SECTION_SIZE) pmd++; +#endif do { *pmd = __pmd(phys | type->prot_sect); @@ -603,6 +623,7 @@ static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, } while (pud++, addr = next, addr != end); } +#ifndef CONFIG_ARM_LPAE static void __init create_36bit_mapping(struct map_desc *md, const struct mem_type *type) { @@ -662,6 +683,7 @@ static void __init create_36bit_mapping(struct map_desc *md, pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT; } while (addr != end); } +#endif /* !CONFIG_ARM_LPAE */ /* * Create the page directory entries and any necessary @@ -693,6 +715,7 @@ static void __init create_mapping(struct map_desc *md) type = &mem_types[md->type]; +#ifndef CONFIG_ARM_LPAE /* * Catch 36-bit addresses */ @@ -700,6 +723,7 @@ static void __init create_mapping(struct map_desc *md) create_36bit_mapping(md, type); return; } +#endif addr = md->virtual & PAGE_MASK; phys = __pfn_to_phys(md->pfn); @@ -897,7 +921,13 @@ static inline void prepare_page_table(void) pmd_clear(pmd_off_k(addr)); } +#ifdef CONFIG_ARM_LPAE +/* the first page is reserved for pgd */ +#define SWAPPER_PG_DIR_SIZE (PAGE_SIZE + \ + PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t)) +#else #define SWAPPER_PG_DIR_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) +#endif /* * Reserve the special regions of memory diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 307a4def8d3a..2d8ff3ad86d3 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -91,8 +91,9 @@ #if L_PTE_SHARED != PTE_EXT_SHARED #error PTE shared bit mismatch #endif -#if (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ - L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED +#if !defined (CONFIG_ARM_LPAE) && \ + (L_PTE_XN+L_PTE_USER+L_PTE_RDONLY+L_PTE_DIRTY+L_PTE_YOUNG+\ + L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED #error Invalid Linux PTE bit settings #endif #endif /* CONFIG_MMU */ diff --git a/arch/arm/mm/proc-v7-3level.S b/arch/arm/mm/proc-v7-3level.S new file mode 100644 index 000000000000..8de0f1dd1549 --- /dev/null +++ b/arch/arm/mm/proc-v7-3level.S @@ -0,0 +1,150 @@ +/* + * arch/arm/mm/proc-v7-3level.S + * + * Copyright (C) 2001 Deep Blue Solutions Ltd. + * Copyright (C) 2011 ARM Ltd. + * Author: Catalin Marinas + * based on arch/arm/mm/proc-v7-2level.S + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#define TTB_IRGN_NC (0 << 8) +#define TTB_IRGN_WBWA (1 << 8) +#define TTB_IRGN_WT (2 << 8) +#define TTB_IRGN_WB (3 << 8) +#define TTB_RGN_NC (0 << 10) +#define TTB_RGN_OC_WBWA (1 << 10) +#define TTB_RGN_OC_WT (2 << 10) +#define TTB_RGN_OC_WB (3 << 10) +#define TTB_S (3 << 12) +#define TTB_EAE (1 << 31) + +/* PTWs cacheable, inner WB not shareable, outer WB not shareable */ +#define TTB_FLAGS_UP (TTB_IRGN_WB|TTB_RGN_OC_WB) +#define PMD_FLAGS_UP (PMD_SECT_WB) + +/* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */ +#define TTB_FLAGS_SMP (TTB_IRGN_WBWA|TTB_S|TTB_RGN_OC_WBWA) +#define PMD_FLAGS_SMP (PMD_SECT_WBWA|PMD_SECT_S) + +/* + * cpu_v7_switch_mm(pgd_phys, tsk) + * + * Set the translation table base pointer to be pgd_phys (physical address of + * the new TTB). + */ +ENTRY(cpu_v7_switch_mm) +#ifdef CONFIG_MMU + ldr r1, [r1, #MM_CONTEXT_ID] @ get mm->context.id + and r3, r1, #0xff + mov r3, r3, lsl #(48 - 32) @ ASID + mcrr p15, 0, r0, r3, c2 @ set TTB 0 + isb +#endif + mov pc, lr +ENDPROC(cpu_v7_switch_mm) + +/* + * cpu_v7_set_pte_ext(ptep, pte) + * + * Set a level 2 translation table entry. + * - ptep - pointer to level 3 translation table entry + * - pte - PTE value to store (64-bit in r2 and r3) + */ +ENTRY(cpu_v7_set_pte_ext) +#ifdef CONFIG_MMU + tst r2, #L_PTE_PRESENT + beq 1f + tst r3, #1 << (55 - 32) @ L_PTE_DIRTY + orreq r2, #L_PTE_RDONLY +1: strd r2, r3, [r0] + mcr p15, 0, r0, c7, c10, 1 @ flush_pte +#endif + mov pc, lr +ENDPROC(cpu_v7_set_pte_ext) + + /* + * Memory region attributes for LPAE (defined in pgtable-3level.h): + * + * n = AttrIndx[2:0] + * + * n MAIR + * UNCACHED 000 00000000 + * BUFFERABLE 001 01000100 + * DEV_WC 001 01000100 + * WRITETHROUGH 010 10101010 + * WRITEBACK 011 11101110 + * DEV_CACHED 011 11101110 + * DEV_SHARED 100 00000100 + * DEV_NONSHARED 100 00000100 + * unused 101 + * unused 110 + * WRITEALLOC 111 11111111 + */ +.equ PRRR, 0xeeaa4400 @ MAIR0 +.equ NMRR, 0xff000004 @ MAIR1 + + /* + * Macro for setting up the TTBRx and TTBCR registers. + * - \ttbr1 updated. + */ + .macro v7_ttb_setup, zero, ttbr0, ttbr1, tmp + ldr \tmp, =swapper_pg_dir @ swapper_pg_dir virtual address + cmp \ttbr1, \tmp @ PHYS_OFFSET > PAGE_OFFSET? (branch below) + mrc p15, 0, \tmp, c2, c0, 2 @ TTB control register + orr \tmp, \tmp, #TTB_EAE + ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP) + ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP) + ALT_SMP(orr \tmp, \tmp, #TTB_FLAGS_SMP << 16) + ALT_UP(orr \tmp, \tmp, #TTB_FLAGS_UP << 16) + /* + * TTBR0/TTBR1 split (PAGE_OFFSET): + * 0x40000000: T0SZ = 2, T1SZ = 0 (not used) + * 0x80000000: T0SZ = 0, T1SZ = 1 + * 0xc0000000: T0SZ = 0, T1SZ = 2 + * + * Only use this feature if PHYS_OFFSET <= PAGE_OFFSET, otherwise + * booting secondary CPUs would end up using TTBR1 for the identity + * mapping set up in TTBR0. + */ + bhi 9001f @ PHYS_OFFSET > PAGE_OFFSET? 
+ orr \tmp, \tmp, #(((PAGE_OFFSET >> 30) - 1) << 16) @ TTBCR.T1SZ +#if defined CONFIG_VMSPLIT_2G + /* PAGE_OFFSET == 0x80000000, T1SZ == 1 */ + add \ttbr1, \ttbr1, #1 << 4 @ skip two L1 entries +#elif defined CONFIG_VMSPLIT_3G + /* PAGE_OFFSET == 0xc0000000, T1SZ == 2 */ + add \ttbr1, \ttbr1, #4096 * (1 + 3) @ only L2 used, skip pgd+3*pmd +#endif + /* CONFIG_VMSPLIT_1G does not need TTBR1 adjustment */ +9001: mcr p15, 0, \tmp, c2, c0, 2 @ TTB control register + mcrr p15, 1, \ttbr1, \zero, c2 @ load TTBR1 + .endm + + __CPUINIT + + /* + * AT + * TFR EV X F IHD LR S + * .EEE ..EE PUI. .TAT 4RVI ZWRS BLDP WCAM + * rxxx rrxx xxx0 0101 xxxx xxxx x111 xxxx < forced + * 11 0 110 1 0011 1100 .111 1101 < we want + */ + .align 2 + .type v7_crval, #object +v7_crval: + crval clear=0x0120c302, mmuset=0x30c23c7d, ucset=0x00c01c7c + + .previous diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index ed1a4d115331..7efa2a721d5d 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -19,7 +19,11 @@ #include "proc-macros.S" +#ifdef CONFIG_ARM_LPAE +#include "proc-v7-3level.S" +#else #include "proc-v7-2level.S" +#endif ENTRY(cpu_v7_proc_init) mov pc, lr @@ -87,7 +91,7 @@ ENDPROC(cpu_v7_dcache_clean_area) /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ .globl cpu_v7_suspend_size -.equ cpu_v7_suspend_size, 4 * 7 +.equ cpu_v7_suspend_size, 4 * 8 #ifdef CONFIG_ARM_CPU_SUSPEND ENTRY(cpu_v7_do_suspend) stmfd sp!, {r4 - r10, lr} @@ -96,10 +100,11 @@ ENTRY(cpu_v7_do_suspend) stmia r0!, {r4 - r5} mrc p15, 0, r6, c3, c0, 0 @ Domain ID mrc p15, 0, r7, c2, c0, 1 @ TTB 1 + mrc p15, 0, r11, c2, c0, 2 @ TTB control register mrc p15, 0, r8, c1, c0, 0 @ Control register mrc p15, 0, r9, c1, c0, 1 @ Auxiliary control register mrc p15, 0, r10, c1, c0, 2 @ Co-processor access control - stmia r0, {r6 - r10} + stmia r0, {r6 - r11} ldmfd sp!, {r4 - r10, pc} ENDPROC(cpu_v7_do_suspend) @@ -111,13 +116,15 @@ ENTRY(cpu_v7_do_resume) ldmia r0!, {r4 - r5} mcr p15, 0, r4, c13, c0, 0 @ FCSE/PID mcr p15, 0, r5, c13, c0, 3 @ User r/o thread ID - ldmia r0, {r6 - r10} + ldmia r0, {r6 - r11} mcr p15, 0, r6, c3, c0, 0 @ Domain ID +#ifndef CONFIG_ARM_LPAE ALT_SMP(orr r1, r1, #TTB_FLAGS_SMP) ALT_UP(orr r1, r1, #TTB_FLAGS_UP) +#endif mcr p15, 0, r1, c2, c0, 0 @ TTB 0 mcr p15, 0, r7, c2, c0, 1 @ TTB 1 - mcr p15, 0, ip, c2, c0, 2 @ TTB control register + mcr p15, 0, r11, c2, c0, 2 @ TTB control register mrc p15, 0, r4, c1, c0, 1 @ Read Auxiliary control register teq r4, r9 @ Is it already set? mcrne p15, 0, r9, c1, c0, 1 @ No, so write it @@ -291,11 +298,11 @@ __v7_setup_stack: */ .macro __v7_proc initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0 ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ - PMD_FLAGS_SMP | \mm_mmuflags) + PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags) ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \ - PMD_FLAGS_UP | \mm_mmuflags) - .long PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_AP_WRITE | \ - PMD_SECT_AP_READ | \io_mmuflags + PMD_SECT_AF | PMD_FLAGS_UP | \mm_mmuflags) + .long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | \ + PMD_SECT_AP_READ | PMD_SECT_AF | \io_mmuflags W(b) \initfunc .long cpu_arch_name .long cpu_elf_name @@ -308,6 +315,7 @@ __v7_setup_stack: .long v7_cache_fns .endm +#ifndef CONFIG_ARM_LPAE /* * ARM Ltd. Cortex A5 processor. */ @@ -327,6 +335,7 @@ __v7_ca9mp_proc_info: .long 0xff0ffff0 __v7_proc __v7_ca9mp_setup .size __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info +#endif /* CONFIG_ARM_LPAE */ /* * ARM Ltd. Cortex A15 processor. 
-- cgit v1.2.3 From f7b8156d150f7383b42622a9219b230b36435b4a Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 22 Nov 2011 17:30:31 +0000 Subject: ARM: LPAE: Add fault handling support The DFSR and IFSR register format is different when LPAE is enabled. In addition, DFSR and IFSR have similar definitions for the fault type. This modifies the fault code to correctly handle the new format. Signed-off-by: Catalin Marinas --- arch/arm/include/asm/system.h | 8 +++++ arch/arm/kernel/hw_breakpoint.c | 8 ++--- arch/arm/mm/alignment.c | 2 +- arch/arm/mm/fault.c | 15 +++++++++ arch/arm/mm/fault.h | 8 +++++ arch/arm/mm/fsr-3level.c | 68 +++++++++++++++++++++++++++++++++++++++++ 6 files changed, 104 insertions(+), 5 deletions(-) create mode 100644 arch/arm/mm/fsr-3level.c (limited to 'arch/arm/mm') diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h index fe7de7571bac..53785828744c 100644 --- a/arch/arm/include/asm/system.h +++ b/arch/arm/include/asm/system.h @@ -80,6 +80,14 @@ struct siginfo; void arm_notify_die(const char *str, struct pt_regs *regs, struct siginfo *info, unsigned long err, unsigned long trap); +#ifdef CONFIG_ARM_LPAE +#define FAULT_CODE_ALIGNMENT 33 +#define FAULT_CODE_DEBUG 34 +#else +#define FAULT_CODE_ALIGNMENT 1 +#define FAULT_CODE_DEBUG 2 +#endif + void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), int sig, int code, const char *name); diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c index 814a52a9dc39..d6a95ef9131d 100644 --- a/arch/arm/kernel/hw_breakpoint.c +++ b/arch/arm/kernel/hw_breakpoint.c @@ -1016,10 +1016,10 @@ static int __init arch_hw_breakpoint_init(void) } /* Register debug fault handler. */ - hook_fault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, - "watchpoint debug exception"); - hook_ifault_code(2, hw_breakpoint_pending, SIGTRAP, TRAP_HWBKPT, - "breakpoint debug exception"); + hook_fault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, + TRAP_HWBKPT, "watchpoint debug exception"); + hook_ifault_code(FAULT_CODE_DEBUG, hw_breakpoint_pending, SIGTRAP, + TRAP_HWBKPT, "breakpoint debug exception"); /* Register hotplug notifier. */ register_cpu_notifier(&dbg_reset_nb); diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c index c335c76e0d88..caf14dc059e5 100644 --- a/arch/arm/mm/alignment.c +++ b/arch/arm/mm/alignment.c @@ -968,7 +968,7 @@ static int __init alignment_init(void) ai_usermode = safe_usermode(ai_usermode, false); } - hook_fault_code(1, do_alignment, SIGBUS, BUS_ADRALN, + hook_fault_code(FAULT_CODE_ALIGNMENT, do_alignment, SIGBUS, BUS_ADRALN, "alignment exception"); /* diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 2a0271677725..eb5520fc755f 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -110,8 +110,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr) pte = pte_offset_map(pmd, addr); printk(", *pte=%08llx", (long long)pte_val(*pte)); +#ifndef CONFIG_ARM_LPAE printk(", *ppte=%08llx", (long long)pte_val(pte[PTE_HWTABLE_PTRS])); +#endif pte_unmap(pte); } while(0); @@ -428,6 +430,12 @@ do_translation_fault(unsigned long addr, unsigned int fsr, pmd = pmd_offset(pud, addr); pmd_k = pmd_offset(pud_k, addr); +#ifdef CONFIG_ARM_LPAE + /* + * Only one hardware entry per PMD with LPAE. + */ + index = 0; +#else /* * On ARM one Linux PGD entry contains two hardware entries (see page * tables layout in pgtable.h). 
We normally guarantee that we always @@ -437,6 +445,7 @@ do_translation_fault, * for the first of pair. */ index = (addr >> SECTION_SHIFT) & 1; +#endif if (pmd_none(pmd_k[index])) goto bad_area; @@ -484,7 +493,11 @@ struct fsr_info { }; /* FSR definition */ +#ifdef CONFIG_ARM_LPAE +#include "fsr-3level.c" +#else #include "fsr-2level.c" +#endif void __init hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *), @@ -553,6 +566,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs) arm_notify_die("", regs, &info, ifsr, 0); } +#ifndef CONFIG_ARM_LPAE static int __init exceptions_init(void) { if (cpu_architecture() >= CPU_ARCH_ARMv6) { @@ -575,3 +589,4 @@ } arch_initcall(exceptions_init); +#endif diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h index 25b45c105be2..cf08bdfbe0d6 100644 --- a/arch/arm/mm/fault.h +++ b/arch/arm/mm/fault.h @@ -8,11 +8,19 @@ #define FSR_WRITE (1 << 11) #define FSR_FS4 (1 << 10) #define FSR_FS3_0 (15) +#define FSR_FS5_0 (0x3f) +#ifdef CONFIG_ARM_LPAE +static inline int fsr_fs(unsigned int fsr) +{ + return fsr & FSR_FS5_0; +} +#else static inline int fsr_fs(unsigned int fsr) { return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6; } +#endif void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs); unsigned long search_exception_table(unsigned long addr); diff --git a/arch/arm/mm/fsr-3level.c b/arch/arm/mm/fsr-3level.c new file mode 100644 index 000000000000..05a4e9431836 --- /dev/null +++ b/arch/arm/mm/fsr-3level.c @@ -0,0 +1,68 @@ +static struct fsr_info fsr_info[] = { + { do_bad, SIGBUS, 0, "unknown 0" }, + { do_bad, SIGBUS, 0, "unknown 1" }, + { do_bad, SIGBUS, 0, "unknown 2" }, + { do_bad, SIGBUS, 0, "unknown 3" }, + { do_bad, SIGBUS, 0, "reserved translation fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, + { do_page_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, + { do_bad, SIGBUS, 0, "reserved access flag fault" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, + { do_bad, SIGBUS, 0, "reserved permission fault" }, + { do_bad, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, + { do_sect_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, + { do_bad, SIGBUS, 0, "synchronous external abort" }, + { do_bad, SIGBUS, 0, "asynchronous external abort" }, + { do_bad, SIGBUS, 0, "unknown 18" }, + { do_bad, SIGBUS, 0, "unknown 19" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous abort (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error" }, + { do_bad, SIGBUS, 0, "asynchronous parity error" }, + { do_bad, SIGBUS, 0, "unknown 26" }, + { do_bad, SIGBUS, 0, "unknown 27" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" }, + { do_bad, SIGBUS, 0, "unknown 32" }, + { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" }, + { do_bad, SIGBUS, 0, "debug event" }, + { do_bad, SIGBUS, 0, "unknown 35" }, + { do_bad, SIGBUS, 0, "unknown 36" }, + { do_bad, SIGBUS, 0, "unknown 37" }, + { do_bad, SIGBUS, 0, "unknown 38" }, + { do_bad, SIGBUS, 0, "unknown 39" }, + { do_bad, SIGBUS, 0, "unknown 40" }, + { do_bad, SIGBUS, 0, "unknown 41" }, + { do_bad, SIGBUS, 0, "unknown 42" }, + { do_bad, SIGBUS, 0, "unknown 43" }, + { do_bad, SIGBUS, 0, "unknown 44" }, + { do_bad, SIGBUS, 0, "unknown 45" }, + { do_bad, SIGBUS, 0, "unknown 46" }, + { do_bad, SIGBUS, 0, "unknown 47" }, + { do_bad, SIGBUS, 0, "unknown 48" }, + { do_bad, SIGBUS, 0, "unknown 49" }, + { do_bad, SIGBUS, 0, "unknown 50" }, + { do_bad, SIGBUS, 0, "unknown 51" }, + { do_bad, SIGBUS, 0, "implementation fault (lockdown abort)" }, + { do_bad, SIGBUS, 0, "unknown 53" }, + { do_bad, SIGBUS, 0, "unknown 54" }, + { do_bad, SIGBUS, 0, "unknown 55" }, + { do_bad, SIGBUS, 0, "unknown 56" }, + { do_bad, SIGBUS, 0, "unknown 57" }, + { do_bad, SIGBUS, 0, "implementation fault (coprocessor abort)" }, + { do_bad, SIGBUS, 0, "unknown 59" }, + { do_bad, SIGBUS, 0, "unknown 60" }, + { do_bad, SIGBUS, 0, "unknown 61" }, + { do_bad, SIGBUS, 0, "unknown 62" }, + { do_bad, SIGBUS, 0, "unknown 63" }, +}; + +#define ifsr_info fsr_info -- cgit v1.2.3 From 14d8c9512aef5bf25c017d1b331de51c7928c5d4 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 22 Nov 2011 17:30:31 +0000 Subject: ARM: LPAE: Add context switching support With LPAE, TTBRx registers are 64-bit. The ASID is stored in TTBR0 rather than a separate Context ID register. This patch makes the necessary changes to handle context switching on LPAE. Signed-off-by: Catalin Marinas --- arch/arm/mm/context.c | 19 +++++++++++++++++-- 1 file changed, 17 insertions(+), 2 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c index 93aac068da94..ee9bb363d606 100644 --- a/arch/arm/mm/context.c +++ b/arch/arm/mm/context.c @@ -22,6 +22,21 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION; DEFINE_PER_CPU(struct mm_struct *, current_mm); #endif +#ifdef CONFIG_ARM_LPAE +#define cpu_set_asid(asid) { \ + unsigned long ttbl, ttbh; \ + asm volatile( \ + " mrrc p15, 0, %0, %1, c2 @ read TTBR0\n" \ + " mov %1, %2, lsl #(48 - 32) @ set ASID\n" \ + " mcrr p15, 0, %0, %1, c2 @ set TTBR0\n" \ + : "=&r" (ttbl), "=&r" (ttbh) \ + : "r" (asid & ~ASID_MASK)); \ +} +#else +#define cpu_set_asid(asid) \ + asm(" mcr p15, 0, %0, c13, c0, 1\n" : : "r" (asid)) +#endif + /* * We fork()ed a process, and we need a new context for the child * to run in.
We reserve version 0 for initial tasks so we will @@ -37,7 +52,7 @@ void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) static void flush_context(void) { /* set the reserved ASID before flushing the TLB */ - asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0)); + cpu_set_asid(0); isb(); local_flush_tlb_all(); if (icache_is_vivt_asid_tagged()) { @@ -99,7 +114,7 @@ static void reset_context(void *info) set_mm_context(mm, asid); /* set the new ASID */ - asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id)); + cpu_set_asid(mm->context.id); isb(); } -- cgit v1.2.3 From ae2de101739c5a2a43a23a74a0d43aea810fb5a8 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 22 Nov 2011 17:30:32 +0000 Subject: ARM: LPAE: Add identity mapping support for the 3-level page table format With LPAE, the pgd is a separate page table with entries pointing to the pmd. The identity_mapping_add() function needs to ensure that the pgd is populated before populating the pmd level. The do..while blocks now loop over the pmd in order to have the same implementation for the two page table formats. The pmd_addr_end() definition has been removed and the generic one used instead. The pmd clean-up is done in the pgd_free() function. Signed-off-by: Catalin Marinas --- arch/arm/mm/idmap.c | 28 +++++++++++++++++++++++++++- 1 file changed, 27 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 660f1bc68f99..feacf4c76712 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -8,6 +8,31 @@ pgd_t *idmap_pgd; +#ifdef CONFIG_ARM_LPAE +static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, + unsigned long prot) +{ + pmd_t *pmd; + unsigned long next; + + if (pud_none_or_clear_bad(pud) || (pud_val(*pud) & L_PGD_SWAPPER)) { + pmd = pmd_alloc_one(&init_mm, addr); + if (!pmd) { + pr_warning("Failed to allocate identity pmd.\n"); + return; + } + pud_populate(&init_mm, pud, pmd); + pmd += pmd_index(addr); + } else + pmd = pmd_offset(pud, addr); + + do { + next = pmd_addr_end(addr, end); + *pmd = __pmd((addr & PMD_MASK) | prot); + flush_pmd_entry(pmd); + } while (pmd++, addr = next, addr != end); +} +#else /* !CONFIG_ARM_LPAE */ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, unsigned long prot) { @@ -19,6 +44,7 @@ static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, pmd[1] = __pmd(addr); flush_pmd_entry(pmd); } +#endif /* CONFIG_ARM_LPAE */ static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long prot) @@ -36,7 +62,7 @@ static void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long e { unsigned long prot, next; - prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE; + prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AF; if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale()) prot |= PMD_BIT4; -- cgit v1.2.3 From 77f73a2c8e869b035e71eea5cae07c30fe4bded0 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 22 Nov 2011 17:30:32 +0000 Subject: ARM: LPAE: mark memory banks with start > ULONG_MAX as highmem Memory banks living outside of the 32-bit physical address space do not have a 1:1 pa <-> va mapping and therefore the __va macro may wrap. This patch ensures that such banks are marked as highmem so that the Kernel doesn't try to split them up when it sees that the wrapped virtual address overlaps the vmalloc space. 
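The wrap is easy to demonstrate in isolation (a hedged sketch, not kernel code: the PAGE_OFFSET value assumes a 3G/1G split, PHYS_OFFSET assumes RAM starting at physical 0, and the uint32_t return models the 32-bit virtual address space):

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_OFFSET 0xC0000000u   /* assumed VMSPLIT_3G */
    #define PHYS_OFFSET 0x00000000u   /* assumed start of RAM */

    /* Model of a 32-bit __va(): the arithmetic truncates to 32 bits. */
    static uint32_t model_va(uint64_t phys)
    {
        return (uint32_t)(phys - PHYS_OFFSET + PAGE_OFFSET);
    }

    int main(void)
    {
        /* A bank starting at 4GB wraps to PAGE_OFFSET, aliasing the start
           of lowmem, so vmalloc-overlap checks would misfire on it. */
        printf("0x%" PRIx32 "\n", model_va(0x100000000ull)); /* 0xc0000000 */
        return 0;
    }

Marking such banks highmem up front sidesteps any decision based on their meaningless __va() translation.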
Signed-off-by: Will Deacon Signed-off-by: Catalin Marinas Acked-by: Nicolas Pitre --- arch/arm/mm/mmu.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 1935311e17fc..b836d6b2258b 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -799,6 +799,9 @@ void __init sanity_check_meminfo(void) struct membank *bank = &meminfo.bank[j]; *bank = meminfo.bank[i]; + if (bank->start > ULONG_MAX) + highmem = 1; + #ifdef CONFIG_HIGHMEM if (__va(bank->start) >= vmalloc_min || __va(bank->start) < (void *)PAGE_OFFSET) @@ -810,7 +813,7 @@ void __init sanity_check_meminfo(void) * Split those memory banks which are partially overlapping * the vmalloc area greatly simplifying things later. */ - if (__va(bank->start) < vmalloc_min && + if (!highmem && __va(bank->start) < vmalloc_min && bank->size > vmalloc_min - __va(bank->start)) { if (meminfo.nr_banks >= NR_BANKS) { printk(KERN_CRIT "NR_BANKS too low, " @@ -830,6 +833,17 @@ void __init sanity_check_meminfo(void) #else bank->highmem = highmem; + /* + * Highmem banks not allowed with !CONFIG_HIGHMEM. + */ + if (highmem) { + printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " + "(!CONFIG_HIGHMEM).\n", + (unsigned long long)bank->start, + (unsigned long long)bank->start + bank->size - 1); + continue; + } + /* * Check whether this memory bank would entirely overlap * the vmalloc area. -- cgit v1.2.3 From 497b7e943d0dc5743454de56dcdb67352bbf96b2 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 22 Nov 2011 17:30:32 +0000 Subject: ARM: LPAE: Add the Kconfig entries This patch adds the ARM_LPAE and ARCH_PHYS_ADDR_T_64BIT Kconfig entries allowing LPAE support to be compiled into the kernel. Signed-off-by: Catalin Marinas --- arch/arm/Kconfig | 2 +- arch/arm/mm/Kconfig | 17 +++++++++++++++++ 2 files changed, 18 insertions(+), 1 deletion(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 44789eff983f..885c04e39400 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1959,7 +1959,7 @@ endchoice config XIP_KERNEL bool "Kernel Execute-In-Place from ROM" - depends on !ZBOOT_ROM + depends on !ZBOOT_ROM && !ARM_LPAE help Execute-In-Place allows the kernel to run from non-volatile storage directly addressable by the CPU, such as NOR flash. This saves RAM diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig index 67f75a0b66d6..5cf7922ff5e7 100644 --- a/arch/arm/mm/Kconfig +++ b/arch/arm/mm/Kconfig @@ -629,6 +629,23 @@ config IO_36 comment "Processor Features" +config ARM_LPAE + bool "Support for the Large Physical Address Extension" + depends on MMU && CPU_V7 + help + Say Y if you have an ARMv7 processor supporting the LPAE page + table format and you would like to access memory beyond the + 4GB limit. The resulting kernel image will not run on + processors without the LPA extension. + + If unsure, say N. 
+ +config ARCH_PHYS_ADDR_T_64BIT + def_bool ARM_LPAE + +config ARCH_DMA_ADDR_T_64BIT + bool + config ARM_THUMB bool "Support Thumb user binaries" depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_V6 || CPU_V6K || CPU_V7 || CPU_FEROCEON -- cgit v1.2.3 From ce5ea9f3767e8589521319cae2eb6e05c52bd056 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Tue, 29 Nov 2011 15:56:19 +0000 Subject: ARM: l2x0/pl310: Refactor Kconfig to be more maintainable Making CACHE_L2X0 depend on (huge list of MACH_ and ARCH_ configs) is bothersome to maintain and likely to lead to merge conflicts. This patch moves the knowledge of which platforms have a L2x0 or PL310 cache controller to the individual machines. To enable this, a new MIGHT_HAVE_CACHE_L2X0 config option is introduced to allow machines to indicate that they may have such a cache controller independently of each other. Boards/SoCs which cannot reliably operate without the L2 cache controller support will need to select CACHE_L2X0 directly from their own Kconfigs instead. This applies to some TrustZone-enabled boards where Linux runs in the Normal World, for example. Signed-off-by: Dave Martin Acked-by: Anton Vorontsov (for cns3xxx) Acked-by: Tony Lindgren (for omap) Acked-by: Shawn Guo (for imx) Acked-by: Kukjin Kim (for exynos) Acked-by: Sascha Hauer (for imx) Acked-by: Olof Johansson (for tegra) --- arch/arm/Kconfig | 8 ++++++++ arch/arm/mach-exynos/Kconfig | 1 + arch/arm/mach-omap2/Kconfig | 1 + arch/arm/mach-realview/Kconfig | 5 +++++ arch/arm/mach-vexpress/Kconfig | 1 + arch/arm/mm/Kconfig | 23 ++++++++++++++++------- arch/arm/plat-mxc/Kconfig | 1 + 7 files changed, 33 insertions(+), 7 deletions(-) (limited to 'arch/arm/mm') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 44789eff983f..16a4b9e689be 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -344,6 +344,7 @@ config ARCH_HIGHBANK select CPU_V7 select GENERIC_CLOCKEVENTS select HAVE_ARM_SCU + select MIGHT_HAVE_CACHE_L2X0 select USE_OF help Support for the Calxeda Highbank SoC based boards. 
From ce5ea9f3767e8589521319cae2eb6e05c52bd056 Mon Sep 17 00:00:00 2001
From: Dave Martin
Date: Tue, 29 Nov 2011 15:56:19 +0000
Subject: ARM: l2x0/pl310: Refactor Kconfig to be more maintainable

Making CACHE_L2X0 depend on (huge list of MACH_ and ARCH_ configs)
is bothersome to maintain and likely to lead to merge conflicts.

This patch moves the knowledge of which platforms have a L2x0 or
PL310 cache controller to the individual machines.  To enable this,
a new MIGHT_HAVE_CACHE_L2X0 config option is introduced to allow
machines to indicate that they may have such a cache controller
independently of each other.

Boards/SoCs which cannot reliably operate without the L2 cache
controller support will need to select CACHE_L2X0 directly from
their own Kconfigs instead.  This applies to some TrustZone-enabled
boards where Linux runs in the Normal World, for example.

Signed-off-by: Dave Martin
Acked-by: Anton Vorontsov (for cns3xxx)
Acked-by: Tony Lindgren (for omap)
Acked-by: Shawn Guo (for imx)
Acked-by: Kukjin Kim (for exynos)
Acked-by: Sascha Hauer (for imx)
Acked-by: Olof Johansson (for tegra)
---
 arch/arm/Kconfig               |  8 ++++++++
 arch/arm/mach-exynos/Kconfig   |  1 +
 arch/arm/mach-omap2/Kconfig    |  1 +
 arch/arm/mach-realview/Kconfig |  5 +++++
 arch/arm/mach-vexpress/Kconfig |  1 +
 arch/arm/mm/Kconfig            | 23 ++++++++++++++++++-------
 arch/arm/plat-mxc/Kconfig      |  1 +
 7 files changed, 33 insertions(+), 7 deletions(-)

(limited to 'arch/arm/mm')

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 44789eff983f..16a4b9e689be 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -344,6 +344,7 @@ config ARCH_HIGHBANK
 	select CPU_V7
 	select GENERIC_CLOCKEVENTS
 	select HAVE_ARM_SCU
+	select MIGHT_HAVE_CACHE_L2X0
 	select USE_OF
 	help
 	  Support for the Calxeda Highbank SoC based boards.
@@ -361,6 +362,7 @@ config ARCH_CNS3XXX
 	select CPU_V6K
 	select GENERIC_CLOCKEVENTS
 	select ARM_GIC
+	select MIGHT_HAVE_CACHE_L2X0
 	select MIGHT_HAVE_PCI
 	select PCI_DOMAINS if PCI
 	help
@@ -381,6 +383,7 @@ config ARCH_PRIMA2
 	select GENERIC_CLOCKEVENTS
 	select CLKDEV_LOOKUP
 	select GENERIC_IRQ_CHIP
+	select MIGHT_HAVE_CACHE_L2X0
 	select USE_OF
 	select ZONE_DMA
 	help
@@ -633,6 +636,7 @@ config ARCH_TEGRA
 	select GENERIC_GPIO
 	select HAVE_CLK
 	select HAVE_SCHED_CLOCK
+	select MIGHT_HAVE_CACHE_L2X0
 	select ARCH_HAS_CPUFREQ
 	help
 	  This enables support for NVIDIA Tegra based systems (Tegra APX,
@@ -703,6 +707,7 @@ config ARCH_SHMOBILE
 	select CLKDEV_LOOKUP
 	select HAVE_MACH_CLKDEV
 	select GENERIC_CLOCKEVENTS
+	select MIGHT_HAVE_CACHE_L2X0
 	select NO_IOPORT
 	select SPARSE_IRQ
 	select MULTI_IRQ_HANDLER
@@ -904,6 +909,7 @@ config ARCH_U8500
 	select CLKDEV_LOOKUP
 	select ARCH_REQUIRE_GPIOLIB
 	select ARCH_HAS_CPUFREQ
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Support for ST-Ericsson's Ux500 architecture
 
@@ -914,6 +920,7 @@ config ARCH_NOMADIK
 	select CPU_ARM926T
 	select CLKDEV_LOOKUP
 	select GENERIC_CLOCKEVENTS
+	select MIGHT_HAVE_CACHE_L2X0
 	select ARCH_REQUIRE_GPIOLIB
 	help
 	  Support for the Nomadik platform by ST-Ericsson
@@ -973,6 +980,7 @@ config ARCH_ZYNQ
 	select ARM_GIC
 	select ARM_AMBA
 	select ICST
+	select MIGHT_HAVE_CACHE_L2X0
 	select USE_OF
 	help
 	  Support for Xilinx Zynq ARM Cortex A9 Platform
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 724ec0f3560d..7f2347bd6ccc 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -17,6 +17,7 @@ choice
 
 config ARCH_EXYNOS4
 	bool "SAMSUNG EXYNOS4"
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Samsung EXYNOS4 SoCs based systems
 
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 503414718905..c8415784e60c 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -44,6 +44,7 @@ config ARCH_OMAP4
 	select CPU_V7
 	select ARM_GIC
 	select LOCAL_TIMERS if SMP
+	select MIGHT_HAVE_CACHE_L2X0
 	select PL310_ERRATA_588369
 	select PL310_ERRATA_727915
 	select ARM_ERRATA_720789
diff --git a/arch/arm/mach-realview/Kconfig b/arch/arm/mach-realview/Kconfig
index dba6d0c1fc17..3dd620f8dd9b 100644
--- a/arch/arm/mach-realview/Kconfig
+++ b/arch/arm/mach-realview/Kconfig
@@ -12,6 +12,7 @@ config REALVIEW_EB_A9MP
 	bool "Support Multicore Cortex-A9 Tile"
 	depends on MACH_REALVIEW_EB
 	select CPU_V7
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Enable support for the Cortex-A9MPCore tile fitted to the
 	  Realview(R) Emulation Baseboard platform.
@@ -21,6 +22,7 @@ config REALVIEW_EB_ARM11MP
 	depends on MACH_REALVIEW_EB
 	select CPU_V6K
 	select ARCH_HAS_BARRIERS if SMP
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Enable support for the ARM11MPCore tile fitted to the Realview(R)
 	  Emulation Baseboard platform.
@@ -39,6 +41,7 @@ config MACH_REALVIEW_PB11MP
 	select CPU_V6K
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
+	select MIGHT_HAVE_CACHE_L2X0
 	select ARCH_HAS_BARRIERS if SMP
 	help
 	  Include support for the ARM(R) RealView(R) Platform Baseboard for
@@ -51,6 +54,7 @@ config MACH_REALVIEW_PB1176
 	select CPU_V6
 	select ARM_GIC
 	select HAVE_TCM
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  Include support for the ARM(R) RealView(R) Platform Baseboard for
 	  ARM1176JZF-S.
@@ -78,6 +82,7 @@ config MACH_REALVIEW_PBX
 	bool "Support RealView(R) Platform Baseboard Explore"
 	select ARM_GIC
 	select HAVE_PATA_PLATFORM
+	select MIGHT_HAVE_CACHE_L2X0
 	select ARCH_SPARSEMEM_ENABLE if CPU_V7 && !REALVIEW_HIGH_PHYS_OFFSET
 	select ZONE_DMA if SPARSEMEM
 	help
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 931148487f0b..a8aefc8a6f24 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -8,5 +8,6 @@ config ARCH_VEXPRESS_CA9X4
 	select ARM_ERRATA_720789
 	select ARM_ERRATA_751472
 	select ARM_ERRATA_753970
+	select MIGHT_HAVE_CACHE_L2X0
 
 endmenu
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 67f75a0b66d6..acb1c36adadd 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -816,14 +816,23 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 	  Say Y here to use the Feroceon L2 cache in writethrough mode.
 	  Unless you specifically require this, say N for writeback mode.
 
+config MIGHT_HAVE_CACHE_L2X0
+	bool
+	help
+	  This option should be selected by machines which have a L2x0
+	  or PL310 cache controller, but where its use is optional.
+
+	  The only effect of this option is to make CACHE_L2X0 and
+	  related options available to the user for configuration.
+
+	  Boards or SoCs which always require the cache controller
+	  support to be present should select CACHE_L2X0 directly
+	  instead of this option, thus preventing the user from
+	  inadvertently configuring a broken kernel.
+
 config CACHE_L2X0
-	bool "Enable the L2x0 outer cache controller"
-	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_IMX_V6_V7 || MACH_REALVIEW_PBX || \
-		   ARCH_NOMADIK || ARCH_OMAP4 || ARCH_EXYNOS4 || ARCH_TEGRA || \
-		   ARCH_U8500 || ARCH_VEXPRESS_CA9X4 || ARCH_SHMOBILE || \
-		   ARCH_PRIMA2 || ARCH_ZYNQ || ARCH_CNS3XXX || ARCH_HIGHBANK
-	default y
+	bool "Enable the L2x0 outer cache controller" if MIGHT_HAVE_CACHE_L2X0
+	default MIGHT_HAVE_CACHE_L2X0
 	select OUTER_CACHE
 	select OUTER_CACHE_SYNC
 	help
diff --git a/arch/arm/plat-mxc/Kconfig b/arch/arm/plat-mxc/Kconfig
index b3a1f2b3ada3..b30708e28c1d 100644
--- a/arch/arm/plat-mxc/Kconfig
+++ b/arch/arm/plat-mxc/Kconfig
@@ -20,6 +20,7 @@ config ARCH_IMX_V6_V7
 	bool "i.MX3, i.MX6"
 	select AUTO_ZRELADDR if !ZBOOT_ROM
 	select ARM_PATCH_PHYS_VIRT
+	select MIGHT_HAVE_CACHE_L2X0
 	help
 	  This enables support for systems based on the Freescale i.MX3 and i.MX6
 	  family.
-- 
cgit v1.2.3
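The division of labour after this refactoring: a platform's Kconfig merely
advertises that an L2x0/PL310 may be present (select MIGHT_HAVE_CACHE_L2X0), the
user makes the call through CACHE_L2X0, and board code compiles its cache setup in
only when the option ended up enabled. A sketch of the board-code side -- the base
address is invented for illustration, and real boards pass tuned auxiliary values
rather than the keep-reset-defaults pair used here:

	#include <linux/init.h>
	#include <linux/io.h>
	#include <asm/hardware/cache-l2x0.h>

	/* Hypothetical PL310 base address, for illustration only. */
	#define EXAMPLE_PL310_PHYS	0x1f002000

	static void __init example_board_init(void)
	{
	#ifdef CONFIG_CACHE_L2X0
		void __iomem *base = ioremap(EXAMPLE_PL310_PHYS, 4096);

		/*
		 * aux_val = 0 with aux_mask = ~0 keeps the controller's
		 * reset defaults; boards normally tune way size and RAM
		 * latencies here.
		 */
		if (base)
			l2x0_init(base, 0x00000000, 0xffffffff);
	#endif
		/* ... remaining machine initialisation ... */
	}

Machines that cannot run safely without the outer cache (e.g. some TrustZone
configurations mentioned in the commit message) skip MIGHT_HAVE_CACHE_L2X0 and
select CACHE_L2X0 directly, so the user can never configure it out.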