author | Jiri Kosina <jkosina@suse.cz> | 2011-04-26 12:22:15 +0400
---|---|---
committer | Jiri Kosina <jkosina@suse.cz> | 2011-04-26 12:22:59 +0400
commit | 07f9479a40cc778bc1462ada11f95b01360ae4ff (patch)
tree | 0676cf38df3844004bb3ebfd99dfa67a4a8998f5 /arch/arm/mm
parent | 9d5e6bdb3013acfb311ab407eeca0b6a6a3dedbf (diff)
parent | cd2e49e90f1cae7726c9a2c54488d881d7f1cd1c (diff)
download | linux-07f9479a40cc778bc1462ada11f95b01360ae4ff.tar.xz
Merge branch 'master' into for-next
Fast-forwarded to the current state of Linus' tree, as there are patches to be
applied to files that didn't exist on the old branch.
Diffstat (limited to 'arch/arm/mm')
-rw-r--r-- | arch/arm/mm/cache-v4wb.S | 2
-rw-r--r-- | arch/arm/mm/cache-v4wt.S | 2
-rw-r--r-- | arch/arm/mm/cache-v7.S | 2
-rw-r--r-- | arch/arm/mm/dma-mapping.c | 11
-rw-r--r-- | arch/arm/mm/fault-armv.c | 7
-rw-r--r-- | arch/arm/mm/fault.c | 39
-rw-r--r-- | arch/arm/mm/idmap.c | 35
-rw-r--r-- | arch/arm/mm/init.c | 8
-rw-r--r-- | arch/arm/mm/mm.h | 2
-rw-r--r-- | arch/arm/mm/mmap.c | 4
-rw-r--r-- | arch/arm/mm/mmu.c | 71
-rw-r--r-- | arch/arm/mm/pgd.c | 24
-rw-r--r-- | arch/arm/mm/proc-arm1020.S | 2
-rw-r--r-- | arch/arm/mm/proc-arm1020e.S | 2
-rw-r--r-- | arch/arm/mm/proc-arm1022.S | 2
-rw-r--r-- | arch/arm/mm/proc-arm1026.S | 2
-rw-r--r-- | arch/arm/mm/proc-arm720.S | 2
-rw-r--r-- | arch/arm/mm/proc-arm920.S | 4
-rw-r--r-- | arch/arm/mm/proc-arm922.S | 2
-rw-r--r-- | arch/arm/mm/proc-arm925.S | 2
-rw-r--r-- | arch/arm/mm/proc-arm926.S | 2
-rw-r--r-- | arch/arm/mm/proc-macros.S | 2
-rw-r--r-- | arch/arm/mm/proc-sa1100.S | 2
-rw-r--r-- | arch/arm/mm/proc-v6.S | 6
-rw-r--r-- | arch/arm/mm/proc-v7.S | 4
-rw-r--r-- | arch/arm/mm/proc-xsc3.S | 2
-rw-r--r-- | arch/arm/mm/proc-xscale.S | 2
27 files changed, 170 insertions, 75 deletions
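The recurring change in the arch/arm/mm hunks below is the insertion of a pud level between the pgd and pmd steps of every page-table walk (fault-armv.c, fault.c, pgd.c, mm.h). As a minimal sketch of that walk pattern — assumed kernel context, not code taken from this merge, and the function name is illustrative only:

```c
#include <linux/mm.h>		/* struct mm_struct, *_none_or_clear_bad() */
#include <asm/pgtable.h>	/* pgd/pud/pmd/pte accessors */

/*
 * Walk mm's page tables down to the PTE covering addr, or return NULL
 * if any intermediate level is missing.  This mirrors the shape that
 * adjust_pte() and show_pte() take after this merge.
 */
static pte_t *walk_to_pte(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	pud = pud_offset(pgd, addr);	/* new level added by these hunks */
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);	/* now takes a pud, not a pgd */
	if (pmd_none_or_clear_bad(pmd))
		return NULL;

	return pte_offset_map(pmd, addr);	/* caller must pte_unmap() */
}
```

On classic two-level ARM the pud is folded into the pgd by the generic no-pud headers, so the extra step costs nothing in generated code; it simply makes the walkers follow the standard four-level calling convention.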
diff --git a/arch/arm/mm/cache-v4wb.S b/arch/arm/mm/cache-v4wb.S index d3644db467b7..f40c69656d8d 100644 --- a/arch/arm/mm/cache-v4wb.S +++ b/arch/arm/mm/cache-v4wb.S @@ -32,7 +32,7 @@ /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual - * cache line maintainence instructions. + * cache line maintenance instructions. * * Size Clean (ticks) Dirty (ticks) * 4096 21 20 21 53 55 54 diff --git a/arch/arm/mm/cache-v4wt.S b/arch/arm/mm/cache-v4wt.S index 49c2b66cf3dd..a7b276dbda11 100644 --- a/arch/arm/mm/cache-v4wt.S +++ b/arch/arm/mm/cache-v4wt.S @@ -34,7 +34,7 @@ /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual - * cache line maintainence instructions. + * cache line maintenance instructions. * * *** This needs benchmarking */ diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 6136e68ce953..dc18d81ef8ce 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -96,7 +96,7 @@ ENDPROC(v7_flush_dcache_all) * Flush the entire cache system. * The data cache flush is now achieved using atomic clean / invalidates * working outwards from L1 cache. This is done using Set/Way based cache - * maintainance instructions. + * maintenance instructions. * The instruction cache can still be invalidated back to the point of * unification in a single instruction. * diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c index 4771dba61448..82a093cee09a 100644 --- a/arch/arm/mm/dma-mapping.c +++ b/arch/arm/mm/dma-mapping.c @@ -149,6 +149,7 @@ static int __init consistent_init(void) { int ret = 0; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte; int i = 0; @@ -156,7 +157,15 @@ static int __init consistent_init(void) do { pgd = pgd_offset(&init_mm, base); - pmd = pmd_alloc(&init_mm, pgd, base); + + pud = pud_alloc(&init_mm, pgd, base); + if (!pud) { + printk(KERN_ERR "%s: no pud tables\n", __func__); + ret = -ENOMEM; + break; + } + + pmd = pmd_alloc(&init_mm, pud, base); if (!pmd) { printk(KERN_ERR "%s: no pmd tables\n", __func__); ret = -ENOMEM; diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c index 01210dba0221..7cab79179421 100644 --- a/arch/arm/mm/fault-armv.c +++ b/arch/arm/mm/fault-armv.c @@ -95,6 +95,7 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, { spinlock_t *ptl; pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pte_t *pte; int ret; @@ -103,7 +104,11 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address, if (pgd_none_or_clear_bad(pgd)) return 0; - pmd = pmd_offset(pgd, address); + pud = pud_offset(pgd, address); + if (pud_none_or_clear_bad(pud)) + return 0; + + pmd = pmd_offset(pud, address); if (pmd_none_or_clear_bad(pmd)) return 0; diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index f10f9bac2206..bc0e1d88fd3b 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c @@ -76,9 +76,11 @@ void show_pte(struct mm_struct *mm, unsigned long addr) printk(KERN_ALERT "pgd = %p\n", mm->pgd); pgd = pgd_offset(mm, addr); - printk(KERN_ALERT "[%08lx] *pgd=%08lx", addr, pgd_val(*pgd)); + printk(KERN_ALERT "[%08lx] *pgd=%08llx", + addr, (long long)pgd_val(*pgd)); do { + pud_t *pud; pmd_t *pmd; pte_t *pte; @@ -90,9 +92,21 @@ void show_pte(struct mm_struct *mm, unsigned long addr) break; } - pmd = pmd_offset(pgd, addr); + pud = pud_offset(pgd, addr); + if (PTRS_PER_PUD != 1) + printk(", *pud=%08lx", pud_val(*pud)); + + if (pud_none(*pud)) + break; + + if (pud_bad(*pud)) { + 
printk("(bad)"); + break; + } + + pmd = pmd_offset(pud, addr); if (PTRS_PER_PMD != 1) - printk(", *pmd=%08lx", pmd_val(*pmd)); + printk(", *pmd=%08llx", (long long)pmd_val(*pmd)); if (pmd_none(*pmd)) break; @@ -107,8 +121,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr) break; pte = pte_offset_map(pmd, addr); - printk(", *pte=%08lx", pte_val(*pte)); - printk(", *ppte=%08lx", pte_val(pte[PTE_HWTABLE_PTRS])); + printk(", *pte=%08llx", (long long)pte_val(*pte)); + printk(", *ppte=%08llx", + (long long)pte_val(pte[PTE_HWTABLE_PTRS])); pte_unmap(pte); } while(0); @@ -388,6 +403,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr, { unsigned int index; pgd_t *pgd, *pgd_k; + pud_t *pud, *pud_k; pmd_t *pmd, *pmd_k; if (addr < TASK_SIZE) @@ -406,12 +422,19 @@ do_translation_fault(unsigned long addr, unsigned int fsr, if (pgd_none(*pgd_k)) goto bad_area; - if (!pgd_present(*pgd)) set_pgd(pgd, *pgd_k); - pmd_k = pmd_offset(pgd_k, addr); - pmd = pmd_offset(pgd, addr); + pud = pud_offset(pgd, addr); + pud_k = pud_offset(pgd_k, addr); + + if (pud_none(*pud_k)) + goto bad_area; + if (!pud_present(*pud)) + set_pud(pud, *pud_k); + + pmd = pmd_offset(pud, addr); + pmd_k = pmd_offset(pud_k, addr); /* * On ARM one Linux PGD entry contains two hardware entries (see page diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c index 57299446f787..2be9139a4ef3 100644 --- a/arch/arm/mm/idmap.c +++ b/arch/arm/mm/idmap.c @@ -4,10 +4,10 @@ #include <asm/pgalloc.h> #include <asm/pgtable.h> -static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end, +static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end, unsigned long prot) { - pmd_t *pmd = pmd_offset(pgd, addr); + pmd_t *pmd = pmd_offset(pud, addr); addr = (addr & PMD_MASK) | prot; pmd[0] = __pmd(addr); @@ -16,6 +16,18 @@ static void idmap_add_pmd(pgd_t *pgd, unsigned long addr, unsigned long end, flush_pmd_entry(pmd); } +static void idmap_add_pud(pgd_t *pgd, unsigned long addr, unsigned long end, + unsigned long prot) +{ + pud_t *pud = pud_offset(pgd, addr); + unsigned long next; + + do { + next = pud_addr_end(addr, end); + idmap_add_pmd(pud, addr, next, prot); + } while (pud++, addr = next, addr != end); +} + void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) { unsigned long prot, next; @@ -27,17 +39,28 @@ void identity_mapping_add(pgd_t *pgd, unsigned long addr, unsigned long end) pgd += pgd_index(addr); do { next = pgd_addr_end(addr, end); - idmap_add_pmd(pgd, addr, next, prot); + idmap_add_pud(pgd, addr, next, prot); } while (pgd++, addr = next, addr != end); } #ifdef CONFIG_SMP -static void idmap_del_pmd(pgd_t *pgd, unsigned long addr, unsigned long end) +static void idmap_del_pmd(pud_t *pud, unsigned long addr, unsigned long end) { - pmd_t *pmd = pmd_offset(pgd, addr); + pmd_t *pmd = pmd_offset(pud, addr); pmd_clear(pmd); } +static void idmap_del_pud(pgd_t *pgd, unsigned long addr, unsigned long end) +{ + pud_t *pud = pud_offset(pgd, addr); + unsigned long next; + + do { + next = pud_addr_end(addr, end); + idmap_del_pmd(pud, addr, next); + } while (pud++, addr = next, addr != end); +} + void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) { unsigned long next; @@ -45,7 +68,7 @@ void identity_mapping_del(pgd_t *pgd, unsigned long addr, unsigned long end) pgd += pgd_index(addr); do { next = pgd_addr_end(addr, end); - idmap_del_pmd(pgd, addr, next); + idmap_del_pud(pgd, addr, next); } while (pgd++, addr = next, addr != end); } #endif diff --git 
a/arch/arm/mm/init.c b/arch/arm/mm/init.c index cddd684364da..e5f6fc428348 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -78,7 +78,7 @@ __tagtable(ATAG_INITRD2, parse_tag_initrd2); */ struct meminfo meminfo; -void show_mem(void) +void show_mem(unsigned int filter) { int free = 0, total = 0, reserved = 0; int shared = 0, cached = 0, slab = 0, i; @@ -350,7 +350,7 @@ void __init bootmem_init(void) */ arm_bootmem_free(min, max_low, max_high); - high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1; + high_memory = __va(((phys_addr_t)max_low << PAGE_SHIFT) - 1) + 1; /* * This doesn't seem to be used by the Linux memory manager any @@ -398,8 +398,8 @@ free_memmap(unsigned long start_pfn, unsigned long end_pfn) * Convert to physical addresses, and * round start upwards and end downwards. */ - pg = PAGE_ALIGN(__pa(start_pg)); - pgend = __pa(end_pg) & PAGE_MASK; + pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); + pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; /* * If there are free pages between these, diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h index 36960df5fb76..d2384106af9c 100644 --- a/arch/arm/mm/mm.h +++ b/arch/arm/mm/mm.h @@ -7,7 +7,7 @@ extern pmd_t *top_pmd; static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt) { - return pmd_offset(pgd, virt); + return pmd_offset(pud_offset(pgd, virt), virt); } static inline pmd_t *pmd_off_k(unsigned long virt) diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index afe209e1e1f8..74be05f3e03a 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c @@ -7,6 +7,7 @@ #include <linux/shm.h> #include <linux/sched.h> #include <linux/io.h> +#include <linux/personality.h> #include <linux/random.h> #include <asm/cputype.h> #include <asm/system.h> @@ -82,7 +83,8 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, mm->cached_hole_size = 0; } /* 8 bits of randomness in 20 address space bits */ - if (current->flags & PF_RANDOMIZE) + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT; full_search: diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index ff7b43b5885a..6cf76b3b68d1 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c @@ -533,7 +533,7 @@ static void __init *early_alloc(unsigned long sz) static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) { if (pmd_none(*pmd)) { - pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t)); + pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE); __pmd_populate(pmd, __pa(pte), prot); } BUG_ON(pmd_bad(*pmd)); @@ -551,11 +551,11 @@ static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, } while (pte++, addr += PAGE_SIZE, addr != end); } -static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, +static void __init alloc_init_section(pud_t *pud, unsigned long addr, unsigned long end, phys_addr_t phys, const struct mem_type *type) { - pmd_t *pmd = pmd_offset(pgd, addr); + pmd_t *pmd = pmd_offset(pud, addr); /* * Try a section mapping - end, addr and phys must all be aligned @@ -584,6 +584,19 @@ static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, } } +static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end, + unsigned long phys, const struct mem_type *type) +{ + pud_t *pud = pud_offset(pgd, addr); + unsigned long next; + + do { + next = pud_addr_end(addr, end); + alloc_init_section(pud, addr, next, phys, type); + phys += next - addr; + } while (pud++, addr = next, addr != end); +} + static void 
__init create_36bit_mapping(struct map_desc *md, const struct mem_type *type) { @@ -592,13 +605,13 @@ static void __init create_36bit_mapping(struct map_desc *md, pgd_t *pgd; addr = md->virtual; - phys = (unsigned long)__pfn_to_phys(md->pfn); + phys = __pfn_to_phys(md->pfn); length = PAGE_ALIGN(md->length); if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) { printk(KERN_ERR "MM: CPU does not support supersection " "mapping for 0x%08llx at 0x%08lx\n", - __pfn_to_phys((u64)md->pfn), addr); + (long long)__pfn_to_phys((u64)md->pfn), addr); return; } @@ -611,14 +624,14 @@ static void __init create_36bit_mapping(struct map_desc *md, if (type->domain) { printk(KERN_ERR "MM: invalid domain in supersection " "mapping for 0x%08llx at 0x%08lx\n", - __pfn_to_phys((u64)md->pfn), addr); + (long long)__pfn_to_phys((u64)md->pfn), addr); return; } if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { - printk(KERN_ERR "MM: cannot create mapping for " - "0x%08llx at 0x%08lx invalid alignment\n", - __pfn_to_phys((u64)md->pfn), addr); + printk(KERN_ERR "MM: cannot create mapping for 0x%08llx" + " at 0x%08lx invalid alignment\n", + (long long)__pfn_to_phys((u64)md->pfn), addr); return; } @@ -631,7 +644,8 @@ static void __init create_36bit_mapping(struct map_desc *md, pgd = pgd_offset_k(addr); end = addr + length; do { - pmd_t *pmd = pmd_offset(pgd, addr); + pud_t *pud = pud_offset(pgd, addr); + pmd_t *pmd = pmd_offset(pud, addr); int i; for (i = 0; i < 16; i++) @@ -652,22 +666,23 @@ static void __init create_36bit_mapping(struct map_desc *md, */ static void __init create_mapping(struct map_desc *md) { - unsigned long phys, addr, length, end; + unsigned long addr, length, end; + phys_addr_t phys; const struct mem_type *type; pgd_t *pgd; if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { - printk(KERN_WARNING "BUG: not creating mapping for " - "0x%08llx at 0x%08lx in user region\n", - __pfn_to_phys((u64)md->pfn), md->virtual); + printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx" + " at 0x%08lx in user region\n", + (long long)__pfn_to_phys((u64)md->pfn), md->virtual); return; } if ((md->type == MT_DEVICE || md->type == MT_ROM) && md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { - printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx " - "overlaps vmalloc space\n", - __pfn_to_phys((u64)md->pfn), md->virtual); + printk(KERN_WARNING "BUG: mapping for 0x%08llx" + " at 0x%08lx overlaps vmalloc space\n", + (long long)__pfn_to_phys((u64)md->pfn), md->virtual); } type = &mem_types[md->type]; @@ -681,13 +696,13 @@ static void __init create_mapping(struct map_desc *md) } addr = md->virtual & PAGE_MASK; - phys = (unsigned long)__pfn_to_phys(md->pfn); + phys = __pfn_to_phys(md->pfn); length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { - printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not " + printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not " "be mapped using pages, ignoring.\n", - __pfn_to_phys(md->pfn), addr); + (long long)__pfn_to_phys(md->pfn), addr); return; } @@ -696,7 +711,7 @@ static void __init create_mapping(struct map_desc *md) do { unsigned long next = pgd_addr_end(addr, end); - alloc_init_section(pgd, addr, next, phys, type); + alloc_init_pud(pgd, addr, next, phys, type); phys += next - addr; addr = next; @@ -794,9 +809,10 @@ static void __init sanity_check_meminfo(void) */ if (__va(bank->start) >= vmalloc_min || __va(bank->start) < (void 
*)PAGE_OFFSET) { - printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx " + printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx " "(vmalloc region overlap).\n", - bank->start, bank->start + bank->size - 1); + (unsigned long long)bank->start, + (unsigned long long)bank->start + bank->size - 1); continue; } @@ -807,10 +823,11 @@ static void __init sanity_check_meminfo(void) if (__va(bank->start + bank->size) > vmalloc_min || __va(bank->start + bank->size) < __va(bank->start)) { unsigned long newsize = vmalloc_min - __va(bank->start); - printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx " - "to -%.8lx (vmalloc region overlap).\n", - bank->start, bank->start + bank->size - 1, - bank->start + newsize - 1); + printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx " + "to -%.8llx (vmalloc region overlap).\n", + (unsigned long long)bank->start, + (unsigned long long)bank->start + bank->size - 1, + (unsigned long long)bank->start + newsize - 1); bank->size = newsize; } #endif diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c index 709244c66fa3..b2027c154b2a 100644 --- a/arch/arm/mm/pgd.c +++ b/arch/arm/mm/pgd.c @@ -23,6 +23,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) { pgd_t *new_pgd, *init_pgd; + pud_t *new_pud, *init_pud; pmd_t *new_pmd, *init_pmd; pte_t *new_pte, *init_pte; @@ -46,7 +47,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm) * On ARM, first page must always be allocated since it * contains the machine vectors. */ - new_pmd = pmd_alloc(mm, new_pgd, 0); + new_pud = pud_alloc(mm, new_pgd, 0); + if (!new_pud) + goto no_pud; + + new_pmd = pmd_alloc(mm, new_pud, 0); if (!new_pmd) goto no_pmd; @@ -54,7 +59,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm) if (!new_pte) goto no_pte; - init_pmd = pmd_offset(init_pgd, 0); + init_pud = pud_offset(init_pgd, 0); + init_pmd = pmd_offset(init_pud, 0); init_pte = pte_offset_map(init_pmd, 0); set_pte_ext(new_pte, *init_pte, 0); pte_unmap(init_pte); @@ -66,6 +72,8 @@ pgd_t *pgd_alloc(struct mm_struct *mm) no_pte: pmd_free(mm, new_pmd); no_pmd: + pud_free(mm, new_pud); +no_pud: free_pages((unsigned long)new_pgd, 2); no_pgd: return NULL; @@ -74,6 +82,7 @@ no_pgd: void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) { pgd_t *pgd; + pud_t *pud; pmd_t *pmd; pgtable_t pte; @@ -84,7 +93,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) if (pgd_none_or_clear_bad(pgd)) goto no_pgd; - pmd = pmd_offset(pgd, 0); + pud = pud_offset(pgd, 0); + if (pud_none_or_clear_bad(pud)) + goto no_pud; + + pmd = pmd_offset(pud, 0); if (pmd_none_or_clear_bad(pmd)) goto no_pmd; @@ -92,8 +105,11 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd_base) pmd_clear(pmd); pte_free(mm, pte); no_pmd: - pgd_clear(pgd); + pud_clear(pud); pmd_free(mm, pmd); +no_pud: + pgd_clear(pgd); + pud_free(mm, pud); no_pgd: free_pages((unsigned long) pgd_base, 2); } diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index 226e3d8351c2..6c4e7fd6c8af 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S @@ -64,7 +64,7 @@ /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual - * cache line maintainence instructions. + * cache line maintenance instructions. 
*/ #define CACHE_DLIMIT 32768 diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 86d9c2cf0bce..4ce947c19623 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S @@ -64,7 +64,7 @@ /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual - * cache line maintainence instructions. + * cache line maintenance instructions. */ #define CACHE_DLIMIT 32768 diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 83d3dd34f846..c8884c5413a2 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S @@ -53,7 +53,7 @@ /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual - * cache line maintainence instructions. + * cache line maintenance instructions. */ #define CACHE_DLIMIT 32768 diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index 686043ee7281..413684660aad 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S @@ -53,7 +53,7 @@ /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual - * cache line maintainence instructions. + * cache line maintenance instructions. */ #define CACHE_DLIMIT 32768 diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 665266da143c..7a06e5964f59 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S @@ -63,7 +63,7 @@ ENTRY(cpu_arm720_proc_fin) /* * Function: arm720_proc_do_idle(void) * Params : r0 = unused - * Purpose : put the processer in proper idle mode + * Purpose : put the processor in proper idle mode */ ENTRY(cpu_arm720_do_idle) mov pc, lr diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 219980ec8b6e..bf8a1d1cccb6 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S @@ -53,7 +53,7 @@ /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual - * cache line maintainence instructions. + * cache line maintenance instructions. */ #define CACHE_DLIMIT 65536 @@ -390,7 +390,7 @@ ENTRY(cpu_arm920_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm920_suspend_size .equ cpu_arm920_suspend_size, 4 * 3 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_arm920_do_suspend) stmfd sp!, {r4 - r7, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 36154b1e792a..95ba1fc56e4d 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S @@ -54,7 +54,7 @@ /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual - * cache line maintainence instructions. (I think this should + * cache line maintenance instructions. (I think this should * be 32768). */ #define CACHE_DLIMIT 8192 diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 89c5e0009c4c..541e4774eea1 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S @@ -77,7 +77,7 @@ /* * This is the size at which it becomes more efficient to * clean the whole cache, rather than using the individual - * cache line maintainence instructions. + * cache line maintenance instructions. 
*/ #define CACHE_DLIMIT 8192 diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 6a4bdb2c94a7..0ed85d930c09 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S @@ -404,7 +404,7 @@ ENTRY(cpu_arm926_set_pte_ext) /* Suspend/resume support: taken from arch/arm/plat-s3c24xx/sleep.S */ .globl cpu_arm926_suspend_size .equ cpu_arm926_suspend_size, 4 * 3 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_arm926_do_suspend) stmfd sp!, {r4 - r7, lr} mrc p15, 0, r4, c13, c0, 0 @ PID diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index e32fa499194c..34261f9486b9 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S @@ -85,7 +85,7 @@ /* * Sanity check the PTE configuration for the code below - which makes - * certain assumptions about how these bits are layed out. + * certain assumptions about how these bits are laid out. */ #ifdef CONFIG_MMU #if L_PTE_SHARED != PTE_EXT_SHARED diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 74483d1977fe..184a9c997e36 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S @@ -171,7 +171,7 @@ ENTRY(cpu_sa1100_set_pte_ext) .globl cpu_sa1100_suspend_size .equ cpu_sa1100_suspend_size, 4*4 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_sa1100_do_suspend) stmfd sp!, {r4 - r7, lr} mrc p15, 0, r4, c3, c0, 0 @ domain ID diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 832b6bdc192c..7c99cb4c8e4f 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S @@ -124,7 +124,7 @@ ENTRY(cpu_v6_set_pte_ext) /* Suspend/resume support: taken from arch/arm/mach-s3c64xx/sleep.S */ .globl cpu_v6_suspend_size .equ cpu_v6_suspend_size, 4 * 8 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_v6_do_suspend) stmfd sp!, {r4 - r11, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID @@ -132,7 +132,7 @@ ENTRY(cpu_v6_do_suspend) mrc p15, 0, r6, c3, c0, 0 @ Domain ID mrc p15, 0, r7, c2, c0, 0 @ Translation table base 0 mrc p15, 0, r8, c2, c0, 1 @ Translation table base 1 - mrc p15, 0, r9, c1, c0, 1 @ auxillary control register + mrc p15, 0, r9, c1, c0, 1 @ auxiliary control register mrc p15, 0, r10, c1, c0, 2 @ co-processor access control mrc p15, 0, r11, c1, c0, 0 @ control register stmia r0, {r4 - r11} @@ -151,7 +151,7 @@ ENTRY(cpu_v6_do_resume) mcr p15, 0, r6, c3, c0, 0 @ Domain ID mcr p15, 0, r7, c2, c0, 0 @ Translation table base 0 mcr p15, 0, r8, c2, c0, 1 @ Translation table base 1 - mcr p15, 0, r9, c1, c0, 1 @ auxillary control register + mcr p15, 0, r9, c1, c0, 1 @ auxiliary control register mcr p15, 0, r10, c1, c0, 2 @ co-processor access control mcr p15, 0, ip, c2, c0, 2 @ TTB control register mcr p15, 0, ip, c7, c5, 4 @ ISB diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 262fa88a7439..babfba09c89f 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S @@ -211,7 +211,7 @@ cpu_v7_name: /* Suspend/resume support: derived from arch/arm/mach-s5pv210/sleep.S */ .globl cpu_v7_suspend_size .equ cpu_v7_suspend_size, 4 * 8 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_v7_do_suspend) stmfd sp!, {r4 - r11, lr} mrc p15, 0, r4, c13, c0, 0 @ FCSE/PID @@ -237,7 +237,7 @@ ENTRY(cpu_v7_do_resume) mcr p15, 0, r7, c2, c0, 0 @ TTB 0 mcr p15, 0, r8, c2, c0, 1 @ TTB 1 mcr p15, 0, ip, c2, c0, 2 @ TTB control register - mcr p15, 0, r10, c1, c0, 1 @ Auxillary control register + mcr p15, 0, r10, c1, c0, 1 @ Auxiliary control register mcr p15, 0, r11, c1, c0, 2 @ Co-processor access control ldr r4, =PRRR @ PRRR ldr r5, =NMRR @ NMRR diff --git 
a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 63d8b2044e84..596213699f37 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S @@ -417,7 +417,7 @@ ENTRY(cpu_xsc3_set_pte_ext) .globl cpu_xsc3_suspend_size .equ cpu_xsc3_suspend_size, 4 * 8 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_xsc3_do_suspend) stmfd sp!, {r4 - r10, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 086038cd86ab..ce233bcbf506 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S @@ -518,7 +518,7 @@ ENTRY(cpu_xscale_set_pte_ext) .globl cpu_xscale_suspend_size .equ cpu_xscale_suspend_size, 4 * 7 -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP ENTRY(cpu_xscale_do_suspend) stmfd sp!, {r4 - r10, lr} mrc p14, 0, r4, c6, c0, 0 @ clock configuration, for turbo mode |
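For the init-time walkers (idmap.c and mmu.c above), the merge inserts an intermediate loop that steps a pud pointer with pud_addr_end() between the pgd loop and the pmd level. A hedged sketch of that iteration pattern — assumed kernel context; the helper name and callback parameter are illustrative, not from the patch:

```c
#include <asm/pgtable.h>	/* pud_offset(), pud_addr_end() */

/*
 * Apply fn to each pud-sized chunk of [addr, end) under one pgd entry,
 * the same do/while shape used by idmap_add_pud() and alloc_init_pud().
 */
static void walk_puds(pgd_t *pgd, unsigned long addr, unsigned long end,
		      unsigned long prot,
		      void (*fn)(pud_t *, unsigned long, unsigned long,
				 unsigned long))
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);	/* clamp to the pud boundary */
		fn(pud, addr, next, prot);
	} while (pud++, addr = next, addr != end);
}
```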