Diffstat (limited to 'arch/arm/mm/mmu.c')
-rw-r--r-- | arch/arm/mm/mmu.c | 70
1 files changed, 70 insertions, 0 deletions
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 2d6d682c206a..a713e40e1f1a 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -211,6 +211,12 @@ static struct mem_type mem_types[] = {
                           PMD_SECT_TEX(1),
                 .domain         = DOMAIN_IO,
         },
+        [MT_DEVICE_WC] = {      /* ioremap_wc */
+                .prot_pte       = PROT_PTE_DEVICE,
+                .prot_l1        = PMD_TYPE_TABLE,
+                .prot_sect      = PROT_SECT_DEVICE,
+                .domain         = DOMAIN_IO,
+        },
         [MT_CACHECLEAN] = {
                 .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                 .domain    = DOMAIN_KERNEL,
@@ -273,6 +279,20 @@ static void __init build_mem_type_table(void)
         }
 
         /*
+         * On non-Xscale3 ARMv5-and-older systems, use CB=01
+         * (Uncached/Buffered) for ioremap_wc() mappings.  On XScale3
+         * and ARMv6+, use TEXCB=00100 mappings (Inner/Outer Uncacheable
+         * in xsc3 parlance, Uncached Normal in ARMv6 parlance).
+         */
+        if (cpu_is_xsc3() || cpu_arch >= CPU_ARCH_ARMv6) {
+                mem_types[MT_DEVICE_WC].prot_pte_ext |= PTE_EXT_TEX(1);
+                mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
+        } else {
+                mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_BUFFERABLE;
+                mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
+        }
+
+        /*
          * ARMv5 and lower, bit 4 must be set for page tables.
          * (was: cache "update-able on write" bit on ARM610)
          * However, Xscale cores require this bit to be cleared.
@@ -568,6 +588,55 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
                 create_mapping(io_desc + i);
 }
 
+static int __init check_membank_valid(struct membank *mb)
+{
+        /*
+         * Check whether this memory region has non-zero size.
+         */
+        if (mb->size == 0)
+                return 0;
+
+        /*
+         * Check whether this memory region would entirely overlap
+         * the vmalloc area.
+         */
+        if (phys_to_virt(mb->start) >= VMALLOC_MIN) {
+                printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
+                        "(vmalloc region overlap).\n",
+                        mb->start, mb->start + mb->size - 1);
+                return 0;
+        }
+
+        /*
+         * Check whether this memory region would partially overlap
+         * the vmalloc area.
+         */
+        if (phys_to_virt(mb->start + mb->size) < phys_to_virt(mb->start) ||
+            phys_to_virt(mb->start + mb->size) > VMALLOC_MIN) {
+                unsigned long newsize = VMALLOC_MIN - phys_to_virt(mb->start);
+
+                printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
+                        "to -%.8lx (vmalloc region overlap).\n",
+                        mb->start, mb->start + mb->size - 1,
+                        mb->start + newsize - 1);
+                mb->size = newsize;
+        }
+
+        return 1;
+}
+
+static void __init sanity_check_meminfo(struct meminfo *mi)
+{
+        int i;
+        int j;
+
+        for (i = 0, j = 0; i < mi->nr_banks; i++) {
+                if (check_membank_valid(&mi->bank[i]))
+                        mi->bank[j++] = mi->bank[i];
+        }
+        mi->nr_banks = j;
+}
+
 static inline void prepare_page_table(struct meminfo *mi)
 {
         unsigned long addr;
@@ -753,6 +822,7 @@ void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
         void *zero_page;
 
         build_mem_type_table();
+        sanity_check_meminfo(mi);
         prepare_page_table(mi);
         bootmem_init(mi);
         devicemaps_init(mdesc);
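
For context, a minimal sketch of how a driver might consume the new mapping type (illustrative only, not part of the commit): ioremap_wc() returns a write-combined mapping, which on ARM is now backed by the MT_DEVICE_WC memory type added above. The device address, size, and function names below are made-up assumptions.

/*
 * Illustrative only -- a hypothetical driver mapping a frame buffer
 * with write combining.  EXAMPLE_FB_PHYS/SIZE and the function names
 * are stand-ins, not real hardware values.
 */
#include <linux/io.h>
#include <linux/errno.h>

#define EXAMPLE_FB_PHYS 0x80000000UL    /* assumed device address */
#define EXAMPLE_FB_SIZE 0x00100000UL    /* assumed 1 MB aperture  */

static void __iomem *fb_base;

static int example_fb_map(void)
{
        /* Bufferable/write-combined mapping: CPU writes may be merged. */
        fb_base = ioremap_wc(EXAMPLE_FB_PHYS, EXAMPLE_FB_SIZE);
        if (!fb_base)
                return -ENOMEM;

        /* Streaming stores like this clear benefit from write combining. */
        memset_io(fb_base, 0, EXAMPLE_FB_SIZE);
        return 0;
}

static void example_fb_unmap(void)
{
        iounmap(fb_base);
}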
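
The bank filtering added by sanity_check_meminfo() is easiest to see with concrete numbers. The following stand-alone model is illustrative only: PAGE_OFFSET, VMALLOC_MIN, the linear phys_to_virt() and the bank layout are assumed values, and the wrap-around check from the commit is omitted. It walks three banks and shows one kept whole, one truncated at the vmalloc boundary, and one dropped.

/*
 * Illustrative user-space model of the bank sanity check above.
 * All constants are assumptions; real values are platform-specific.
 */
#include <stdio.h>

#define PAGE_OFFSET     0xc0000000UL    /* assumed direct-map base        */
#define VMALLOC_MIN     0xd8000000UL    /* assumed start of vmalloc area  */

struct bank { unsigned long start, size; };

static unsigned long phys_to_virt(unsigned long phys)
{
        /* Linear mapping, assuming physical RAM starts at 0. */
        return phys + PAGE_OFFSET;
}

int main(void)
{
        struct bank banks[] = {
                { 0x00000000UL, 0x10000000UL }, /* 256 MB: fits entirely    */
                { 0x10000000UL, 0x10000000UL }, /* 256 MB: gets truncated   */
                { 0x20000000UL, 0x10000000UL }, /* 256 MB: dropped outright */
        };
        int i;

        for (i = 0; i < 3; i++) {
                struct bank *b = &banks[i];

                if (phys_to_virt(b->start) >= VMALLOC_MIN) {
                        printf("bank %d: ignored (starts in vmalloc area)\n", i);
                        continue;
                }
                if (phys_to_virt(b->start + b->size) > VMALLOC_MIN)
                        b->size = VMALLOC_MIN - phys_to_virt(b->start);
                printf("bank %d: kept, size %#lx\n", i, b->size);
        }
        return 0;
}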