Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r-- | arch/powerpc/mm/fsl_booke_mmu.c |  9
-rw-r--r-- | arch/powerpc/mm/mmu_decl.h      | 17
-rw-r--r-- | arch/powerpc/mm/numa.c          | 62
-rw-r--r-- | arch/powerpc/mm/pgtable_32.c    |  3
-rw-r--r-- | arch/powerpc/mm/slice.c         | 11
-rw-r--r-- | arch/powerpc/mm/stab.c          |  4
-rw-r--r-- | arch/powerpc/mm/tlb_nohash.c    |  3
7 files changed, 63 insertions, 46 deletions
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 23cee39534fd..1971e4ee3d6e 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -56,18 +56,11 @@ extern void loadcam_entry(unsigned int index);
 
 unsigned int tlbcam_index;
-unsigned int num_tlbcam_entries;
 static unsigned long __cam0, __cam1, __cam2;
 
 #define NUM_TLBCAMS        (16)
 
-struct tlbcam {
-        u32        MAS0;
-        u32        MAS1;
-        u32        MAS2;
-        u32        MAS3;
-        u32        MAS7;
-} TLBCAM[NUM_TLBCAMS];
+struct tlbcam TLBCAM[NUM_TLBCAMS];
 
 struct tlbcamrange {
         unsigned long start;
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 4314b39b6faf..d1f9c62dc177 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -30,11 +30,11 @@
 #if defined(CONFIG_40x) || defined(CONFIG_8xx)
 static inline void _tlbil_all(void)
 {
-        asm volatile ("sync; tlbia; isync" : : : "memory")
+        asm volatile ("sync; tlbia; isync" : : : "memory");
 }
 static inline void _tlbil_pid(unsigned int pid)
 {
-        asm volatile ("sync; tlbia; isync" : : : "memory")
+        asm volatile ("sync; tlbia; isync" : : : "memory");
 }
 #else /* CONFIG_40x || CONFIG_8xx */
 extern void _tlbil_all(void);
@@ -47,7 +47,7 @@ extern void _tlbil_pid(unsigned int pid);
 #ifdef CONFIG_8xx
 static inline void _tlbil_va(unsigned long address, unsigned int pid)
 {
-        asm volatile ("tlbie %0; sync" : : "r" (address) : "memory")
+        asm volatile ("tlbie %0; sync" : : "r" (address) : "memory");
 }
 #else /* CONFIG_8xx */
 extern void _tlbil_va(unsigned long address, unsigned int pid);
@@ -75,6 +75,15 @@ extern void _tlbia(void);
 #endif /* CONFIG_PPC_MMU_NOHASH */
 
 #ifdef CONFIG_PPC32
+
+struct tlbcam {
+        u32        MAS0;
+        u32        MAS1;
+        u32        MAS2;
+        u32        MAS3;
+        u32        MAS7;
+};
+
 extern void mapin_ram(void);
 extern int map_page(unsigned long va, phys_addr_t pa, int flags);
 extern void setbat(int index, unsigned long virt, phys_addr_t phys,
@@ -90,8 +99,6 @@ extern unsigned int rtas_data, rtas_size;
 struct hash_pte;
 extern struct hash_pte *Hash, *Hash_end;
 extern unsigned long Hash_size, Hash_mask;
-
-extern unsigned int num_tlbcam_entries;
 #endif
 
 extern unsigned long ioremap_bot;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index cf81049e1e51..7393bd76d698 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -822,42 +822,50 @@ static void __init dump_numa_memory_topology(void)
  * required. nid is the preferred node and end is the physical address of
  * the highest address in the node.
  *
- * Returns the physical address of the memory.
+ * Returns the virtual address of the memory.
  */
-static void __init *careful_allocation(int nid, unsigned long size,
+static void __init *careful_zallocation(int nid, unsigned long size,
                                        unsigned long align,
                                        unsigned long end_pfn)
 {
+        void *ret;
         int new_nid;
-        unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
+        unsigned long ret_paddr;
+
+        ret_paddr = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
         /* retry over all memory */
-        if (!ret)
-                ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
-
-        if (!ret)
-                panic("numa.c: cannot allocate %lu bytes on node %d",
+        if (!ret_paddr)
+                ret_paddr = __lmb_alloc_base(size, align, lmb_end_of_DRAM());
+
+        if (!ret_paddr)
+                panic("numa.c: cannot allocate %lu bytes for node %d",
                       size, nid);
 
+        ret = __va(ret_paddr);
+
         /*
-         * If the memory came from a previously allocated node, we must
-         * retry with the bootmem allocator.
+         * We initialize the nodes in numeric order: 0, 1, 2...
+         * and hand over control from the LMB allocator to the
+         * bootmem allocator.  If this function is called for
+         * node 5, then we know that all nodes <5 are using the
+         * bootmem allocator instead of the LMB allocator.
+         *
+         * So, check the nid from which this allocation came
+         * and double check to see if we need to use bootmem
+         * instead of the LMB.  We don't free the LMB memory
+         * since it would be useless.
          */
-        new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
+        new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
         if (new_nid < nid) {
-                ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
+                ret = __alloc_bootmem_node(NODE_DATA(new_nid),
                                 size, align, 0);
 
-                if (!ret)
-                        panic("numa.c: cannot allocate %lu bytes on node %d",
-                              size, new_nid);
-
-                ret = __pa(ret);
-
-                dbg("alloc_bootmem %lx %lx\n", ret, size);
+                dbg("alloc_bootmem %p %lx\n", ret, size);
         }
 
-        return (void *)ret;
+        memset(ret, 0, size);
+        return ret;
 }
 
 static struct notifier_block __cpuinitdata ppc64_numa_nb = {
@@ -952,7 +960,7 @@ void __init do_init_bootmem(void)
 
         for_each_online_node(nid) {
                 unsigned long start_pfn, end_pfn;
-                unsigned long bootmem_paddr;
+                void *bootmem_vaddr;
                 unsigned long bootmap_pages;
 
                 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
@@ -964,11 +972,9 @@ void __init do_init_bootmem(void)
                  * previous nodes' bootmem to be initialized and have
                  * all reserved areas marked.
                  */
-                NODE_DATA(nid) = careful_allocation(nid,
+                NODE_DATA(nid) = careful_zallocation(nid,
                                         sizeof(struct pglist_data),
                                         SMP_CACHE_BYTES, end_pfn);
-                NODE_DATA(nid) = __va(NODE_DATA(nid));
-                memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
                 dbg("node %d\n", nid);
                 dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
@@ -984,20 +990,20 @@ void __init do_init_bootmem(void)
                 dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
 
                 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-                bootmem_paddr = (unsigned long)careful_allocation(nid,
+                bootmem_vaddr = careful_zallocation(nid,
                                         bootmap_pages << PAGE_SHIFT,
                                         PAGE_SIZE, end_pfn);
-                memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);
 
-                dbg("bootmap_paddr = %lx\n", bootmem_paddr);
+                dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
 
-                init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
+                init_bootmem_node(NODE_DATA(nid),
+                                  __pa(bootmem_vaddr) >> PAGE_SHIFT,
                                   start_pfn, end_pfn);
 
                 free_bootmem_with_active_regions(nid, end_pfn);
                 /*
                  * Be very careful about moving this around.  Future
-                 * calls to careful_allocation() depend on this getting
+                 * calls to careful_zallocation() depend on this getting
                  * done correctly.
                  */
                 mark_reserved_regions_for_nid(nid);
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 38ff35f2142a..22972cd83cc9 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -266,7 +266,8 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
                 /* The PTE should never be already set nor present in the
                  * hash table
                  */
-                BUG_ON(pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE));
+                BUG_ON((pte_val(*pg) & (_PAGE_PRESENT | _PAGE_HASHPTE)) &&
+                       flags);
                 set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
                                                      __pgprot(flags)));
         }
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index db44e02e045b..ba5194817f8a 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -710,9 +710,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                            unsigned long len)
 {
         struct slice_mask mask, available;
+        unsigned int psize = mm->context.user_psize;
 
         mask = slice_range_to_mask(addr, len);
-        available = slice_mask_for_size(mm, mm->context.user_psize);
+        available = slice_mask_for_size(mm, psize);
+#ifdef CONFIG_PPC_64K_PAGES
+        /* We need to account for 4k slices too */
+        if (psize == MMU_PAGE_64K) {
+                struct slice_mask compat_mask;
+                compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
+                or_mask(available, compat_mask);
+        }
+#endif
 
 #if 0 /* too verbose */
         slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 60e6032a8088..98cd1dc2ae75 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -251,8 +251,8 @@ void __init stabs_alloc(void)
                 paca[cpu].stab_addr = newstab;
                 paca[cpu].stab_real = virt_to_abs(newstab);
 
-                printk(KERN_INFO "Segment table for CPU %d at 0x%lx "
-                       "virtual, 0x%lx absolute\n",
+                printk(KERN_INFO "Segment table for CPU %d at 0x%llx "
+                       "virtual, 0x%llx absolute\n",
                        cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
         }
 }
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c
index 803a64c02b06..39ac22b13c73 100644
--- a/arch/powerpc/mm/tlb_nohash.c
+++ b/arch/powerpc/mm/tlb_nohash.c
@@ -189,8 +189,9 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
         smp_call_function(do_flush_tlb_mm_ipi, NULL, 1);
         _tlbil_pid(0);
         preempt_enable();
-#endif
+#else
         _tlbil_pid(0);
+#endif
 }
 EXPORT_SYMBOL(flush_tlb_kernel_range);
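
The bulk of the numa.c change above folds the __va() conversion and the memset() that every caller previously did by hand into careful_zallocation() itself: try an allocation constrained to the node's address range, retry over all memory, panic if both fail, then hand back a zeroed virtual pointer. As a rough illustration of that allocate-with-fallback-then-zero shape, here is a small userspace C sketch; try_alloc_below() and careful_zalloc() are made-up stand-ins for __lmb_alloc_base() and careful_zallocation(), not kernel APIs.

/*
 * Userspace sketch of the pattern careful_zallocation() follows.
 * try_alloc_below() only models the end_pfn cap by refusing requests
 * larger than an arbitrary "limit"; it is not a real allocator policy.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pretend constrained allocator: fail when the caller's limit is too small. */
static void *try_alloc_below(size_t size, size_t align, size_t limit)
{
        if (size > limit)
                return NULL;            /* models __lmb_alloc_base() failing */
        return aligned_alloc(align, size);
}

/* Allocate, retrying without the limit, and always return zeroed memory. */
static void *careful_zalloc(size_t size, size_t align, size_t limit)
{
        void *ret = try_alloc_below(size, align, limit);

        /* retry over "all memory", i.e. without the limit */
        if (!ret)
                ret = try_alloc_below(size, align, (size_t)-1);

        if (!ret) {
                fprintf(stderr, "cannot allocate %zu bytes\n", size);
                exit(1);                /* the kernel version panics here */
        }

        /* one memset in one place, so no caller can forget to zero */
        memset(ret, 0, size);
        return ret;
}

int main(void)
{
        /* first call fits under the limit, second one has to fall back */
        char *a = careful_zalloc(64, 16, 128);
        char *b = careful_zalloc(4096, 16, 128);

        printf("a[0]=%d b[4095]=%d\n", a[0], b[4095]);  /* both print 0 */
        free(a);
        free(b);
        return 0;
}

Centralizing the zeroing and the physical-to-virtual conversion in one helper is the point of the rename to careful_zallocation(): callers such as do_init_bootmem() above no longer need their own __va()/memset() pairs, which is exactly what the removed NODE_DATA() and bootmem_paddr lines show.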