author    Dave Hansen <dave@linux.vnet.ibm.com>              2008-12-09 11:21:36 +0300
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2009-01-08 08:25:09 +0300
commit    893473df78b4407c9ab75cb55479409795953b01
tree      f58da72162345985841badce4ede52cc98f69e02 /arch/powerpc
parent    0be210fd664b07531cb238bafb453a2a54c2a7a8
powerpc/mm: Cleanup careful_allocation(): consolidate memset()
Both users of careful_allocation() immediately memset() the result, so just
do the memset() in one place.

Also give careful_allocation() a 'z' prefix to bring it in line with
kzalloc() and friends.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
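The change boils down to moving the zeroing into the allocator itself, the same
pattern kzalloc() applies on top of kmalloc(). The userspace sketch below only
illustrates that pattern; node_alloc() and main() are illustrative stand-ins,
not the bootmem paths that careful_zallocation() actually wraps in numa.c.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for the real per-node allocation paths. */
static void *node_alloc(int nid, size_t size)
{
	(void)nid;		/* the sketch ignores the node id */
	return malloc(size);
}

/* Zeroing wrapper: the 'z' prefix mirrors kzalloc() and friends. */
static void *careful_zallocation(int nid, size_t size)
{
	void *ret = node_alloc(nid, size);

	if (ret)
		memset(ret, 0, size);	/* zero once here, for every caller */
	return ret;
}

int main(void)
{
	/* Callers no longer follow the allocation with their own memset(). */
	unsigned char *buf = careful_zallocation(0, 64);

	if (buf) {
		printf("buf[0] = %d\n", buf[0]);	/* prints 0 */
		free(buf);
	}
	return 0;
}

With the memset() consolidated, each of the two call sites in do_init_bootmem()
shrinks to a plain assignment, which is what the diffstat's 5 insertions against
6 deletions reflects.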
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/mm/numa.c	|	11
1 file changed, 5 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9ec9939f9fb0..7393bd76d698 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -824,7 +824,7 @@ static void __init dump_numa_memory_topology(void)
*
* Returns the virtual address of the memory.
*/
-static void __init *careful_allocation(int nid, unsigned long size,
+static void __init *careful_zallocation(int nid, unsigned long size,
unsigned long align,
unsigned long end_pfn)
{
@@ -864,6 +864,7 @@ static void __init *careful_allocation(int nid, unsigned long size,
dbg("alloc_bootmem %p %lx\n", ret, size);
}
+ memset(ret, 0, size);
return ret;
}
@@ -971,10 +972,9 @@ void __init do_init_bootmem(void)
* previous nodes' bootmem to be initialized and have
* all reserved areas marked.
*/
- NODE_DATA(nid) = careful_allocation(nid,
+ NODE_DATA(nid) = careful_zallocation(nid,
sizeof(struct pglist_data),
SMP_CACHE_BYTES, end_pfn);
- memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
dbg("node %d\n", nid);
dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
@@ -990,10 +990,9 @@ void __init do_init_bootmem(void)
dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
- bootmem_vaddr = careful_allocation(nid,
+ bootmem_vaddr = careful_zallocation(nid,
bootmap_pages << PAGE_SHIFT,
PAGE_SIZE, end_pfn);
- memset(bootmem_vaddr, 0, bootmap_pages << PAGE_SHIFT);
dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
@@ -1004,7 +1003,7 @@ void __init do_init_bootmem(void)
free_bootmem_with_active_regions(nid, end_pfn);
/*
* Be very careful about moving this around. Future
- * calls to careful_allocation() depend on this getting
+ * calls to careful_zallocation() depend on this getting
* done correctly.
*/
mark_reserved_regions_for_nid(nid);