author		Tejun Heo <tj@kernel.org>	2011-04-05 02:23:49 +0400
committer	H. Peter Anvin <hpa@zytor.com>	2011-04-07 04:56:57 +0400
commit		5b8443b25c0f323ec190d094e4b441957b02664e (patch)
tree		ab64e1087999f668729ce45c2e9543ca63f298c9 /arch/x86/mm/numa_32.c
parent		a6c24f7a705d939ddd2fcaa443fa3d8e852b933d (diff)
download	linux-5b8443b25c0f323ec190d094e4b441957b02664e.tar.xz
x86-32, numa: Remove redundant top-down alloc code from remap initialization
memblock_find_in_range() now does top-down allocation by default, so
there's no reason for its callers to explicitly implement it by
gradually lowering the start address.
Remove the redundant top-down allocation logic from initmem_init() and
calculate_numa_remap_pages().
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1301955840-7246-4-git-send-email-tj@kernel.org
Acked-by: Yinghai Lu <yinghai@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
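For orientation before the full diff, here is a condensed before/after sketch of the initmem_init() hunk (identifiers taken directly from the patch below; not a standalone compile unit). The open-coded retry loop that walked the base address downward collapses into a single call, since memblock_find_in_range() now searches the range top-down by itself:

	/* Before: the caller emulated top-down allocation by repeatedly lowering the base. */
	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
	do {
		kva_start_pfn = memblock_find_in_range(kva_target_pfn << PAGE_SHIFT,
						       max_low_pfn << PAGE_SHIFT,
						       kva_pages << PAGE_SHIFT,
						       PTRS_PER_PTE << PAGE_SHIFT) >> PAGE_SHIFT;
		kva_target_pfn -= PTRS_PER_PTE;
	} while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);

	/* After: one call over the whole [min_low_pfn, max_low_pfn) range;
	 * memblock_find_in_range() itself prefers the highest fitting address. */
	kva_start_pfn = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
					       max_low_pfn << PAGE_SHIFT,
					       kva_pages << PAGE_SHIFT,
					       PTRS_PER_PTE << PAGE_SHIFT) >> PAGE_SHIFT;
	if (kva_start_pfn == MEMBLOCK_ERROR)
		panic("Can not get kva space\n");

The same simplification is applied to the per-node KVA search in calculate_numa_remap_pages(), shown in the first hunks of the diff.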
Diffstat (limited to 'arch/x86/mm/numa_32.c')
-rw-r--r--	arch/x86/mm/numa_32.c	43
1 file changed, 14 insertions(+), 29 deletions(-)
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 50e82507eab4..60701a5e0de0 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -270,8 +270,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
 	unsigned long size, reserve_pages = 0;
 
 	for_each_online_node(nid) {
-		u64 node_kva_target;
-		u64 node_kva_final;
+		u64 node_kva;
 
 		/*
 		 * The acpi/srat node info can show hot-add memroy zones
@@ -295,19 +294,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
 		/* now the roundup is correct, convert to PAGE_SIZE pages */
 		size = size * PTRS_PER_PTE;
 
-		node_kva_target = round_down(node_end_pfn[nid] - size,
-					     PTRS_PER_PTE);
-		node_kva_target <<= PAGE_SHIFT;
-		do {
-			node_kva_final = memblock_find_in_range(node_kva_target,
+		node_kva = memblock_find_in_range(node_start_pfn[nid] << PAGE_SHIFT,
 					((u64)node_end_pfn[nid])<<PAGE_SHIFT,
-						((u64)size)<<PAGE_SHIFT,
-						LARGE_PAGE_BYTES);
-			node_kva_target -= LARGE_PAGE_BYTES;
-		} while (node_kva_final == MEMBLOCK_ERROR &&
-			 (node_kva_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
-
-		if (node_kva_final == MEMBLOCK_ERROR)
+					((u64)size)<<PAGE_SHIFT,
+					LARGE_PAGE_BYTES);
+		if (node_kva == MEMBLOCK_ERROR)
 			panic("Can not get kva ram\n");
 
 		node_remap_size[nid] = size;
@@ -315,7 +306,7 @@ static __init unsigned long calculate_numa_remap_pages(void)
 		reserve_pages += size;
 		printk(KERN_DEBUG "Reserving %ld pages of KVA for lmem_map of"
 				  " node %d at %llx\n",
-				size, nid, node_kva_final>>PAGE_SHIFT);
+				size, nid, node_kva >> PAGE_SHIFT);
 
 		/*
 		 *  prevent kva address below max_low_pfn want it on system
@@ -328,11 +319,11 @@ static __init unsigned long calculate_numa_remap_pages(void)
 		 *  to use it as free.
 		 *  So memblock_x86_reserve_range here, hope we don't run out of that array
 		 */
-		memblock_x86_reserve_range(node_kva_final,
-			      node_kva_final+(((u64)size)<<PAGE_SHIFT),
-				     "KVA RAM");
+		memblock_x86_reserve_range(node_kva,
+					   node_kva + (((u64)size)<<PAGE_SHIFT),
+					   "KVA RAM");
 
-		node_remap_start_pfn[nid] = node_kva_final>>PAGE_SHIFT;
+		node_remap_start_pfn[nid] = node_kva >> PAGE_SHIFT;
 	}
 	printk(KERN_INFO "Reserving total of %lx pages for numa KVA remap\n",
 			reserve_pages);
@@ -356,7 +347,6 @@ static void init_remap_allocator(int nid)
 void __init initmem_init(void)
 {
 	int nid;
-	long kva_target_pfn;
 
 	/*
 	 * When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -371,15 +361,10 @@ void __init initmem_init(void)
 
 	kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
 
-	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
-	do {
-		kva_start_pfn = memblock_find_in_range(kva_target_pfn<<PAGE_SHIFT,
-					max_low_pfn<<PAGE_SHIFT,
-					kva_pages<<PAGE_SHIFT,
-					PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
-		kva_target_pfn -= PTRS_PER_PTE;
-	} while (kva_start_pfn == MEMBLOCK_ERROR && kva_target_pfn > min_low_pfn);
-
+	kva_start_pfn = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
+					       max_low_pfn << PAGE_SHIFT,
+					       kva_pages << PAGE_SHIFT,
+					       PTRS_PER_PTE << PAGE_SHIFT) >> PAGE_SHIFT;
 	if (kva_start_pfn == MEMBLOCK_ERROR)
 		panic("Can not get kva space\n");