author    David Rientjes <rientjes@google.com>  2011-03-04 17:17:21 +0300
committer Tejun Heo <tj@kernel.org>             2011-03-04 17:17:21 +0300
commit    c09cedf4f75f1e47ea17f55e18e9cfb81bec8575
tree      deff33cfb836cc7797916062c7d244fb71d3ac30 /arch/x86/mm
parent    51b361b4009f4e19ae68d2bcbb35e254e91b6054
download  linux-c09cedf4f75f1e47ea17f55e18e9cfb81bec8575.tar.xz
x86-64, NUMA: Clean up initmem_init()
This patch cleans up initmem_init() so that it is more readable and doesn't use an unnecessary array of function pointers to convolute the flow of the code.

It also makes it obvious that dummy_numa_init() will always succeed (and documents that requirement) so that the existing BUG() is never actually reached.

No functional change.

-tj: Updated comment for dummy_numa_init() slightly.

Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
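The resulting control flow can be illustrated outside the kernel. The sketch below is a minimal user-space mock-up, under stated assumptions: the fake_*_init() parsers, numa_init_sketch(), initmem_init_sketch() and the printf() reporting are hypothetical stand-ins for illustration, not kernel interfaces; only the parser ordering and the "dummy must not fail" invariant mirror the patched code.

/*
 * User-space sketch of the control flow introduced by this patch.
 * The fake_* parsers are hypothetical stand-ins; only the ordering
 * and the "dummy must not fail" invariant mirror the kernel code.
 */
#include <stdio.h>

static int fake_acpi_numa_init(void)  { return -1; }  /* pretend no SRAT */
static int fake_amd_numa_init(void)   { return -1; }  /* pretend no AMD NB info */
static int fake_dummy_numa_init(void) { return 0; }   /* must not fail */

/*
 * Common wrapper: in the kernel this also resets the node masks, meminfo
 * and distance table, then registers memblks; here it only runs the parser.
 */
static int numa_init_sketch(int (*init_func)(void))
{
	return init_func() < 0 ? -1 : 0;
}

static const char *initmem_init_sketch(int numa_off)
{
	if (!numa_off) {
		if (!numa_init_sketch(fake_acpi_numa_init))
			return "ACPI";
		if (!numa_init_sketch(fake_amd_numa_init))
			return "AMD";
	}
	/* The dummy parser always succeeds, so the old BUG() is unreachable. */
	numa_init_sketch(fake_dummy_numa_init);
	return "dummy";
}

int main(void)
{
	printf("NUMA set up via %s init\n", initmem_init_sketch(0));
	return 0;
}

Compared with the old function-pointer array (which needed a sentinel slot `[2] = dummy_numa_init`), each parser is now invoked explicitly under its own #ifdef, so the fallback order is visible at the call site and the dummy case needs no array entry at all.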
Diffstat (limited to 'arch/x86/mm')
-rw-r--r--  arch/x86/mm/numa_64.c | 94
1 file changed, 55 insertions(+), 39 deletions(-)
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 86491ba568d9..9ec0f209a6a4 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -562,6 +562,15 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
return 0;
}
+/**
+ * dummy_numa_init - Fallback dummy NUMA init
+ *
+ * Used if there's no underlying NUMA architecture, NUMA initialization
+ * fails, or NUMA is disabled on the command line.
+ *
+ * Must online at least one node and add memory blocks that cover all
+ * allowed memory. This function must not fail.
+ */
static int __init dummy_numa_init(void)
{
printk(KERN_INFO "%s\n",
@@ -575,57 +584,64 @@ static int __init dummy_numa_init(void)
return 0;
}
-void __init initmem_init(void)
+static int __init numa_init(int (*init_func)(void))
{
- int (*numa_init[])(void) = { [2] = dummy_numa_init };
- int i, j;
-
- if (!numa_off) {
-#ifdef CONFIG_ACPI_NUMA
- numa_init[0] = x86_acpi_numa_init;
-#endif
-#ifdef CONFIG_AMD_NUMA
- numa_init[1] = amd_numa_init;
-#endif
- }
+ int i;
+ int ret;
- for (i = 0; i < ARRAY_SIZE(numa_init); i++) {
- if (!numa_init[i])
- continue;
+ for (i = 0; i < MAX_LOCAL_APIC; i++)
+ set_apicid_to_node(i, NUMA_NO_NODE);
- for (j = 0; j < MAX_LOCAL_APIC; j++)
- set_apicid_to_node(j, NUMA_NO_NODE);
+ nodes_clear(numa_nodes_parsed);
+ nodes_clear(node_possible_map);
+ nodes_clear(node_online_map);
+ memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+ remove_all_active_ranges();
+ numa_reset_distance();
- nodes_clear(numa_nodes_parsed);
- nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
- memset(&numa_meminfo, 0, sizeof(numa_meminfo));
- remove_all_active_ranges();
- numa_reset_distance();
+ ret = init_func();
+ if (ret < 0)
+ return ret;
+ ret = numa_cleanup_meminfo(&numa_meminfo);
+ if (ret < 0)
+ return ret;
- if (numa_init[i]() < 0)
- continue;
+ numa_emulation(&numa_meminfo, numa_distance_cnt);
- if (numa_cleanup_meminfo(&numa_meminfo) < 0)
- continue;
+ ret = numa_register_memblks(&numa_meminfo);
+ if (ret < 0)
+ return ret;
- numa_emulation(&numa_meminfo, numa_distance_cnt);
+ for (i = 0; i < nr_cpu_ids; i++) {
+ int nid = early_cpu_to_node(i);
- if (numa_register_memblks(&numa_meminfo) < 0)
+ if (nid == NUMA_NO_NODE)
continue;
+ if (!node_online(nid))
+ numa_clear_node(i);
+ }
+ numa_init_array();
+ return 0;
+}
- for (j = 0; j < nr_cpu_ids; j++) {
- int nid = early_cpu_to_node(j);
+void __init initmem_init(void)
+{
+ int ret;
- if (nid == NUMA_NO_NODE)
- continue;
- if (!node_online(nid))
- numa_clear_node(j);
- }
- numa_init_array();
- return;
+ if (!numa_off) {
+#ifdef CONFIG_ACPI_NUMA
+ ret = numa_init(x86_acpi_numa_init);
+ if (!ret)
+ return;
+#endif
+#ifdef CONFIG_AMD_NUMA
+ ret = numa_init(amd_numa_init);
+ if (!ret)
+ return;
+#endif
}
- BUG();
+
+ numa_init(dummy_numa_init);
}
unsigned long __init numa_free_all_bootmem(void)