path: root/kernel
author		Mike Travis <travis@sgi.com>	2008-04-16 03:35:52 +0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-19 21:44:59 +0400
commit		4bdbaad33d0f4d0e9818a38a825f5b75c0296a28 (patch)
tree		36d8ac15fb3d2d4ccb939327a786f8327a403b98 /kernel
parent		9d1fe3236a1d64ab687e16b4cbbaa1383352a2c1 (diff)
download	linux-4bdbaad33d0f4d0e9818a38a825f5b75c0296a28.tar.xz
sched: remove another cpumask_t variable from stack
* Remove another cpumask_t variable from the stack that was missed in the
  last kernel/sched.c updates.

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched.c	15
1 file changed, 6 insertions, 9 deletions
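The diff below applies a common pattern for keeping large objects off the kernel stack: instead of declaring a cpumask_t local and returning it by value, sched_domain_node_span() now fills in a cpumask_t that the caller already owns. The fragment that follows is a minimal, self-contained userspace sketch of that pattern, not the kernel code itself; the names big_mask_t, node_span_by_value() and node_span_by_pointer() are made up purely for illustration.

/*
 * Sketch of the "return by value" vs. "fill caller-provided buffer" pattern.
 * All names here are hypothetical; this is not the kernel's cpumask API.
 */
#include <stdio.h>
#include <string.h>

#define MASK_WORDS 64	/* large enough that a stack copy is noticeable */

typedef struct { unsigned long bits[MASK_WORDS]; } big_mask_t;

/* Old style: the whole big_mask_t lives on this function's stack frame
 * and is copied out to the caller on return. */
static big_mask_t node_span_by_value(int node)
{
	big_mask_t span;

	memset(&span, 0, sizeof(span));
	span.bits[node / (8 * sizeof(unsigned long))] |=
		1UL << (node % (8 * sizeof(unsigned long)));
	return span;
}

/* New style: the caller owns the storage, so there is no large temporary
 * on this stack frame and no struct copy on return. */
static void node_span_by_pointer(int node, big_mask_t *span)
{
	memset(span, 0, sizeof(*span));
	span->bits[node / (8 * sizeof(unsigned long))] |=
		1UL << (node % (8 * sizeof(unsigned long)));
}

int main(void)
{
	static big_mask_t span;		/* static: off the stack entirely */

	span = node_span_by_value(3);	/* copies MASK_WORDS words back */
	node_span_by_pointer(3, &span);	/* writes in place, no copy */
	printf("word 0 = %#lx\n", span.bits[0]);
	return 0;
}

The by-value version needs sizeof(big_mask_t) bytes of stack in the callee plus a struct copy on return; the pointer version needs neither, which is the same saving the patch makes for cpumask_t (up to NR_CPUS bits) in sched.c.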
diff --git a/kernel/sched.c b/kernel/sched.c
index 6809178eaa9d..b56d98b01267 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -6501,27 +6501,24 @@ static int find_next_best_node(int node, nodemask_t *used_nodes)
  * should be one that prevents unnecessary balancing, but also spreads tasks
  * out optimally.
  */
-static cpumask_t sched_domain_node_span(int node)
+static void sched_domain_node_span(int node, cpumask_t *span)
 {
 	nodemask_t used_nodes;
-	cpumask_t span;
 	node_to_cpumask_ptr(nodemask, node);
 	int i;
 
-	cpus_clear(span);
+	cpus_clear(*span);
 	nodes_clear(used_nodes);
 
-	cpus_or(span, span, *nodemask);
+	cpus_or(*span, *span, *nodemask);
 	node_set(node, used_nodes);
 
 	for (i = 1; i < SD_NODES_PER_DOMAIN; i++) {
 		int next_node = find_next_best_node(node, &used_nodes);
 
 		node_to_cpumask_ptr_next(nodemask, next_node);
-		cpus_or(span, span, *nodemask);
+		cpus_or(*span, *span, *nodemask);
 	}
-
-	return span;
 }
 #endif
@@ -6883,7 +6880,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 
 		sd = &per_cpu(node_domains, i);
 		SD_INIT(sd, NODE);
-		sd->span = sched_domain_node_span(cpu_to_node(i));
+		sched_domain_node_span(cpu_to_node(i), &sd->span);
 		sd->parent = p;
 		if (p)
 			p->child = sd;
@@ -6998,7 +6995,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 			continue;
 		}
 
-		*domainspan = sched_domain_node_span(i);
+		sched_domain_node_span(i, domainspan);
 		cpus_and(*domainspan, *domainspan, *cpu_map);
 
 		sg = kmalloc_node(sizeof(struct sched_group), GFP_KERNEL, i);