author     Milton Miller <miltonm@bga.com>  2007-10-15 19:00:19 +0400
committer  Ingo Molnar <mingo@elte.hu>      2007-10-15 19:00:19 +0400
commit     6382bc90f5664c450afc1f896e7ddb35ba182af9 (patch)
tree       01e40b9d5f44bfeed712cf8d394a16aa93598436
parent     97b6ea7b6369d51a451a7d5747a7939a593fdd9c (diff)
download   linux-6382bc90f5664c450afc1f896e7ddb35ba182af9.tar.xz
sched: domain sysctl fixes: unregister the sysctl table before domains
Unregister and free the sysctl table before destroying domains, then
rebuild and register after creating the new domains. This prevents the
sysctl table from pointing to freed memory for root to write.

Signed-off-by: Milton Miller <miltonm@bga.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  kernel/sched.c  34
1 file changed, 30 insertions(+), 4 deletions(-)
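For context, the core of the fix is an ordering change: the sysctl table is taken down before the scheduler domains it describes are destroyed, and a fresh table is only published once the new domains exist. Below is a minimal userspace sketch of that ordering; every function body is a stand-in (an assumption, not kernel code), and only the register/unregister pairing mirrors the patch, whose real call sites are detach_destroy_domains(), arch_init_sched_domains() and partition_sched_domains().

#include <stdio.h>

/* Stand-in bodies (assumptions); in the kernel these steps correspond to
 * unregister_sched_domain_sysctl(), domain teardown, build_sched_domains()
 * and register_sched_domain_sysctl(). */
static void unregister_sysctl_view(void) { puts("stale sysctl table unregistered and freed"); }
static void destroy_old_domains(void)    { puts("old sched domains torn down"); }
static void build_new_domains(void)      { puts("new sched domains built"); }
static void register_sysctl_view(void)   { puts("fresh sysctl table registered"); }

int main(void)
{
	/* Take the table out of /proc/sys first, so root can no longer
	 * write through pointers into memory that is about to be freed. */
	unregister_sysctl_view();
	destroy_old_domains();

	/* Only after the replacement domains exist is a new table built
	 * and published again. */
	build_new_domains();
	register_sysctl_view();
	return 0;
}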
diff --git a/kernel/sched.c b/kernel/sched.c
index 374f42170d6d..a2dd05435534 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5250,6 +5250,18 @@ static struct ctl_table *sd_alloc_ctl_entry(int n)
return entry;
}
+static void sd_free_ctl_entry(struct ctl_table **tablep)
+{
+ struct ctl_table *entry = *tablep;
+
+ for (entry = *tablep; entry->procname; entry++)
+ if (entry->child)
+ sd_free_ctl_entry(&entry->child);
+
+ kfree(*tablep);
+ *tablep = NULL;
+}
+
static void
set_table_entry(struct ctl_table *entry,
const char *procname, void *data, int maxlen,
@@ -5318,7 +5330,7 @@ static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
}
static struct ctl_table_header *sd_sysctl_header;
-static void init_sched_domain_sysctl(void)
+static void register_sched_domain_sysctl(void)
{
int i, cpu_num = num_online_cpus();
struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
@@ -5335,8 +5347,18 @@ static void init_sched_domain_sysctl(void)
}
sd_sysctl_header = register_sysctl_table(sd_ctl_root);
}
+
+static void unregister_sched_domain_sysctl(void)
+{
+ unregister_sysctl_table(sd_sysctl_header);
+ sd_sysctl_header = NULL;
+ sd_free_ctl_entry(&sd_ctl_dir[0].child);
+}
#else
-static void init_sched_domain_sysctl(void)
+static void register_sched_domain_sysctl(void)
+{
+}
+static void unregister_sched_domain_sysctl(void)
{
}
#endif
@@ -6271,6 +6293,8 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
err = build_sched_domains(&cpu_default_map);
+ register_sched_domain_sysctl();
+
return err;
}
@@ -6287,6 +6311,8 @@ static void detach_destroy_domains(const cpumask_t *cpu_map)
{
int i;
+ unregister_sched_domain_sysctl();
+
for_each_cpu_mask(i, *cpu_map)
cpu_attach_domain(NULL, i);
synchronize_sched();
@@ -6317,6 +6343,8 @@ int partition_sched_domains(cpumask_t *partition1, cpumask_t *partition2)
if (!err && !cpus_empty(*partition2))
err = build_sched_domains(partition2);
+ register_sched_domain_sysctl();
+
return err;
}
@@ -6448,8 +6476,6 @@ void __init sched_init_smp(void)
/* XXX: Theoretical race here - CPU may be hotplugged now */
hotcpu_notifier(update_sched_domains, 0);
- init_sched_domain_sysctl();
-
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed(current, non_isolated_cpus) < 0)
BUG();
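The new sd_free_ctl_entry() above walks a procname-terminated array, recurses into any child directory tables first, then frees the array itself and clears the caller's pointer. Here is a userspace sketch of the same pattern, using a simplified stand-in for struct ctl_table (an assumption: only the procname and child fields the walk touches are modelled).

#include <stdlib.h>

struct tbl {			/* simplified stand-in for struct ctl_table */
	const char *procname;	/* NULL procname marks the array terminator */
	struct tbl *child;	/* nested directory table, or NULL for a leaf */
};

static void free_tbl(struct tbl **tablep)
{
	struct tbl *entry;

	/* free nested directory tables first ... */
	for (entry = *tablep; entry->procname; entry++)
		if (entry->child)
			free_tbl(&entry->child);

	/* ... then the array itself, and clear the caller's pointer so
	 * nothing is left referring to freed memory */
	free(*tablep);
	*tablep = NULL;
}

int main(void)
{
	struct tbl *leaf = calloc(1, sizeof(*leaf));	/* terminator only */
	struct tbl *dir  = calloc(2, sizeof(*dir));	/* one entry + terminator */

	dir[0].procname = "cpu0";
	dir[0].child = leaf;

	free_tbl(&dir);					/* frees leaf, then dir */
	return dir == NULL ? 0 : 1;
}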