author		Tejun Heo <tj@kernel.org>	2013-01-07 20:51:07 +0400
committer	Tejun Heo <tj@kernel.org>	2013-01-07 20:51:07 +0400
commit		4e4c9a140fc2ecf5e086922ccd2022bdabe509b6 (patch)
tree		80bc4759956732ae68d1972cc6cd538c64bba1c8 /kernel/cpuset.c
parent		ae8086ce15fdab2b57599d7a3242a114ba4b8597 (diff)
download	linux-4e4c9a140fc2ecf5e086922ccd2022bdabe509b6.tar.xz
cpuset: cleanup cpuset[_can]_attach()
cpuset_can_attach() prepares the global variables cpus_attach and
cpuset_attach_nodemask_{to|from}, which are used by cpuset_attach().
There is no reason to do this preparation in cpuset_can_attach(); the
same information can be accessed from cpuset_attach().
Move the preparation logic from cpuset_can_attach() to cpuset_attach()
and turn the global variables into static variables inside cpuset_attach().
With this change, there's no reason to keep
cpuset_attach_nodemask_{from|to} global. Move them inside
cpuset_attach(). Unfortunately, we need to keep cpus_attach global, as
it can't be allocated from cpuset_attach(); the resulting allocation
pattern is sketched after the tags below.
v2: cpus_attach not converted to cpumask_t as per Li Zefan and Rusty
Russell.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
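
The reason cpus_attach can't simply become a function-local static like the
nodemasks is that a cpumask_var_t may require a real allocation via
alloc_cpumask_var() when CONFIG_CPUMASK_OFFSTACK is set, and the comment in
the diff notes that this allocation happens in cpuset_init(). The following
is a minimal sketch of that pattern, not the patch itself; cpuset_init()'s
real body is omitted and the BUG() on failure is an assumption about how an
init-time allocation failure would be handled:

/*
 * Sketch only (not the patch): a cpumask_var_t needs alloc_cpumask_var()
 * before use when CONFIG_CPUMASK_OFFSTACK=y, and cpuset_attach() has no
 * way to report an allocation failure, so the buffer is allocated once
 * at init time and reused for every attach under cgroup_mutex.
 */
static cpumask_var_t cpus_attach;

int __init cpuset_init(void)
{
	/* init can fail loudly; the attach path cannot (assumed handling) */
	if (!alloc_cpumask_var(&cpus_attach, GFP_KERNEL))
		BUG();
	return 0;
}

static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	/* plain structs: fine as function-local statics under cgroup_mutex */
	static nodemask_t cpuset_attach_nodemask_from;
	static nodemask_t cpuset_attach_nodemask_to;

	/* ... fill cpus_attach and the nodemasks, then apply them ... */
}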
Diffstat (limited to 'kernel/cpuset.c')
-rw-r--r--	kernel/cpuset.c	35
1 file changed, 18 insertions(+), 17 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4b054b9faf3d..c5edc6b3eb28 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1395,15 +1395,6 @@ static int fmeter_getrate(struct fmeter *fmp)
 	return val;
 }
 
-/*
- * Protected by cgroup_lock. The nodemasks must be stored globally because
- * dynamically allocating them is not allowed in can_attach, and they must
- * persist until attach.
- */
-static cpumask_var_t cpus_attach;
-static nodemask_t cpuset_attach_nodemask_from;
-static nodemask_t cpuset_attach_nodemask_to;
-
 /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
 static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
@@ -1430,19 +1421,21 @@ static int cpuset_can_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 			return ret;
 	}
 
-	/* prepare for attach */
-	if (cs == &top_cpuset)
-		cpumask_copy(cpus_attach, cpu_possible_mask);
-	else
-		guarantee_online_cpus(cs, cpus_attach);
-
-	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
-
 	return 0;
 }
 
+/*
+ * Protected by cgroup_mutex. cpus_attach is used only by cpuset_attach()
+ * but we can't allocate it dynamically there. Define it global and
+ * allocate from cpuset_init().
+ */
+static cpumask_var_t cpus_attach;
+
 static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 {
+	/* static bufs protected by cgroup_mutex */
+	static nodemask_t cpuset_attach_nodemask_from;
+	static nodemask_t cpuset_attach_nodemask_to;
 	struct mm_struct *mm;
 	struct task_struct *task;
 	struct task_struct *leader = cgroup_taskset_first(tset);
@@ -1450,6 +1443,14 @@ static void cpuset_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
 	struct cpuset *cs = cgroup_cs(cgrp);
 	struct cpuset *oldcs = cgroup_cs(oldcgrp);
 
+	/* prepare for attach */
+	if (cs == &top_cpuset)
+		cpumask_copy(cpus_attach, cpu_possible_mask);
+	else
+		guarantee_online_cpus(cs, cpus_attach);
+
+	guarantee_online_mems(cs, &cpuset_attach_nodemask_to);
+
 	cgroup_taskset_for_each(task, cgrp, tset) {
 		/*
 		 * can_attach beforehand should guarantee that this doesn't
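
For context on why the two kinds of buffers end up treated differently:
nodemask_t is always a plain struct, while cpumask_var_t is only an embedded
array when CONFIG_CPUMASK_OFFSTACK is off. The definition below is paraphrased
from include/linux/cpumask.h and is shown here purely for illustration:

/* Paraphrased from include/linux/cpumask.h */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;		/* pointer: must be set up with alloc_cpumask_var() */
#else
typedef struct cpumask cpumask_var_t[1];	/* one-element array: no allocation needed */
#endif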