author		Linus Torvalds <torvalds@linux-foundation.org>	2016-08-30 19:31:59 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-08-30 19:31:59 +0300
commit		748e7fc20983fccd742e93c5b6a38ece1f71f80f (patch)
tree		a097b9e7875647416ca3a0661809a3ed63d2d21a /kernel
parent		e4e98c460ad38c78498622a164fd5ef09a2dc9cb (diff)
parent		568ac888215c7fb2fabe8ea739b00ec3c1f5d440 (diff)
download	linux-748e7fc20983fccd742e93c5b6a38ece1f71f80f.tar.xz
Merge branch 'for-4.8-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup fixes from Tejun Heo:
 "Two fixes for cgroup.

  - There still was a hole in enforcing cpuset rules, fixed by Li.

  - The recent switch to a global percpu_rwsem for threadgroup locking
    revealed a couple of issues in how percpu_rwsem is implemented and
    used by cgroup. Balbir found that the read-locked section was
    unnecessarily wide, including operations which can often depend on
    IOs. With the percpu_rwsem updates (coming through a different
    tree) and the reduction of the read-locked section, all the
    reported locking latency issues, including the Android one, are
    resolved.

    It looks like we can keep global percpu_rwsem locking for now. If
    there actually are cases which can't be resolved, we can go back to
    more complex per-signal_struct locking."

* 'for-4.8-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup:
  cgroup: reduce read locked section of cgroup_threadgroup_rwsem during fork
  cpuset: make sure new tasks conform to the current config of the cpuset
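For reference, the threadgroup locking the message refers to is a thin wrapper around a single global percpu_rwsem. A minimal sketch of the v4.8-era helpers, simplified from include/linux/cgroup-defs.h, shown here only for context and not part of this diff:

/* Sketch (simplified): threadgroup_change_begin/end, as used in copy_process()
 * below, resolve to read-side operations on the global cgroup_threadgroup_rwsem. */
static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
{
	percpu_down_read(&cgroup_threadgroup_rwsem);
}

static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
{
	percpu_up_read(&cgroup_threadgroup_rwsem);
}

The cgroup migration paths take the write side of the same rwsem, so every operation performed inside the read-locked section of copy_process() directly extends the latency a writer can observe; that is the window the series below shrinks.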
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/cpuset.c	15
-rw-r--r--	kernel/fork.c	4
2 files changed, 17 insertions(+), 2 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index c7fd2778ed50..c27e53326bef 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2069,6 +2069,20 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
 	mutex_unlock(&cpuset_mutex);
 }
 
+/*
+ * Make sure the new task conforms to the current state of its parent,
+ * which could have been changed by cpuset just after it inherits the
+ * state from the parent and before it sits on the cgroup's task list.
+ */
+void cpuset_fork(struct task_struct *task)
+{
+	if (task_css_is_root(task, cpuset_cgrp_id))
+		return;
+
+	set_cpus_allowed_ptr(task, &current->cpus_allowed);
+	task->mems_allowed = current->mems_allowed;
+}
+
 struct cgroup_subsys cpuset_cgrp_subsys = {
 	.css_alloc	= cpuset_css_alloc,
 	.css_online	= cpuset_css_online,
@@ -2079,6 +2093,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
 	.attach		= cpuset_attach,
 	.post_attach	= cpuset_post_attach,
 	.bind		= cpuset_bind,
+	.fork		= cpuset_fork,
 	.legacy_cftypes	= files,
 	.early_init	= true,
 };
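The new .fork callback is what causes cpuset_fork() to run for every child task. For context, a rough sketch (not part of this diff) of how cgroup_post_fork() drives the per-subsystem ->fork() handlers around v4.8, with css_set attachment and locking elided:

/* Rough, simplified sketch of cgroup_post_fork() circa v4.8; only the part
 * that invokes the subsystem fork callbacks is shown. */
void cgroup_post_fork(struct task_struct *child)
{
	struct cgroup_subsys *ss;
	int i;

	/* ... attach @child to its parent's css_set ... */

	do_each_subsys_mask(ss, i, have_fork_callback) {
		ss->fork(child);	/* cpuset_fork() for the cpuset controller */
	} while_each_subsys_mask();
}

Because the callback runs only after the child is visible on the cgroup's task lists, cpuset_fork() merely needs to re-sync cpus_allowed and mems_allowed from current for tasks outside the root cpuset.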
diff --git a/kernel/fork.c b/kernel/fork.c
index 52e725d4a866..aaf782327bf3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1404,7 +1404,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->real_start_time = ktime_get_boot_ns();
 	p->io_context = NULL;
 	p->audit_context = NULL;
-	threadgroup_change_begin(current);
 	cgroup_fork(p);
 #ifdef CONFIG_NUMA
 	p->mempolicy = mpol_dup(p->mempolicy);
@@ -1556,6 +1555,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	INIT_LIST_HEAD(&p->thread_group);
 	p->task_works = NULL;
 
+	threadgroup_change_begin(current);
 	/*
 	 * Ensure that the cgroup subsystem policies allow the new process to be
 	 * forked. It should be noted that the new process's css_set can be changed
@@ -1656,6 +1656,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 bad_fork_cancel_cgroup:
 	cgroup_cancel_fork(p);
 bad_fork_free_pid:
+	threadgroup_change_end(current);
 	if (pid != &init_struct_pid)
 		free_pid(pid);
 bad_fork_cleanup_thread:
@@ -1688,7 +1689,6 @@ bad_fork_cleanup_policy:
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_threadgroup_lock:
 #endif
-	threadgroup_change_end(current);
 	delayacct_tsk_free(p);
 bad_fork_cleanup_count:
 	atomic_dec(&p->cred->user->processes);
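Taken together, the fork.c hunks narrow the window during which cgroup_threadgroup_rwsem is read-held: the expensive resource copies no longer happen under the lock. A heavily simplified view of the resulting ordering in copy_process() (v4.8-era; error handling and unrelated setup omitted):

	/* Simplified ordering in copy_process() after this series. */
	cgroup_fork(p);				/* no longer under the rwsem */
	/* ... copy mm, fs, files, signal, namespaces; may block on IO ... */

	threadgroup_change_begin(current);	/* read-lock cgroup_threadgroup_rwsem */
	retval = cgroup_can_fork(p);
	if (retval)
		goto bad_fork_free_pid;		/* now drops the rwsem, per the hunk above */
	/* ... attach pid, link the task into its thread group ... */
	cgroup_post_fork(p);			/* ->fork() callbacks, including cpuset_fork() */
	threadgroup_change_end(current);	/* read-unlock */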