author     Eric W. Biederman <ebiederm@xmission.com>   2016-07-15 06:35:51 -0500
committer  Tejun Heo <tj@kernel.org>                   2016-07-15 07:56:38 -0400
commit     eedd0f4cbf5f3b81e82649832091e1d9d53f0709 (patch)
tree       b806976542cf2731a4382414a28326706cb01f7c /kernel
parent     7bd8830875bfa380c68f390efbad893293749324 (diff)
cgroupns: Close race between cgroup_post_fork and copy_cgroup_ns
In most code paths involving cgroup migration, cgroup_threadgroup_rwsem
is taken. There are two exceptions:
- remove_tasks_in_empty_cpuset calls cgroup_transfer_tasks
- vhost_attach_cgroups_work calls cgroup_attach_task_all
With cgroup_threadgroup_rwsem held, it is guaranteed that cgroup_post_fork
and copy_cgroup_ns will reference the same css_set from the process calling
fork.
Without such an interlock, the process after fork could reference one
css_set from its new cgroup namespace and another css_set from
task->cgroups, which is semantically nonsensical.
Cc: stable@vger.kernel.org
Fixes: a79a908fd2b0 ("cgroup: introduce cgroup namespaces")
Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
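Editor's illustrative aside (not part of the patch): the race is the classic problem of two reads of task->cgroups needing a consistent snapshot against a concurrent writer. The sketch below is a userspace analogy of the interlock this commit adds, with a pthread rwlock standing in for cgroup_threadgroup_rwsem and a plain pointer standing in for the task's css_set; every name in it is hypothetical and only models the pattern, not the kernel code.

/*
 * Userspace model of the interlock: the writer thread plays the role of a
 * migration path (cgroup_attach_task_all / cgroup_transfer_tasks), and the
 * two reads in main() model copy_cgroup_ns() and cgroup_post_fork() each
 * looking at the forking task's css_set.  All names are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_rwlock_t threadgroup_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static int *task_css;                     /* models task->cgroups */

static void *migrate(void *arg)           /* models a cgroup migration path */
{
	for (int i = 0; i < 100000; i++) {
		int *new_css = malloc(sizeof(*new_css));
		*new_css = i;
		pthread_rwlock_wrlock(&threadgroup_rwsem);  /* the added interlock */
		task_css = new_css;                         /* move the "task" */
		pthread_rwlock_unlock(&threadgroup_rwsem);
		/* old css is leaked on purpose to keep the sketch short */
	}
	return NULL;
}

int main(void)
{
	task_css = malloc(sizeof(*task_css));
	*task_css = -1;

	pthread_t t;
	pthread_create(&t, NULL, migrate, NULL);

	for (int i = 0; i < 100000; i++) {
		/*
		 * Models fork: both reads must observe the same css_set.
		 * Holding the rwsem for read across both reads, while every
		 * writer takes it for write, guarantees a consistent snapshot.
		 */
		pthread_rwlock_rdlock(&threadgroup_rwsem);
		int *ns_css   = task_css;   /* what the new cgroup ns would pin  */
		int *fork_css = task_css;   /* what the child task would point at */
		pthread_rwlock_unlock(&threadgroup_rwsem);
		if (ns_css != fork_css)
			printf("race: namespace and task css_set disagree\n");
	}
	pthread_join(t, NULL);
	return 0;
}

Built with cc -pthread, the mismatch is never reported; dropping the locking on either side reopens the window (in this toy model, subject to compiler reordering) in which the two reads can straddle a concurrent move, which is exactly the window the patch closes for the two unprotected call paths.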
Diffstat (limited to 'kernel')
-rw-r--r--    kernel/cgroup.c    5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 5f01e00cffc4..e75efa819911 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2962,6 +2962,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 	int retval = 0;
 
 	mutex_lock(&cgroup_mutex);
+	percpu_down_write(&cgroup_threadgroup_rwsem);
 	for_each_root(root) {
 		struct cgroup *from_cgrp;
 
@@ -2976,6 +2977,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 		if (retval)
 			break;
 	}
+	percpu_up_write(&cgroup_threadgroup_rwsem);
 	mutex_unlock(&cgroup_mutex);
 
 	return retval;
@@ -4343,6 +4345,8 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 
 	mutex_lock(&cgroup_mutex);
 
+	percpu_down_write(&cgroup_threadgroup_rwsem);
+
 	/* all tasks in @from are being moved, all csets are source */
 	spin_lock_irq(&css_set_lock);
 	list_for_each_entry(link, &from->cset_links, cset_link)
@@ -4371,6 +4375,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
 	} while (task && !ret);
 out_err:
 	cgroup_migrate_finish(&preloaded_csets);
+	percpu_up_write(&cgroup_threadgroup_rwsem);
 	mutex_unlock(&cgroup_mutex);
 	return ret;
 }