author     Tejun Heo <tj@kernel.org>  2014-02-11 17:52:47 +0100
committer  Tejun Heo <tj@kernel.org>  2014-02-11 17:52:47 +0100
commit     4ac0601744eb86e982fbdadde35f1945f7ce5882 (patch)
tree       887f0cbc2853802ad113b87f0d2c5b764e2619ce /kernel/cgroup.c
parent     cgroup: introduce cgroup_tree_mutex (diff)
cgroup: release cgroup_mutex over file removals
Now that cftypes and all tree modification operations are protected by
cgroup_tree_mutex, we can drop cgroup_mutex while deleting files and
directories.  Drop cgroup_mutex over removals.

This doesn't make any noticeable difference now but will help the kernfs
conversion.  In kernfs, removals are sync points which drain in-flight
operations, and as those operations would grab cgroup_mutex, trying to
delete under cgroup_mutex would deadlock.  This can be resolved by holding
the outer cgroup_tree_mutex, which nests outside both the kernfs active
reference and cgroup_mutex.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Li Zefan <lizefan@huawei.com>
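To illustrate the locking pattern the patch moves toward, here is a minimal
user-space sketch (pthreads; tree_mutex, inner_mutex and
remove_files_and_drain() are hypothetical stand-ins for cgroup_tree_mutex,
cgroup_mutex and the kernfs-style removal path, not the kernel code itself):

#include <pthread.h>

/* Outer lock: held across all tree modifications, including removals. */
static pthread_mutex_t tree_mutex  = PTHREAD_MUTEX_INITIALIZER;
/* Inner lock: taken by in-flight operations (the cgroup_mutex analogue). */
static pthread_mutex_t inner_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for a kernfs-style removal that waits for in-flight operations
 * to finish; those operations take inner_mutex themselves. */
static void remove_files_and_drain(void) { /* ... */ }

static void destroy_object(void)
{
	pthread_mutex_lock(&tree_mutex);
	pthread_mutex_lock(&inner_mutex);

	/* ... state changes that need both locks ... */

	/* Drop only the inner lock around the draining removal, so the
	 * in-flight operations it waits for can still acquire it. */
	pthread_mutex_unlock(&inner_mutex);
	remove_files_and_drain();
	pthread_mutex_lock(&inner_mutex);

	/* ... finish destruction under both locks ... */

	pthread_mutex_unlock(&inner_mutex);
	pthread_mutex_unlock(&tree_mutex);
}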
Diffstat (limited to 'kernel/cgroup.c')
-rw-r--r--  kernel/cgroup.c  15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index cb20d12cb096..d28cf75f33c1 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -976,7 +976,9 @@ static int rebind_subsystems(struct cgroupfs_root *root,
* Nothing can fail from this point on. Remove files for the
* removed subsystems and rebind each subsystem.
*/
+ mutex_unlock(&cgroup_mutex);
cgroup_clear_dir(cgrp, removed_mask);
+ mutex_lock(&cgroup_mutex);
for_each_subsys(ss, i) {
unsigned long bit = 1UL << i;
@@ -2696,10 +2698,11 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
u64 update_before;
int ret = 0;
+ mutex_unlock(&cgroup_mutex);
+
/* %NULL @cfts indicates abort and don't bother if @ss isn't attached */
if (!cfts || ss->root == &cgroup_dummy_root ||
!atomic_inc_not_zero(&sb->s_active)) {
- mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgroup_tree_mutex);
return 0;
}
@@ -2723,18 +2726,15 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
dput(prev);
prev = cgrp->dentry;
- mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgroup_tree_mutex);
mutex_lock(&inode->i_mutex);
mutex_lock(&cgroup_tree_mutex);
- mutex_lock(&cgroup_mutex);
if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
ret = cgroup_addrm_files(cgrp, cfts, is_add);
mutex_unlock(&inode->i_mutex);
if (ret)
break;
}
- mutex_unlock(&cgroup_mutex);
mutex_unlock(&cgroup_tree_mutex);
dput(prev);
deactivate_super(sb);
@@ -4387,10 +4387,13 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
/*
* Initiate massacre of all css's. cgroup_destroy_css_killed()
* will be invoked to perform the rest of destruction once the
- * percpu refs of all css's are confirmed to be killed.
+ * percpu refs of all css's are confirmed to be killed. This
+ * involves removing the subsystem's files, so drop cgroup_mutex.
*/
+ mutex_unlock(&cgroup_mutex);
for_each_css(css, ssid, cgrp)
kill_css(css);
+ mutex_lock(&cgroup_mutex);
/*
* Mark @cgrp dead. This prevents further task migration and child
@@ -4421,9 +4424,11 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
* puts the base ref but we aren't quite done with @cgrp yet, so
* hold onto it.
*/
+ mutex_unlock(&cgroup_mutex);
cgroup_addrm_files(cgrp, cgroup_base_files, false);
dget(d);
cgroup_d_remove_dir(d);
+ mutex_lock(&cgroup_mutex);
return 0;
};
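The deadlock the commit message warns about can be sketched the same way:
once removals become kernfs-style sync points, they must wait for in-flight
operations that themselves take the inner mutex.  A minimal user-space
sketch (pthreads, hypothetical names; not the kernel implementation):

#include <pthread.h>

static pthread_mutex_t inner_mutex = PTHREAD_MUTEX_INITIALIZER; /* cgroup_mutex analogue */
static pthread_mutex_t ref_lock    = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  ref_drained = PTHREAD_COND_INITIALIZER;
static int active_refs;

/* An in-flight file operation: holds an active reference, then needs the
 * inner mutex to do its work. */
static void *file_op(void *unused)
{
	(void)unused;

	pthread_mutex_lock(&ref_lock);
	active_refs++;
	pthread_mutex_unlock(&ref_lock);

	pthread_mutex_lock(&inner_mutex);	/* blocks if the remover holds it */
	/* ... operation body ... */
	pthread_mutex_unlock(&inner_mutex);

	pthread_mutex_lock(&ref_lock);
	if (--active_refs == 0)
		pthread_cond_signal(&ref_drained);
	pthread_mutex_unlock(&ref_lock);
	return NULL;
}

/* Removal waits until every active reference is gone.  Calling this while
 * still holding inner_mutex would deadlock against file_op() above, which
 * is why the patch drops cgroup_mutex around removals and relies on the
 * outer cgroup_tree_mutex instead. */
static void remove_and_drain(void)
{
	pthread_mutex_lock(&ref_lock);
	while (active_refs > 0)
		pthread_cond_wait(&ref_drained, &ref_lock);
	pthread_mutex_unlock(&ref_lock);
}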