author     Greg Thelen <gthelen@google.com>    2012-11-28 22:50:44 +0100
committer  Tejun Heo <tj@kernel.org>           2012-11-28 22:51:56 +0100
commit     205a872bd6f9a9a09ef035ef1e90185a8245cc58 (patch)
tree       9b39a5823dddf6319a021d4ab36c7cd35f612908 /kernel
parent     cgroup: move list add after list head initilization (diff)
download   linux-205a872bd6f9a9a09ef035ef1e90185a8245cc58.tar.xz
           linux-205a872bd6f9a9a09ef035ef1e90185a8245cc58.zip
cgroup: fix lockdep warning for event_control
The cgroup_event_wake() function is called with the wait queue head locked
and it takes cgrp->event_list_lock.  However, in cgroup_rmdir()
remove_wait_queue() was being called after taking cgrp->event_list_lock.
Correct the lock ordering by using a temporary list to obtain the event
list to remove from the wait queue.

Signed-off-by: Greg Thelen <gthelen@google.com>
Signed-off-by: Aaron Durbin <adurbin@google.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
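The fix follows a common pattern: detach everything from the shared list while holding its lock, drop the lock, and only then perform the teardown that needs other locks. Below is a minimal user-space sketch of that pattern using pthreads and a singly linked list; all names here (event, event_list_lock, remove_all_events, ...) are illustrative analogues, not the kernel's API.

/*
 * Hypothetical user-space analogue of the fix: detach the pending events
 * from the shared list while holding event_list_lock, drop the lock, then
 * tear each event down without holding it.  Names are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct event {
	struct event *next;
	int id;
};

static pthread_mutex_t event_list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct event *event_list;	/* protected by event_list_lock */

static void add_event(int id)
{
	struct event *ev = malloc(sizeof(*ev));

	ev->id = id;
	pthread_mutex_lock(&event_list_lock);
	ev->next = event_list;
	event_list = ev;
	pthread_mutex_unlock(&event_list_lock);
}

/* Analogue of the rmdir path: splice under the lock, process outside it. */
static void remove_all_events(void)
{
	struct event *tmp_list, *ev, *next;

	pthread_mutex_lock(&event_list_lock);
	tmp_list = event_list;		/* counterpart of list_splice_init() */
	event_list = NULL;
	pthread_mutex_unlock(&event_list_lock);

	for (ev = tmp_list; ev != NULL; ev = next) {
		next = ev->next;
		/* In the kernel this is where remove_wait_queue() and
		 * eventfd_signal() run, now safely outside the lock. */
		printf("tearing down event %d\n", ev->id);
		free(ev);
	}
}

int main(void)
{
	add_event(1);
	add_event(2);
	remove_all_events();
	return 0;
}

In the kernel patch itself the splice is done with list_splice_init(), and the per-event teardown (remove_wait_queue(), eventfd_signal(), schedule_work()) runs after event_list_lock has been released.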
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c  11
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c02b05560d10..589433f7a74b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4277,6 +4277,7 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	DEFINE_WAIT(wait);
 	struct cgroup_event *event, *tmp;
 	struct cgroup_subsys *ss;
+	LIST_HEAD(tmp_list);
 
 	lockdep_assert_held(&d->d_inode->i_mutex);
 	lockdep_assert_held(&cgroup_mutex);
@@ -4331,16 +4332,20 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	/*
 	 * Unregister events and notify userspace.
 	 * Notify userspace about cgroup removing only after rmdir of cgroup
-	 * directory to avoid race between userspace and kernelspace
+	 * directory to avoid race between userspace and kernelspace. Use
+	 * a temporary list to avoid a deadlock with cgroup_event_wake(). Since
+	 * cgroup_event_wake() is called with the wait queue head locked,
+	 * remove_wait_queue() cannot be called while holding event_list_lock.
 	 */
 	spin_lock(&cgrp->event_list_lock);
-	list_for_each_entry_safe(event, tmp, &cgrp->event_list, list) {
+	list_splice_init(&cgrp->event_list, &tmp_list);
+	spin_unlock(&cgrp->event_list_lock);
+	list_for_each_entry_safe(event, tmp, &tmp_list, list) {
 		list_del(&event->list);
 		remove_wait_queue(event->wqh, &event->wait);
 		eventfd_signal(event->eventfd, 1);
 		schedule_work(&event->remove);
 	}
-	spin_unlock(&cgrp->event_list_lock);
 
 	return 0;
 }
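For reference, the inversion that lockdep complained about can be sketched in isolation: cgroup_event_wake() runs with the wait queue head lock already held and then takes event_list_lock, while the old removal path took event_list_lock first and then acquired the wait queue head lock inside remove_wait_queue(). Below is a stand-alone illustration of the two orders, with pthread mutexes standing in for the kernel spinlocks; nothing here is kernel API.

/* Illustration only: the two acquisition orders the old code mixed.
 * Run single-threaded it does nothing interesting; the point is the
 * documented ordering, which is what lockdep flags as an AB-BA risk. */
#include <pthread.h>

static pthread_mutex_t wqh_lock = PTHREAD_MUTEX_INITIALIZER;        /* stand-in for the wait queue head lock */
static pthread_mutex_t event_list_lock = PTHREAD_MUTEX_INITIALIZER; /* stand-in for cgrp->event_list_lock */

/* Like cgroup_event_wake(): entered with the wait queue head locked,
 * then takes event_list_lock.  Order: wqh_lock -> event_list_lock. */
static void waker_path(void)
{
	pthread_mutex_lock(&wqh_lock);
	pthread_mutex_lock(&event_list_lock);
	/* ... unlink the event ... */
	pthread_mutex_unlock(&event_list_lock);
	pthread_mutex_unlock(&wqh_lock);
}

/* Like the pre-patch removal path: event_list_lock held while
 * remove_wait_queue() takes the wait queue head lock.
 * Order: event_list_lock -> wqh_lock, the inverse of the above. */
static void old_removal_path(void)
{
	pthread_mutex_lock(&event_list_lock);
	pthread_mutex_lock(&wqh_lock);
	/* ... */
	pthread_mutex_unlock(&wqh_lock);
	pthread_mutex_unlock(&event_list_lock);
}

int main(void)
{
	waker_path();
	old_removal_path();
	return 0;
}

After the patch, the removal path no longer holds event_list_lock when the wait queue head lock is taken, so only one ordering remains.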