author    | Roman Gushchin <guro@fb.com> | 2019-04-19 19:03:03 +0200
committer | Tejun Heo <tj@kernel.org>    | 2019-04-19 20:26:48 +0200
commit    | 4dcabece4c3a9f9522127be12cc12cc120399b2f (patch)
tree      | 0d985b3d1232ff76e67a0ac07213e4c8e4ab4a7b /kernel
parent    | cgroup: implement __cgroup_task_count() helper (diff)
download  | linux-4dcabece4c3a9f9522127be12cc12cc120399b2f.tar.xz
          | linux-4dcabece4c3a9f9522127be12cc12cc120399b2f.zip
cgroup: protect cgroup->nr_(dying_)descendants by css_set_lock
The number of descendant cgroups and the number of dying
descendant cgroups are currently synchronized using the cgroup_mutex.
The number of descendant cgroups will be required by the cgroup v2
freezer, which will use it to determine whether a cgroup is frozen
(based on the total number of descendants and the number of frozen
descendants). It's not always acceptable to grab the cgroup_mutex,
especially from quite hot paths (e.g. exit()).
To avoid this, let's additionally synchronize these counters using
the css_set_lock.
So it's safe to read these counters with either cgroup_mutex or
css_set_lock held, and both locks must be acquired to change them
(a reader-side sketch follows the tags below).
Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: kernel-team@fb.com
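For illustration only, not part of the patch: under the rule above, a hot
path can inspect the counters while holding just css_set_lock. The helper
below is a hypothetical sketch, written as if it lived in
kernel/cgroup/cgroup.c where struct cgroup and css_set_lock are in scope;
the function name is invented here.

/*
 * Hypothetical reader-side sketch (not introduced by this commit).
 * Reading nr_descendants no longer needs cgroup_mutex; holding
 * css_set_lock alone is sufficient.
 */
static int cgroup_read_nr_descendants(struct cgroup *cgrp)
{
	int nr;

	spin_lock_irq(&css_set_lock);
	nr = cgrp->nr_descendants;
	spin_unlock_irq(&css_set_lock);

	return nr;
}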
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cgroup/cgroup.c | 6
1 file changed, 6 insertions, 0 deletions
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 3008ea684aa0..786ceef2f222 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -4811,9 +4811,11 @@ static void css_release_work_fn(struct work_struct *work)
 	if (cgroup_on_dfl(cgrp))
 		cgroup_rstat_flush(cgrp);

+	spin_lock_irq(&css_set_lock);
 	for (tcgrp = cgroup_parent(cgrp); tcgrp;
 	     tcgrp = cgroup_parent(tcgrp))
 		tcgrp->nr_dying_descendants--;
+	spin_unlock_irq(&css_set_lock);

 	cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
 	cgrp->id = -1;
@@ -5031,12 +5033,14 @@ static struct cgroup *cgroup_create(struct cgroup *parent)
 	if (ret)
 		goto out_psi_free;

+	spin_lock_irq(&css_set_lock);
 	for (tcgrp = cgrp; tcgrp; tcgrp = cgroup_parent(tcgrp)) {
 		cgrp->ancestor_ids[tcgrp->level] = tcgrp->id;

 		if (tcgrp != cgrp)
 			tcgrp->nr_descendants++;
 	}
+	spin_unlock_irq(&css_set_lock);

 	if (notify_on_release(parent))
 		set_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags);
@@ -5321,10 +5325,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
 	if (parent && cgroup_is_threaded(cgrp))
 		parent->nr_threaded_children--;

+	spin_lock_irq(&css_set_lock);
 	for (tcgrp = cgroup_parent(cgrp); tcgrp; tcgrp = cgroup_parent(tcgrp)) {
 		tcgrp->nr_descendants--;
 		tcgrp->nr_dying_descendants++;
 	}
+	spin_unlock_irq(&css_set_lock);

 	cgroup1_check_for_release(parent);
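The write side is the mirror image: css_release_work_fn(), cgroup_create()
and cgroup_destroy_locked() all hold cgroup_mutex around these loops, so
wrapping the counter updates in css_set_lock (as the hunks above do) means
every writer ends up holding both locks. A hypothetical assertion capturing
that invariant, again only a sketch and not part of the patch:

/*
 * Hypothetical write-side assertion (not in this patch): any path that
 * modifies nr_descendants or nr_dying_descendants should now hold both
 * cgroup_mutex and css_set_lock.
 */
static void cgroup_assert_descendant_counters_locked(void)
{
	lockdep_assert_held(&cgroup_mutex);
	lockdep_assert_held(&css_set_lock);
}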