author      Chengming Zhou <zhouchengming@bytedance.com>  2022-03-29 17:45:23 +0200
committer   Peter Zijlstra <peterz@infradead.org>  2022-04-05 09:59:45 +0200
commit      e19cd0b6fa5938c51d7b928010d584f0de93913a (patch)
tree        4a1f067ea20e6d3ed7c53fc08129b694f7ef4350 /kernel
parent      perf/core: Fix perf_cgroup_switch() (diff)
perf/core: Always set cpuctx cgrp when enable cgroup event
When enabling a cgroup event, setting cpuctx->cgrp is conditional on the current task's cgrp matching the event's cgroup, so it has to be done for every new event. That brings complexity but no advantage. To keep it simple, this patch always sets cpuctx->cgrp when the first cgroup event is enabled, and resets it to NULL when the last cgroup event is disabled.

Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220329154523.86438-5-zhouchengming@bytedance.com
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c  18
1 file changed, 2 insertions, 16 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index bdeb41fe7f15..23bb19716ad3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -967,22 +967,10 @@ perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ct
*/
cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
- /*
- * Since setting cpuctx->cgrp is conditional on the current @cgrp
- * matching the event's cgroup, we must do this for every new event,
- * because if the first would mismatch, the second would not try again
- * and we would leave cpuctx->cgrp unset.
- */
- if (ctx->is_active && !cpuctx->cgrp) {
- struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
-
- if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
- cpuctx->cgrp = cgrp;
- }
-
if (ctx->nr_cgroups++)
return;
+ cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
list_add(&cpuctx->cgrp_cpuctx_entry,
per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
}
@@ -1004,9 +992,7 @@ perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *c
if (--ctx->nr_cgroups)
return;
- if (ctx->is_active && cpuctx->cgrp)
- cpuctx->cgrp = NULL;
-
+ cpuctx->cgrp = NULL;
list_del(&cpuctx->cgrp_cpuctx_entry);
}
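
For readability, here is a sketch of how the two touched regions of kernel/events/core.c read once the patch is applied, reconstructed from the hunks above; lines of each function that fall outside the hunks are elided, and the indentation and comments are assumptions, not part of the commit:

perf_cgroup_event_enable() (tail of the function, after the patch):

	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);

	/* Only the first cgroup event on this context does the setup below. */
	if (ctx->nr_cgroups++)
		return;

	/* Unconditionally point cpuctx->cgrp at the current task's cgroup. */
	cpuctx->cgrp = perf_cgroup_from_task(current, ctx);
	list_add(&cpuctx->cgrp_cpuctx_entry,
		 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
}

perf_cgroup_event_disable() (tail of the function, after the patch):

	if (--ctx->nr_cgroups)
		return;

	/* Last cgroup event gone: clear cpuctx->cgrp and unlink the cpuctx. */
	cpuctx->cgrp = NULL;
	list_del(&cpuctx->cgrp_cpuctx_entry);
}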