author     Peter Zijlstra <peterz@infradead.org>     2019-08-07 11:45:01 +0200
committer  Ingo Molnar <mingo@kernel.org>            2020-03-06 11:56:58 +0100
commit     2c2366c7548ecee65adfd264517ddf50f9e2d029
tree       dede9e3fc549abd9b799cc95858b5470901bf412 /kernel
parent     perf/core: Unify {pinned,flexible}_sched_in()
perf/core: Remove 'struct sched_in_data'
We can deduce the ctx and cpuctx from the event; there is no need to pass them
along. Remove the structure and pass in can_add_hw directly.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
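To make the shape of the change easier to see outside the kernel tree, here is a minimal user-space sketch of the resulting pattern; the types and helpers below (struct ctx, struct event, try_schedule, merge_visit, sched_in) are hypothetical stand-ins, not the kernel API. It illustrates the point made above: the visitor can deduce its context from the event it is handed, so the only state that still has to travel through the opaque void *data pointer is the can_add_hw flag.

/*
 * Minimal sketch, assuming simplified stand-in types; not kernel code.
 */
#include <stdio.h>

struct ctx   { int rotate_necessary; };
struct event { struct ctx *ctx; int on; };

/* Stand-in for group_sched_in(): pretend only one event fits on the PMU. */
static int try_schedule(struct event *event)
{
        static int slots = 1;
        return event->on && slots-- > 0;
}

/* Visitor in the style of merge_sched_in(): data is just an int pointer. */
static int merge_visit(struct event *event, void *data)
{
        struct ctx *ctx = event->ctx;   /* deduced from the event */
        int *can_add_hw = data;         /* the only threaded state */

        if (!event->on)
                return 0;

        if (!(*can_add_hw && try_schedule(event))) {
                *can_add_hw = 0;           /* no room left ...        */
                ctx->rotate_necessary = 1; /* ... so ask for rotation */
        }
        return 0;
}

/* Caller in the style of ctx_pinned_sched_in(): no wrapper struct needed. */
static void sched_in(struct event *events, int n)
{
        int can_add_hw = 1;

        for (int i = 0; i < n; i++)
                merge_visit(&events[i], &can_add_hw);
}

int main(void)
{
        struct ctx c = { 0 };
        struct event evs[] = { { &c, 1 }, { &c, 1 } };

        sched_in(evs, 2);
        printf("rotate_necessary = %d\n", c.rotate_necessary);
        return 0;
}

With two active events but room for only one, the second visit clears can_add_hw and marks the context for rotation, mirroring what the patched merge_sched_in() does without the removed wrapper structure.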
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c | 36
1 file changed, 11 insertions(+), 25 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b713080eedcc..b7eaabaee76f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3423,17 +3423,11 @@ static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
         return 0;
 }
 
-struct sched_in_data {
-        struct perf_event_context *ctx;
-        struct perf_cpu_context *cpuctx;
-        int can_add_hw;
-};
-
 static int merge_sched_in(struct perf_event *event, void *data)
 {
-        struct sched_in_data *sid = data;
-
-        WARN_ON_ONCE(event->ctx != sid->ctx);
+        struct perf_event_context *ctx = event->ctx;
+        struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+        int *can_add_hw = data;
 
         if (event->state <= PERF_EVENT_STATE_OFF)
                 return 0;
@@ -3441,8 +3435,8 @@ static int merge_sched_in(struct perf_event *event, void *data)
         if (!event_filter_match(event))
                 return 0;
 
-        if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-                if (!group_sched_in(event, sid->cpuctx, sid->ctx))
+        if (group_can_go_on(event, cpuctx, *can_add_hw)) {
+                if (!group_sched_in(event, cpuctx, ctx))
                         list_add_tail(&event->active_list, get_event_list(event));
         }
 
@@ -3450,8 +3444,8 @@ static int merge_sched_in(struct perf_event *event, void *data)
                 if (event->attr.pinned)
                         perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
 
-                sid->can_add_hw = 0;
-                sid->ctx->rotate_necessary = 1;
+                *can_add_hw = 0;
+                ctx->rotate_necessary = 1;
         }
 
         return 0;
@@ -3461,30 +3455,22 @@ static void
 ctx_pinned_sched_in(struct perf_event_context *ctx,
                     struct perf_cpu_context *cpuctx)
 {
-        struct sched_in_data sid = {
-                .ctx = ctx,
-                .cpuctx = cpuctx,
-                .can_add_hw = 1,
-        };
+        int can_add_hw = 1;
 
         visit_groups_merge(&ctx->pinned_groups,
                            smp_processor_id(),
-                           merge_sched_in, &sid);
+                           merge_sched_in, &can_add_hw);
 }
 
 static void
 ctx_flexible_sched_in(struct perf_event_context *ctx,
                       struct perf_cpu_context *cpuctx)
 {
-        struct sched_in_data sid = {
-                .ctx = ctx,
-                .cpuctx = cpuctx,
-                .can_add_hw = 1,
-        };
+        int can_add_hw = 1;
 
         visit_groups_merge(&ctx->flexible_groups,
                            smp_processor_id(),
-                           merge_sched_in, &sid);
+                           merge_sched_in, &can_add_hw);
 }
 
 static void