Date: Fri, 6 Dec 2019 15:15:36 -0800
Subject: [PATCH v5 07/10] perf: simplify and rename visit_groups_merge
From: Ian Rogers <>
To enable a future caching optimization, pass in whether visit_groups_merge is operating on pinned or flexible groups. The is_pinned argument makes the func argument redundant, so rename the function to ctx_groups_sched_in, as all it does now is schedule pinned or flexible groups in. Compute the cpu and groups arguments locally to shrink the argument list. Remove sched_in_data, as it merely repeats arguments that are already passed in. Merge pinned_sched_in and flexible_sched_in into a single function that uses the is_pinned argument to select the active list.
Signed-off-by: Ian Rogers <irogers@google.com>
---
 kernel/events/core.c | 149 ++++++++++++++-----------------------------
 1 file changed, 49 insertions(+), 100 deletions(-)
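In outline, the refactor trades the function-pointer-plus-void-*data plumbing for one direct call taking a bool. Below is a minimal standalone sketch of that pattern, not part of the patch: plain user-space C with invented names (visit_groups, groups_visit, merge_visit), illustrative only, not the kernel code.

/* Before: one callback per group type, dispatched via func/data. */
#include <stdbool.h>
#include <stdio.h>

struct event { int id; };
struct sched_in_data { int can_add_hw; };

static int pinned_visit(struct event *ev, void *data)
{
	struct sched_in_data *sid = data;

	printf("pinned event %d (can_add_hw=%d)\n", ev->id, sid->can_add_hw);
	return 0;
}

static int visit_groups(struct event *evs, int n,
			int (*func)(struct event *, void *), void *data)
{
	for (int i = 0; i < n; i++) {
		int ret = func(&evs[i], data);

		if (ret)
			return ret;
	}
	return 0;
}

/* After: one merged visitor; the bool selects the behavior directly. */
static int merge_visit(struct event *ev, bool is_pinned, int *can_add_hw)
{
	printf("%s event %d (can_add_hw=%d)\n",
	       is_pinned ? "pinned" : "flexible", ev->id, *can_add_hw);
	return 0;
}

static int groups_visit(struct event *evs, int n, bool is_pinned)
{
	int can_add_hw = 1;	/* computed locally, like cpu/groups */

	for (int i = 0; i < n; i++) {
		int ret = merge_visit(&evs[i], is_pinned, &can_add_hw);

		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct event evs[] = { { 1 }, { 2 } };
	struct sched_in_data sid = { .can_add_hw = 1 };

	visit_groups(evs, 2, pinned_visit, &sid);	/* old shape */
	groups_visit(evs, 2, /*is_pinned=*/true);	/* new shape */
	return 0;
}

The bool keeps a single copy of the shared filter/schedule logic while still letting the caller pick the pinned or flexible behavior, which is what makes the separate callbacks and the sched_in_data carrier struct unnecessary.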
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 20e08d0c1cb9..3da9cc1ebc2d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2133,7 +2133,6 @@ static void perf_group_detach(struct perf_event *event)
 
 		if (!RB_EMPTY_NODE(&event->group_node)) {
 			add_event_to_groups(sibling, event->ctx);
-
 			if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
 				struct list_head *list = sibling->attr.pinned ?
 					&ctx->pinned_active : &ctx->flexible_active;
@@ -2456,6 +2455,8 @@ event_sched_in(struct perf_event *event,
 {
 	int ret = 0;
 
+	WARN_ON_ONCE(event->ctx != ctx);
+
 	lockdep_assert_held(&ctx->lock);
 
 	if (event->state <= PERF_EVENT_STATE_OFF)
@@ -3524,10 +3525,42 @@ static void __heap_add(struct min_heap *heap, struct perf_event *event)
 	}
 }
 
-static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
-				struct perf_event_groups *groups, int cpu,
-				int (*func)(struct perf_event *, void *),
-				void *data)
+static int merge_sched_in(struct perf_event_context *ctx,
+			  struct perf_cpu_context *cpuctx,
+			  struct perf_event *event,
+			  bool is_pinned,
+			  int *can_add_hw)
+{
+	WARN_ON_ONCE(event->ctx != ctx);
+
+	if (event->state <= PERF_EVENT_STATE_OFF)
+		return 0;
+
+	if (!event_filter_match(event))
+		return 0;
+
+	if (group_can_go_on(event, cpuctx, 1)) {
+		if (!group_sched_in(event, cpuctx, ctx)) {
+			list_add_tail(&event->active_list, is_pinned
+				? &ctx->pinned_active
+				: &ctx->flexible_active);
+		}
+	}
+
+	if (event->state == PERF_EVENT_STATE_INACTIVE) {
+		if (is_pinned)
+			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+
+		*can_add_hw = 0;
+		ctx->rotate_necessary = 1;
+	}
+
+	return 0;
+}
+
+static int ctx_groups_sched_in(struct perf_event_context *ctx,
+			       struct perf_cpu_context *cpuctx,
+			       bool is_pinned)
 {
 #ifdef CONFIG_CGROUP_PERF
 	struct cgroup_subsys_state *css = NULL;
@@ -3537,9 +3570,13 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 	struct min_heap event_heap;
 	struct perf_event **evt;
 	struct perf_event *next;
-	int ret;
+	int ret, can_add_hw = 1;
+	int cpu = smp_processor_id();
+	struct perf_event_groups *groups = is_pinned
+		? &ctx->pinned_groups
+		: &ctx->flexible_groups;
 
-	if (cpuctx) {
+	if (ctx == &cpuctx->ctx) {
 		event_heap = (struct min_heap){
 			.data = cpuctx->itr_storage,
 			.size = 0,
@@ -3576,7 +3613,8 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 	min_heapify_all(&event_heap, &perf_min_heap);
 
 	while (event_heap.size) {
-		ret = func(*evt, data);
+		ret = merge_sched_in(ctx, cpuctx, *evt, is_pinned, &can_add_hw);
+
 		if (ret)
 			return ret;
 
@@ -3590,96 +3628,6 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 	return 0;
 }
 
-struct sched_in_data {
-	struct perf_event_context *ctx;
-	struct perf_cpu_context *cpuctx;
-	int can_add_hw;
-};
-
-static int pinned_sched_in(struct perf_event *event, void *data)
-{
-	struct sched_in_data *sid = data;
-
-	if (event->state <= PERF_EVENT_STATE_OFF)
-		return 0;
-
-	if (!event_filter_match(event))
-		return 0;
-
-	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
-			list_add_tail(&event->active_list, &sid->ctx->pinned_active);
-	}
-
-	/*
-	 * If this pinned group hasn't been scheduled,
-	 * put it in error state.
-	 */
-	if (event->state == PERF_EVENT_STATE_INACTIVE)
-		perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
-
-	return 0;
-}
-
-static int flexible_sched_in(struct perf_event *event, void *data)
-{
-	struct sched_in_data *sid = data;
-
-	if (event->state <= PERF_EVENT_STATE_OFF)
-		return 0;
-
-	if (!event_filter_match(event))
-		return 0;
-
-	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
-		if (ret) {
-			sid->can_add_hw = 0;
-			sid->ctx->rotate_necessary = 1;
-			return 0;
-		}
-		list_add_tail(&event->active_list, &sid->ctx->flexible_active);
-	}
-
-	return 0;
-}
-
-static void
-ctx_pinned_sched_in(struct perf_event_context *ctx,
-		    struct perf_cpu_context *cpuctx)
-{
-	struct sched_in_data sid = {
-		.ctx = ctx,
-		.cpuctx = cpuctx,
-		.can_add_hw = 1,
-	};
-
-	if (ctx != &cpuctx->ctx)
-		cpuctx = NULL;
-
-	visit_groups_merge(cpuctx, &ctx->pinned_groups,
-			   smp_processor_id(),
-			   pinned_sched_in, &sid);
-}
-
-static void
-ctx_flexible_sched_in(struct perf_event_context *ctx,
-		      struct perf_cpu_context *cpuctx)
-{
-	struct sched_in_data sid = {
-		.ctx = ctx,
-		.cpuctx = cpuctx,
-		.can_add_hw = 1,
-	};
-
-	if (ctx != &cpuctx->ctx)
-		cpuctx = NULL;
-
-	visit_groups_merge(cpuctx, &ctx->flexible_groups,
-			   smp_processor_id(),
-			   flexible_sched_in, &sid);
-}
-
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
@@ -3716,11 +3664,12 @@ ctx_sched_in(struct perf_event_context *ctx,
 	 * in order to give them the best chance of going on.
 	 */
 	if (is_active & EVENT_PINNED)
-		ctx_pinned_sched_in(ctx, cpuctx);
+		ctx_groups_sched_in(ctx, cpuctx, /*is_pinned=*/true);
+
 
 	/* Then walk through the lower prio flexible groups */
 	if (is_active & EVENT_FLEXIBLE)
-		ctx_flexible_sched_in(ctx, cpuctx);
+		ctx_groups_sched_in(ctx, cpuctx, /*is_pinned=*/false);
 }
 
 static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-- 
2.24.0.393.g34dc348eaf-goog