Date: 13 Nov 2019
Subject: [PATCH v3 03/10] perf: Use min_max_heap in visit_groups_merge
From: Ian Rogers <irogers@google.com>

    Based-on-work-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Signed-off-by: Ian Rogers <irogers@google.com>
    ---
    kernel/events/core.c | 67 +++++++++++++++++++++++++++++++++-----------
    1 file changed, 50 insertions(+), 17 deletions(-)

    diff --git a/kernel/events/core.c b/kernel/events/core.c
    index 0dce28b0aae0..a5a3d349a8f1 100644
    --- a/kernel/events/core.c
    +++ b/kernel/events/core.c
@@ -49,6 +49,7 @@
 #include <linux/sched/mm.h>
 #include <linux/proc_ns.h>
 #include <linux/mount.h>
+#include <linux/min_max_heap.h>
 
 #include "internal.h"
 
@@ -3372,32 +3373,64 @@ static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
 	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
 }
 
-static int visit_groups_merge(struct perf_event_groups *groups, int cpu,
+static bool perf_cmp_group_idx(const void *l, const void *r)
+{
+	const struct perf_event *le = *(const struct perf_event **)l;
+	const struct perf_event *re = *(const struct perf_event **)r;
+	return le->group_index < re->group_index;
+}
+
+static void swap_ptr(void *l, void *r)
+{
+	void **lp = l, **rp = r;
+
+	swap(*lp, *rp);
+}
+
+static const struct min_max_heap_callbacks perf_min_heap = {
+	.elem_size = sizeof(struct perf_event *),
+	.cmp = perf_cmp_group_idx,
+	.swp = swap_ptr,
+};
+
+static void __heap_add(struct min_max_heap *heap, struct perf_event *event)
+{
+	struct perf_event **itrs = heap->data;
+
+	if (event) {
+		itrs[heap->size] = event;
+		heap->size++;
+	}
+}
+
+static noinline int visit_groups_merge(struct perf_event_groups *groups, int cpu,
			      int (*func)(struct perf_event *, void *), void *data)
 {
-	struct perf_event **evt, *evt1, *evt2;
+	/* Space for per CPU and/or any CPU event iterators. */
+	struct perf_event *itrs[2];
+	struct min_max_heap event_heap = {
+		.data = itrs,
+		.size = 0,
+		.cap = ARRAY_SIZE(itrs),
+	};
+	struct perf_event *next;
 	int ret;
 
-	evt1 = perf_event_groups_first(groups, -1);
-	evt2 = perf_event_groups_first(groups, cpu);
+	__heap_add(&event_heap, perf_event_groups_first(groups, -1));
+	__heap_add(&event_heap, perf_event_groups_first(groups, cpu));
 
-	while (evt1 || evt2) {
-		if (evt1 && evt2) {
-			if (evt1->group_index < evt2->group_index)
-				evt = &evt1;
-			else
-				evt = &evt2;
-		} else if (evt1) {
-			evt = &evt1;
-		} else {
-			evt = &evt2;
-		}
+	heapify_all(&event_heap, &perf_min_heap);
 
-		ret = func(*evt, data);
+	while (event_heap.size) {
+		ret = func(itrs[0], data);
 		if (ret)
 			return ret;
 
-		*evt = perf_event_groups_next(*evt);
+		next = perf_event_groups_next(itrs[0]);
+		if (next)
+			heap_pop_push(&event_heap, &next, &perf_min_heap);
+		else
+			heap_pop(&event_heap, &perf_min_heap);
 	}
 
 	return 0;
    --
    2.24.0.432.g9d3f5f5b63-goog
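
For readers unfamiliar with the pattern, below is a minimal, self-contained userspace sketch of the same merge the patch performs: each iterator's current element sits in a min-heap, the smallest is visited, and the iterator is then re-sifted in place (the heap_pop_push() case) or removed (the heap_pop() case). The struct iter type and sift_down() helper are illustrative stand-ins, not the kernel's min_max_heap library; the integer values stand in for perf_event->group_index.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/*
 * Illustrative stand-in for a perf_event iterator: a sorted array plus a
 * cursor. The values play the role of perf_event->group_index.
 */
struct iter {
	const int *vals;
	size_t len, pos;
};

static const int *iter_first(struct iter *it)
{
	return it->pos < it->len ? &it->vals[it->pos] : NULL;
}

static const int *iter_next(struct iter *it)
{
	it->pos++;
	return iter_first(it);
}

/* Order iterators by their current element, like perf_cmp_group_idx. */
static bool less(struct iter *a, struct iter *b)
{
	return *iter_first(a) < *iter_first(b);
}

static void sift_down(struct iter **heap, size_t size, size_t i)
{
	for (;;) {
		size_t l = 2 * i + 1, r = l + 1, min = i;
		struct iter *tmp;

		if (l < size && less(heap[l], heap[min]))
			min = l;
		if (r < size && less(heap[r], heap[min]))
			min = r;
		if (min == i)
			return;
		tmp = heap[i];
		heap[i] = heap[min];
		heap[min] = tmp;
		i = min;
	}
}

/* Visit every element of every (non-empty) iterator in ascending order. */
static void visit_merge(struct iter **heap, size_t size)
{
	size_t i;

	/* The heapify_all() step: sift down each non-leaf node. */
	for (i = size / 2; i-- > 0; )
		sift_down(heap, size, i);

	while (size) {
		printf("%d\n", *iter_first(heap[0]));	/* func(itrs[0], data) */

		if (iter_next(heap[0])) {
			/* heap_pop_push(): same slot, new key, re-sift. */
			sift_down(heap, size, 0);
		} else {
			/* heap_pop(): move the last leaf to the root. */
			heap[0] = heap[--size];
			sift_down(heap, size, 0);
		}
	}
}

int main(void)
{
	static const int a_vals[] = { 1, 4, 9 };
	static const int b_vals[] = { 2, 3, 8 };
	struct iter a = { a_vals, 3, 0 }, b = { b_vals, 3, 0 };
	struct iter *heap[] = { &a, &b };	/* cf. itrs[2] above */

	visit_merge(heap, sizeof(heap) / sizeof(heap[0]));	/* prints 1 2 3 4 8 9 */
	return 0;
}

Compared with the hand-rolled evt1/evt2 comparison the patch removes, the heap form costs essentially nothing for two iterators but scales to however many the backing array can hold, which is presumably what lets later patches in this series add further iterators without another special case.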