Subject: [PATCH v3 05/10] perf/cgroup: Grow per perf_cpu_context heap storage
From: Ian Rogers <irogers@google.com>
Date: Wed, 13 Nov 2019
    Allow the per-CPU min heap storage to have sufficient space for per-cgroup
    iterators.

    Based-on-work-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Signed-off-by: Ian Rogers <irogers@google.com>
    ---
    kernel/events/core.c | 47 ++++++++++++++++++++++++++++++++++++++++++++
    1 file changed, 47 insertions(+)
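
A note on the sizing rule in the new helper: the storage needs one
iterator for each (possibly nested) cgroup on the path to the root,
plus one iterator for events with no cgroup. A minimal userspace
sketch of that computation, where struct css_node and
required_itr_cap() are hypothetical stand-ins for
struct cgroup_subsys_state and the loop in
perf_cgroup_ensure_itr_storage_cap():

#include <stdio.h>

/* Hypothetical stand-in for struct cgroup_subsys_state. */
struct css_node {
	struct css_node *parent;
};

/*
 * One iterator per (possibly nested) cgroup up to the root, plus one
 * for events with no cgroup; mirrors the sizing loop in the patch.
 */
static int required_itr_cap(const struct css_node *css)
{
	int itr_cap;

	for (itr_cap = 1; css; css = css->parent)
		itr_cap++;
	return itr_cap;
}

int main(void)
{
	struct css_node root = { .parent = NULL };
	struct css_node child = { .parent = &root };
	struct css_node leaf = { .parent = &child };

	/* Three nested cgroups plus the cgroup-less slot: prints 4. */
	printf("%d\n", required_itr_cap(&leaf));
	return 0;
}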

    diff --git a/kernel/events/core.c b/kernel/events/core.c
    index 0dab60bf5935..3c44be7de44e 100644
    --- a/kernel/events/core.c
    +++ b/kernel/events/core.c
@@ -892,6 +892,47 @@ static inline void perf_cgroup_sched_in(struct task_struct *prev,
 	rcu_read_unlock();
 }
 
+static int perf_cgroup_ensure_itr_storage_cap(struct perf_event *event,
+					      struct cgroup_subsys_state *css)
+{
+	struct perf_cpu_context *cpuctx;
+	struct perf_event **storage;
+	int cpu, itr_cap, ret = 0;
+
+	/*
+	 * Allow storage to have sufficient space for an iterator for each
+	 * possibly nested cgroup plus an iterator for events with no cgroup.
+	 */
+	for (itr_cap = 1; css; css = css->parent)
+		itr_cap++;
+
+	for_each_possible_cpu(cpu) {
+		cpuctx = per_cpu_ptr(event->pmu->pmu_cpu_context, cpu);
+		if (itr_cap <= cpuctx->itr_storage_cap)
+			continue;
+
+		storage = kmalloc_node(itr_cap * sizeof(struct perf_event *),
+				       GFP_KERNEL, cpu_to_node(cpu));
+		if (!storage) {
+			ret = -ENOMEM;
+			break;
+		}
+
+		raw_spin_lock_irq(&cpuctx->ctx.lock);
+		if (cpuctx->itr_storage_cap < itr_cap) {
+			swap(cpuctx->itr_storage, storage);
+			if (storage == cpuctx->itr_default)
+				storage = NULL;
+			cpuctx->itr_storage_cap = itr_cap;
+		}
+		raw_spin_unlock_irq(&cpuctx->ctx.lock);
+
+		kfree(storage);
+	}
+
+	return ret;
+}
+
 static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 				      struct perf_event_attr *attr,
 				      struct perf_event *group_leader)
@@ -911,6 +952,10 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
 		goto out;
 	}
 
+	ret = perf_cgroup_ensure_itr_storage_cap(event, css);
+	if (ret)
+		goto out;
+
 	cgrp = container_of(css, struct perf_cgroup, css);
 	event->cgrp = cgrp;
 
@@ -3421,6 +3466,8 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
 			.size = 0,
 			.cap = cpuctx->itr_storage_cap,
 		};
+
+		lockdep_assert_held(&cpuctx->ctx.lock);
 	} else {
 		event_heap = (struct min_max_heap){
 			.data = itrs,
    --
    2.24.0.432.g9d3f5f5b63-goog
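
The new helper also follows a common pattern for growing shared
storage: allocate the replacement buffer outside ctx.lock, publish it
with swap() under the lock only if the capacity check still holds, and
free whichever pointer lost afterwards. A minimal userspace sketch of
the same shape, assuming a pthread mutex in place of the raw spinlock;
grow_storage() and its globals are illustrative, not part of the patch:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void **storage;    /* protected by lock */
static int storage_cap;   /* protected by lock */

/* Grow storage to at least new_cap slots; 0 on success, -1 on ENOMEM. */
static int grow_storage(int new_cap)
{
	/* Allocate outside the lock to keep the critical section short. */
	void **fresh = calloc(new_cap, sizeof(*fresh));
	void **tmp;

	if (!fresh)
		return -1;

	pthread_mutex_lock(&lock);
	if (storage_cap < new_cap) {
		/* Publish the bigger buffer; 'fresh' now holds the old one. */
		tmp = storage;
		storage = fresh;
		fresh = tmp;
		storage_cap = new_cap;
	}
	pthread_mutex_unlock(&lock);

	/*
	 * Free the loser: either the old buffer, or our allocation if
	 * another caller grew the storage first. free(NULL) is a no-op.
	 */
	free(fresh);
	return 0;
}

int main(void)
{
	/* The third call exercises the "someone already grew it" path. */
	if (grow_storage(4) || grow_storage(8) || grow_storage(2))
		return 1;
	free(storage);
	return 0;
}

Two details differ in the kernel version: cpuctx->itr_default appears
to be default storage that must never be kfree()d, so the outgoing
pointer is replaced with NULL when it matches the default; and the
lockdep_assert_held() added to visit_groups_merge() documents that the
storage is only dereferenced with ctx.lock held, which is what makes
the swap safe.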