    Subject: Re: [PATCH 4/4] perf stat: Enable BPF counter with --for-each-cgroup

    > On Jun 25, 2021, at 12:18 AM, Namhyung Kim <namhyung@kernel.org> wrote:
    >
    > Recently bperf was added to use BPF to count perf events for various
    > purposes. This is an extension of that approach, targeting cgroup
    > usage.
    >
    > Unlike the other bperf modes, it doesn't share the events with other
    > processes, but it reduces unnecessary events (and the overhead of
    > multiplexing) for each monitored cgroup within the perf session.
    >
    > When --for-each-cgroup is used with --bpf-counters, it will open a
    > cgroup-switches event per cpu internally and attach the new BPF
    > program to read the given perf_events and aggregate the results per
    > cgroup. The program is only invoked when a task switches to a task
    > in a different cgroup.
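
    Just to confirm the intended usage: this would be run like
    "perf stat -a --bpf-counters --for-each-cgroup A,B sleep 1", with the
    per-cgroup aggregation done by the new BPF program rather than by
    opening separate per-cgroup perf_event instances, right?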
    >
    > Cc: Song Liu <songliubraving@fb.com>
    > Signed-off-by: Namhyung Kim <namhyung@kernel.org>
    > ---
    > tools/perf/Makefile.perf | 17 +-
    > tools/perf/util/Build | 1 +
    > tools/perf/util/bpf_counter.c | 5 +
    > tools/perf/util/bpf_counter_cgroup.c | 299 ++++++++++++++++++++
    > tools/perf/util/bpf_skel/bperf_cgroup.bpf.c | 191 +++++++++++++
    > tools/perf/util/cgroup.c | 2 +
    > tools/perf/util/cgroup.h | 1 +
    > 7 files changed, 515 insertions(+), 1 deletion(-)
    > create mode 100644 tools/perf/util/bpf_counter_cgroup.c
    > create mode 100644 tools/perf/util/bpf_skel/bperf_cgroup.bpf.c

    [...]

    > diff --git a/tools/perf/util/bpf_counter_cgroup.c b/tools/perf/util/bpf_counter_cgroup.c
    > new file mode 100644
    > index 000000000000..327f97a23a84
    > --- /dev/null
    > +++ b/tools/perf/util/bpf_counter_cgroup.c
    > @@ -0,0 +1,299 @@
    > +// SPDX-License-Identifier: GPL-2.0
    > +
    > +/* Copyright (c) 2019 Facebook */

    I am not sure whether this ^^^ is accurate.

    > +/* Copyright (c) 2021 Google */
    > +
    > +#include <assert.h>
    > +#include <limits.h>
    > +#include <unistd.h>
    > +#include <sys/file.h>
    > +#include <sys/time.h>
    > +#include <sys/resource.h>
    > +#include <linux/err.h>
    > +#include <linux/zalloc.h>
    > +#include <linux/perf_event.h>
    > +#include <api/fs/fs.h>
    > +#include <perf/bpf_perf.h>
    > +
    > +#include "affinity.h"
    > +#include "bpf_counter.h"
    > +#include "cgroup.h"
    > +#include "counts.h"
    > +#include "debug.h"
    > +#include "evsel.h"
    > +#include "evlist.h"
    > +#include "target.h"
    > +#include "cpumap.h"
    > +#include "thread_map.h"
    > +
    > +#include "bpf_skel/bperf_cgroup.skel.h"
    > +
    > +static struct perf_event_attr cgrp_switch_attr = {
    > +	.type = PERF_TYPE_SOFTWARE,
    > +	.config = PERF_COUNT_SW_CGROUP_SWITCHES,
    > +	.size = sizeof(cgrp_switch_attr),
    > +	.sample_period = 1,
    > +	.disabled = 1,
    > +};
    > +
    > +static struct evsel *cgrp_switch;
    > +static struct bperf_cgroup_bpf *skel;
    > +
    > +#define FD(evt, cpu) (*(int *)xyarray__entry(evt->core.fd, cpu, 0))
    > +
    > +static int bperf_load_program(struct evlist *evlist)
    > +{
    > +	struct bpf_link *link;
    > +	struct evsel *evsel;
    > +	struct cgroup *cgrp, *leader_cgrp;
    > +	__u32 i, cpu;
    > +	int nr_cpus = evlist->core.all_cpus->nr;
    > +	int total_cpus = cpu__max_cpu();
    > +	int map_size, map_fd;
    > +	int prog_fd, err;
    > +
    > +	skel = bperf_cgroup_bpf__open();
    > +	if (!skel) {
    > +		pr_err("Failed to open cgroup skeleton\n");
    > +		return -1;
    > +	}
    > +
    > +	skel->rodata->num_cpus = total_cpus;
    > +	skel->rodata->num_events = evlist->core.nr_entries / nr_cgroups;
    > +
    > +	BUG_ON(evlist->core.nr_entries % nr_cgroups != 0);
    > +
    > +	/* we need one copy of events per cpu for reading */
    > +	map_size = total_cpus * evlist->core.nr_entries / nr_cgroups;
    > +	bpf_map__resize(skel->maps.events, map_size);
    > +	bpf_map__resize(skel->maps.cgrp_idx, nr_cgroups);
    > +	/* previous result is saved in a per-cpu array */
    > +	map_size = evlist->core.nr_entries / nr_cgroups;
    > +	bpf_map__resize(skel->maps.prev_readings, map_size);
    > +	/* cgroup result needs all events (per-cpu) */
    > +	map_size = evlist->core.nr_entries;
    > +	bpf_map__resize(skel->maps.cgrp_readings, map_size);
    > +
    > +	set_max_rlimit();
    > +
    > +	err = bperf_cgroup_bpf__load(skel);
    > +	if (err) {
    > +		pr_err("Failed to load cgroup skeleton\n");
    > +		goto out;
    > +	}
    > +
    > +	if (cgroup_is_v2("perf_event") > 0)
    > +		skel->bss->use_cgroup_v2 = 1;
    > +
    > +	err = -1;
    > +
    > +	cgrp_switch = evsel__new(&cgrp_switch_attr);
    > +	if (evsel__open_per_cpu(cgrp_switch, evlist->core.all_cpus, -1) < 0) {
    > +		pr_err("Failed to open cgroup switches event\n");
    > +		goto out;
    > +	}
    > +
    > +	for (i = 0; i < nr_cpus; i++) {
    > +		link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
    > +						      FD(cgrp_switch, i));
    > +		if (IS_ERR(link)) {
    > +			pr_err("Failed to attach cgroup program\n");
    > +			err = PTR_ERR(link);
    > +			goto out;
    > +		}
    > +	}
    > +
    > +	/*
    > +	 * Update cgrp_idx map from cgroup-id to event index.
    > +	 */
    > +	cgrp = NULL;
    > +	i = 0;
    > +
    > +	evlist__for_each_entry(evlist, evsel) {
    > +		if (cgrp == NULL || evsel->cgrp == leader_cgrp) {
    > +			leader_cgrp = evsel->cgrp;
    > +			evsel->cgrp = NULL;
    > +
    > +			/* open single copy of the events w/o cgroup */
    > +			err = evsel__open_per_cpu(evsel, evlist->core.all_cpus, -1);
    > +			if (err) {
    > +				pr_err("Failed to open first cgroup events\n");
    > +				goto out;
    > +			}
    > +
    > +			map_fd = bpf_map__fd(skel->maps.events);
    > +			for (cpu = 0; cpu < nr_cpus; cpu++) {
    > +				int fd = FD(evsel, cpu);
    > +				__u32 idx = evsel->idx * total_cpus +
    > +					evlist->core.all_cpus->map[cpu];
    > +
    > +				err = bpf_map_update_elem(map_fd, &idx, &fd,
    > +							  BPF_ANY);
    > +				if (err < 0) {
    > +					pr_err("Failed to update perf_event fd\n");
    > +					goto out;
    > +				}
    > +			}
    > +
    > +			evsel->cgrp = leader_cgrp;
    > +		}
    > +		evsel->supported = true;
    > +
    > +		if (evsel->cgrp == cgrp)
    > +			continue;
    > +
    > +		cgrp = evsel->cgrp;
    > +
    > +		if (read_cgroup_id(cgrp) < 0) {
    > +			pr_err("Failed to get cgroup id\n");
    > +			err = -1;
    > +			goto out;
    > +		}
    > +
    > +		map_fd = bpf_map__fd(skel->maps.cgrp_idx);
    > +		err = bpf_map_update_elem(map_fd, &cgrp->id, &i, BPF_ANY);
    > +		if (err < 0) {
    > +			pr_err("Failed to update cgroup index map\n");
    > +			goto out;
    > +		}
    > +
    > +		i++;
    > +	}
    > +
    > +	/*
    > +	 * bperf uses BPF_PROG_TEST_RUN to get accurate readings. Check
    > +	 * whether the kernel supports it.
    > +	 */
    > +	prog_fd = bpf_program__fd(skel->progs.trigger_read);
    > +	err = bperf_trigger_reading(prog_fd, 0);
    > +	if (err) {
    > +		pr_debug("The kernel does not support test_run for raw_tp BPF programs.\n"
    > +			 "Therefore, --for-each-cgroup might show inaccurate readings\n");

    I think this should be a warning, and we should set err = 0 to continue?
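
    Something like this perhaps (just a sketch; switching pr_debug() to
    pr_warning() and clearing err are the only changes from the hunk above):

	prog_fd = bpf_program__fd(skel->progs.trigger_read);
	err = bperf_trigger_reading(prog_fd, 0);
	if (err) {
		pr_warning("The kernel does not support test_run for raw_tp BPF programs.\n"
			   "Therefore, --for-each-cgroup might show inaccurate readings\n");
		/* not fatal: keep the BPF counters running, just warn about accuracy */
		err = 0;
	}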

    > +	}
    > +
    > +out:
    > +	return err;
    > +}
    > +

    [...]

    > +
    > +/*
    > + * trigger the leader prog on each cpu, so the cgrp_readings map could get
    > + * the latest results.
    > + */
    > +static int bperf_cgrp__sync_counters(struct evlist *evlist)
    > +{
    > +	int i, cpu;
    > +	int nr_cpus = evlist->core.all_cpus->nr;
    > +	int prog_fd = bpf_program__fd(skel->progs.trigger_read);
    > +
    > +	for (i = 0; i < nr_cpus; i++) {
    > +		cpu = evlist->core.all_cpus->map[i];
    > +		bperf_trigger_reading(prog_fd, cpu);
    > +	}
    > +
    > +	return 0;
    > +}
    > +
    > +static int bperf_cgrp__enable(struct evsel *evsel)
    > +{

    Do we need to call bperf_cgrp__sync_counters() before setting enabled to 1?
    If we don't, we may include counts from before enabling, no?
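
    e.g. a rough sketch (untested, assuming evsel->evlist points back to the
    evlist here):

	static int bperf_cgrp__enable(struct evsel *evsel)
	{
		/* refresh prev_readings so counts from before enabling are not included */
		bperf_cgrp__sync_counters(evsel->evlist);

		skel->bss->enabled = 1;
		return 0;
	}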

    > +	skel->bss->enabled = 1;
    > +	return 0;
    > +}

    [...]
