Subject: [PATCH v3 08/10] perf: cache perf_event_groups_first for cgroups
From: Ian Rogers <irogers@google.com>
Add a per-CPU cache of the pinned and flexible perf_event_groups_first
values for a cgroup, avoiding an O(log(#perf events)) rbtree search
during sched_in.
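
To make the scheme concrete, here is a minimal user-space sketch (the
sketch_* names and simplified types are hypothetical, not the kernel
code): each cgroup keeps one cached pointer per CPU to its leftmost
event, so sched_in reads it in O(1) instead of descending the rbtree.
Inserts only set the cache when it is empty; deletes advance it to the
in-tree successor.

#include <stdio.h>

#define SKETCH_NR_CPUS 4

struct sketch_event {
	unsigned long group_index;	/* monotonically increasing key */
	struct sketch_event *next;	/* stand-in for perf_event_groups_next() */
};

struct sketch_cgroup {
	/* one cached leftmost event per CPU, like pinned_event/flexible_event */
	struct sketch_event *first[SKETCH_NR_CPUS];
};

/* Insert: new events sort rightmost (bigger group_index), so the cache
 * only needs to be set when the slot was empty. */
static void sketch_insert(struct sketch_cgroup *cg, int cpu,
			  struct sketch_event *ev)
{
	if (!cg->first[cpu])
		cg->first[cpu] = ev;
}

/* Delete: the cache only moves when the cached event itself goes away. */
static void sketch_delete(struct sketch_cgroup *cg, int cpu,
			  struct sketch_event *ev)
{
	if (cg->first[cpu] == ev)
		cg->first[cpu] = ev->next;
}

int main(void)
{
	struct sketch_cgroup cg = { { NULL } };
	struct sketch_event a = { .group_index = 1 }, b = { .group_index = 2 };

	a.next = &b;
	sketch_insert(&cg, 0, &a);
	sketch_insert(&cg, 0, &b);	/* no-op: a stays leftmost */
	printf("first: %lu\n", cg.first[0]->group_index);	/* prints 1 */
	sketch_delete(&cg, 0, &a);
	printf("first: %lu\n", cg.first[0]->group_index);	/* prints 2 */
	return 0;
}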

Based-on-work-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
---
include/linux/perf_event.h | 6 +++
kernel/events/core.c | 79 +++++++++++++++++++++++++++-----------
2 files changed, 62 insertions(+), 23 deletions(-)
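
The insert- and delete-side shortcuts in the core.c hunks below lean on
the rbtree key order established earlier in this series: groups sort by
{cpu, cgroup, group_index}, and group_index increases monotonically, so
a new event for a given {cpu, cgroup} always lands to the right of
existing ones. A simplified sketch of that ordering (hypothetical
sketch_* names; the real comparator is perf_event_groups_less()):

#include <stdbool.h>

struct sketch_key {
	int cpu;
	int cgrp_id;			/* 0 for events without a cgroup */
	unsigned long group_index;	/* assigned once, always increasing */
};

static bool sketch_less(const struct sketch_key *l,
			const struct sketch_key *r)
{
	if (l->cpu != r->cpu)
		return l->cpu < r->cpu;
	if (l->cgrp_id != r->cgrp_id)
		return l->cgrp_id < r->cgrp_id;
	/* same {cpu, cgroup}: newer events compare greater, so they land
	 * to the right and never displace a cached leftmost event */
	return l->group_index < r->group_index;
}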

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b3580afbf358..cfd0b320418c 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -877,6 +877,12 @@ struct perf_cgroup_info {
struct perf_cgroup {
struct cgroup_subsys_state css;
struct perf_cgroup_info __percpu *info;
+ /* A per-CPU cache of the cgroup's first event in pinned_groups or
+ * flexible_groups within this CPU's perf_event_context. Avoids an
+ * rbtree search during sched_in.
+ */
+ struct perf_event * __percpu *pinned_event;
+ struct perf_event * __percpu *flexible_event;
};

/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 11594d8bbb2e..9f0febf51d97 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1638,6 +1638,25 @@ perf_event_groups_insert(struct perf_event_groups *groups,

rb_link_node(&event->group_node, parent, node);
rb_insert_color(&event->group_node, &groups->tree);
+#ifdef CONFIG_CGROUP_PERF
+ if (is_cgroup_event(event)) {
+ struct perf_event **cgrp_event;
+
+ if (event->attr.pinned)
+ cgrp_event = per_cpu_ptr(event->cgrp->pinned_event,
+ event->cpu);
+ else
+ cgrp_event = per_cpu_ptr(event->cgrp->flexible_event,
+ event->cpu);
+ /*
+ * Cgroup events for the same cgroup on the same CPU will
+ * always be inserted at the right because of bigger
+ * @groups->index. Only need to set *cgrp_event when it's NULL.
+ */
+ if (!*cgrp_event)
+ *cgrp_event = event;
+ }
+#endif
}

/*
@@ -1652,6 +1671,9 @@ add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx)
perf_event_groups_insert(groups, event);
}

+static struct perf_event *
+perf_event_groups_next(struct perf_event *event);
+
/*
* Delete a group from a tree.
*/
@@ -1662,6 +1684,22 @@ perf_event_groups_delete(struct perf_event_groups *groups,
WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) ||
RB_EMPTY_ROOT(&groups->tree));

+#ifdef CONFIG_CGROUP_PERF
+ if (is_cgroup_event(event)) {
+ struct perf_event **cgrp_event;
+
+ if (event->attr.pinned)
+ cgrp_event = per_cpu_ptr(event->cgrp->pinned_event,
+ event->cpu);
+ else
+ cgrp_event = per_cpu_ptr(event->cgrp->flexible_event,
+ event->cpu);
+
+ if (*cgrp_event == event)
+ *cgrp_event = perf_event_groups_next(event);
+ }
+#endif
+
rb_erase(&event->group_node, &groups->tree);
init_event_group(event);
}
@@ -1679,20 +1717,14 @@ del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx)
}

/*
- * Get the leftmost event in the cpu/cgroup subtree.
+ * Get the leftmost event in the cpu subtree without a cgroup (i.e. task or
+ * system-wide).
*/
static struct perf_event *
-perf_event_groups_first(struct perf_event_groups *groups, int cpu,
- struct cgroup *cgrp)
+perf_event_groups_first_no_cgroup(struct perf_event_groups *groups, int cpu)
{
struct perf_event *node_event = NULL, *match = NULL;
struct rb_node *node = groups->tree.rb_node;
-#ifdef CONFIG_CGROUP_PERF
- int node_cgrp_id, cgrp_id = 0;
-
- if (cgrp)
- cgrp_id = cgrp->id;
-#endif

while (node) {
node_event = container_of(node, struct perf_event, group_node);
@@ -1706,18 +1738,10 @@ perf_event_groups_first(struct perf_event_groups *groups, int cpu,
continue;
}
#ifdef CONFIG_CGROUP_PERF
- node_cgrp_id = 0;
- if (node_event->cgrp && node_event->cgrp->css.cgroup)
- node_cgrp_id = node_event->cgrp->css.cgroup->id;
-
- if (cgrp_id < node_cgrp_id) {
+ if (node_event->cgrp) {
node = node->rb_left;
continue;
}
- if (cgrp_id > node_cgrp_id) {
- node = node->rb_right;
- continue;
- }
#endif
match = node_event;
node = node->rb_left;
@@ -3556,18 +3580,27 @@ static int ctx_groups_sched_in(struct perf_event_context *ctx,
.cap = ARRAY_SIZE(itrs),
};
/* Events not within a CPU context may be on any CPU. */
- __heap_add(&event_heap, perf_event_groups_first(groups, -1,
- NULL));
+ __heap_add(&event_heap,
+ perf_event_groups_first_no_cgroup(groups, -1));

}
evt = event_heap.data;

- __heap_add(&event_heap, perf_event_groups_first(groups, cpu, NULL));
+ __heap_add(&event_heap,
+ perf_event_groups_first_no_cgroup(groups, cpu));

#ifdef CONFIG_CGROUP_PERF
for (; css; css = css->parent) {
- __heap_add(&event_heap, perf_event_groups_first(groups, cpu,
- css->cgroup));
+ struct perf_cgroup *cgrp;
+
+ /* root cgroup doesn't have events */
+ if (css->id == 1)
+ break;
+
+ cgrp = container_of(css, struct perf_cgroup, css);
+ __heap_add(&event_heap, is_pinned
+ ? *per_cpu_ptr(cgrp->pinned_event, cpu)
+ : *per_cpu_ptr(cgrp->flexible_event, cpu));
}
#endif
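
On the consumption side (the last hunk above), each cached first event
is fed into a min-heap and sched_in repeatedly visits the event with
the smallest group_index, refilling its slot from the tree: effectively
a k-way merge across per-cgroup event streams. A rough sketch of that
loop (hypothetical sketch_* names; a linear scan stands in for the
kernel's min-heap in visit_groups_merge()):

#include <stdio.h>

struct sketch_event {
	unsigned long group_index;
	struct sketch_event *next;	/* in-tree successor */
};

/*
 * Visit events from several streams in global group_index order. The
 * kernel uses a min-heap; a linear scan keeps this sketch short.
 */
static void sketch_visit_merge(struct sketch_event **heads, int n)
{
	for (;;) {
		int min = -1;

		for (int i = 0; i < n; i++) {
			if (heads[i] && (min < 0 ||
			    heads[i]->group_index < heads[min]->group_index))
				min = i;
		}
		if (min < 0)
			break;			/* all streams exhausted */
		printf("visit %lu\n", heads[min]->group_index);
		heads[min] = heads[min]->next;	/* refill from the tree */
	}
}

int main(void)
{
	struct sketch_event a = { .group_index = 1 }, b = { .group_index = 3 };
	struct sketch_event c = { .group_index = 2 };
	struct sketch_event *heads[] = { &a, &c };	/* cached firsts */

	a.next = &b;			/* stream 0: 1 -> 3; stream 1: 2 */
	sketch_visit_merge(heads, 2);	/* visits 1, 2, 3 */
	return 0;
}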

--
2.24.0.432.g9d3f5f5b63-goog