Subject: [PATCH 02/12] perf report: Apply callchain percent limit on --stdio
Currently 'perf report --stdio' fails to check the percent limit when
printing callchains.  Fix it.
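
For example (a hypothetical session; the "5" below is the minimum percent
field of the -g/--call-graph option, i.e. what callchain_param.min_percent
holds):

  $ perf record -g -- ./workload
  $ perf report --stdio -g graph,5

With this patch, callchain entries whose cumulative hit percentage is
below 5% are no longer printed in the --stdio output.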

Reported-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
tools/perf/ui/stdio/hist.c | 32 ++++++++++++++++++++++++++++----
1 file changed, 28 insertions(+), 4 deletions(-)

diff --git a/tools/perf/ui/stdio/hist.c b/tools/perf/ui/stdio/hist.c
index 387110d50b00..905286ec754c 100644
--- a/tools/perf/ui/stdio/hist.c
+++ b/tools/perf/ui/stdio/hist.c
@@ -96,12 +96,19 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
 	while (node) {
 		u64 new_total;
 		u64 cumul;
+		double percent = 0.0;
 
 		child = rb_entry(node, struct callchain_node, rb_node);
 		cumul = callchain_cumul_hits(child);
 		remaining -= cumul;
 		cumul_count += callchain_cumul_counts(child);
 
+		next = rb_next(node);
+
+		percent = 100.0 * cumul / total_samples;
+		if (percent < callchain_param.min_percent)
+			goto next;
+
 		/*
 		 * The depth mask manages the output of pipes that show
 		 * the depth. We don't want to keep the pipes of the current
@@ -109,7 +116,6 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
 		 * Except if we have remaining filtered hits. They will
 		 * supersede the last child
 		 */
-		next = rb_next(node);
 		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
 			new_depth_mask &= ~(1 << (depth - 1));

@@ -136,9 +142,11 @@ static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
 						  depth + 1,
 						  new_depth_mask | (1 << depth),
 						  left_margin);
-		node = next;
+
 		if (++entries_printed == callchain_param.print_limit)
 			break;
+next:
+		node = next;
 	}
 
 	if (callchain_param.mode == CHAIN_GRAPH_REL &&
@@ -250,10 +258,18 @@ static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
 	u32 entries_printed = 0;
 	struct callchain_node *chain;
 	struct rb_node *rb_node = rb_first(tree);
+	double percent;
+	u64 hits;
 
 	while (rb_node) {
 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
 
+		hits = callchain_cumul_hits(chain);
+		percent = 100.0 * hits / total_samples;
+
+		if (percent < callchain_param.min_percent)
+			goto next;
+
 		ret += fprintf(fp, " ");
 		ret += callchain_node__fprintf_value(chain, fp, total_samples);
 		ret += fprintf(fp, "\n");
@@ -261,7 +277,7 @@ static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
 		ret += fprintf(fp, "\n");
 		if (++entries_printed == callchain_param.print_limit)
 			break;
-
+next:
 		rb_node = rb_next(rb_node);
 	}

@@ -301,18 +317,26 @@ static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
 	u32 entries_printed = 0;
 	struct callchain_node *chain;
 	struct rb_node *rb_node = rb_first(tree);
+	double percent;
+	u64 hits;
 
 	while (rb_node) {
 
 		chain = rb_entry(rb_node, struct callchain_node, rb_node);
 
+		hits = callchain_cumul_hits(chain);
+		percent = 100.0 * hits / total_samples;
+
+		if (percent < callchain_param.min_percent)
+			goto next;
+
 		ret += callchain_node__fprintf_value(chain, fp, total_samples);
 		ret += fprintf(fp, " ");
 		ret += __callchain__fprintf_folded(fp, chain);
 		ret += fprintf(fp, "\n");
 		if (++entries_printed == callchain_param.print_limit)
 			break;
-
+next:
 		rb_node = rb_next(rb_node);
 	}

--
2.6.4