Subject: [PATCH 36/38] perf report: Add --multi-thread option and config item
The --multi-thread option enables parallel sample processing. It defaults
to false for now, so users can still force serial processing even for an
indexed data file. The default can also be changed by setting the
"report.multi-thread" config option in the ~/.perfconfig file.
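
For example (the multi-threaded path is only taken for a data file that
was recorded with the index feature added earlier in this series;
otherwise perf report falls back to single-threaded processing):

  # one-off, on the command line
  $ perf report --multi-thread

  # or make it the default via ~/.perfconfig
  [report]
	multi-thread = true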

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
---
tools/perf/Documentation/perf-report.txt | 3 ++
tools/perf/builtin-report.c | 66 +++++++++++++++++++++++++++-----
2 files changed, 60 insertions(+), 9 deletions(-)

diff --git a/tools/perf/Documentation/perf-report.txt b/tools/perf/Documentation/perf-report.txt
index dd7cccdde498..e00077a658c1 100644
--- a/tools/perf/Documentation/perf-report.txt
+++ b/tools/perf/Documentation/perf-report.txt
@@ -318,6 +318,9 @@ OPTIONS
--header-only::
Show only perf.data header (forces --stdio).

+--multi-thread::
+ Speed up the report by processing samples in parallel using multiple threads.
+
SEE ALSO
--------
linkperf:perf-stat[1], linkperf:perf-annotate[1]
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 5adf269b84a9..4db4cfbb8c75 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -51,6 +51,7 @@ struct report {
bool mem_mode;
bool header;
bool header_only;
+ bool multi_thread;
int max_stack;
struct perf_read_values show_threads_values;
const char *pretty_printing_style;
@@ -82,6 +83,10 @@ static int report__config(const char *var, const char *value, void *cb)
rep->queue_size = perf_config_u64(var, value);
return 0;
}
+ if (!strcmp(var, "report.multi-thread")) {
+ rep->multi_thread = perf_config_bool(var, value);
+ return 0;
+ }

return perf_default_config(var, value, cb);
}
@@ -128,17 +133,18 @@ static int hist_iter__report_callback(struct hist_entry_iter *iter,
return err;
}

-static int process_sample_event(struct perf_tool *tool,
- union perf_event *event,
- struct perf_sample *sample,
- struct perf_evsel *evsel,
- struct machine *machine)
+static int __process_sample_event(struct perf_tool *tool __maybe_unused,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine,
+ struct hists *hists,
+ struct report *rep)
{
- struct report *rep = container_of(tool, struct report, tool);
struct addr_location al;
struct hist_entry_iter iter = {
.evsel = evsel,
- .hists = evsel__hists(evsel),
+ .hists = hists,
.sample = sample,
.session = rep->session,
.hide_unresolved = rep->hide_unresolved,
@@ -178,6 +184,31 @@ static int process_sample_event(struct perf_tool *tool,
return ret;
}

+static int process_sample_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
+{
+ struct report *rep = container_of(tool, struct report, tool);
+
+ return __process_sample_event(tool, event, sample, evsel, machine,
+ evsel__hists(evsel), rep);
+}
+
+static int process_sample_event_mt(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_sample *sample,
+ struct perf_evsel *evsel,
+ struct machine *machine)
+{
+ struct perf_tool_mt *mt = container_of(tool, struct perf_tool_mt, tool);
+ struct report *rep = mt->priv;
+
+ return __process_sample_event(tool, event, sample, evsel, machine,
+ &mt->hists[evsel->idx], rep);
+}
+
static int process_read_event(struct perf_tool *tool,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -489,7 +520,12 @@ static int __cmd_report(struct report *rep)
if (ret)
return ret;

- ret = perf_session__process_events(session, &rep->tool);
+ if (rep->multi_thread) {
+ rep->tool.sample = process_sample_event_mt;
+ ret = perf_session__process_events_mt(session, &rep->tool, rep);
+ } else {
+ ret = perf_session__process_events(session, &rep->tool);
+ }
if (ret)
return ret;

@@ -512,7 +548,12 @@ static int __cmd_report(struct report *rep)
}
}

- report__collapse_hists(rep);
+ /*
+ * For multi-thread report, it already calls hists__mt_resort()
+ * so no need to collapse here.
+ */
+ if (!rep->multi_thread)
+ report__collapse_hists(rep);

if (session_done())
return 0;
@@ -720,6 +761,8 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
"Don't show entries under that percent", parse_percent_limit),
OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
"how to display percentage of filtered entries", parse_filter_percentage),
+ OPT_BOOLEAN(0, "multi-thread", &report.multi_thread,
+ "Speed up sample processing using multiple threads"),
OPT_END()
};
struct perf_data_file file = {
@@ -764,6 +807,11 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
report.queue_size);
}

+ if (report.multi_thread && !perf_session__has_index(session)) {
+ pr_debug("falling back to single thread for normal data file.\n");
+ report.multi_thread = false;
+ }
+
report.session = session;

has_br_stack = perf_header__has_feat(&session->header,
--
2.2.2

