 
    Subject: [PATCH 04/15] perf tools: Introduce struct perf_target
    From: Namhyung Kim <namhyung.kim@lge.com>

    The perf_target struct will be used for taking care of cpu/thread maps
    based on the user's input. Since it is used by various subcommands, it
    is better to factor it out.
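
    For quick reference while reading the diff, the struct consolidated into
    tools/perf/perf.h by this patch is reproduced below; the example_target
    initializer that follows it is purely illustrative (not part of the patch)
    and only sketches how a subcommand might describe an existing-pid session:

    #include <stdbool.h>     /* bool */
    #include <sys/types.h>   /* uid_t */

    /* As added to tools/perf/perf.h by this patch: */
    struct perf_target {
            const char *pid;
            const char *tid;
            const char *cpu_list;
            const char *uid_str;
            uid_t uid;
            bool system_wide;
    };

    /*
     * Illustrative only: with the embedded struct, a record session that
     * targets an existing pid (as if "-p 1234" were given) could be
     * described as:
     */
    static struct perf_target example_target = {
            .pid         = "1234",
            .system_wide = false,   /* no -a/--all-cpus */
    };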

    Thanks to Arnaldo for suggesting the better name.

    Signed-off-by: Namhyung Kim <namhyung.kim@lge.com>
    Reviewed-by: David Ahern <dsahern@gmail.com>
    Cc: David Ahern <dsahern@gmail.com>
    Cc: Ingo Molnar <mingo@redhat.com>
    Cc: Namhyung Kim <namhyung@gmail.com>
    Cc: Paul Mackerras <paulus@samba.org>
    Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Link: http://lkml.kernel.org/r/1335417327-11796-2-git-send-email-namhyung.kim@lge.com
    Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
    ---
    tools/perf/builtin-record.c | 41 ++++++++++++++++++++++-------------------
    tools/perf/builtin-test.c   |  5 +++--
    tools/perf/perf.h           | 15 ++++++++++-----
    tools/perf/util/evlist.c    |  2 +-
    tools/perf/util/evsel.c     | 10 +++++-----
    5 files changed, 41 insertions(+), 32 deletions(-)

    diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
    index 10b1f1f..4dcf270 100644
    --- a/tools/perf/builtin-record.c
    +++ b/tools/perf/builtin-record.c
    @@ -44,7 +44,6 @@ struct perf_record {
    struct perf_evlist *evlist;
    struct perf_session *session;
    const char *progname;
    - const char *uid_str;
    int output;
    unsigned int page_size;
    int realtime_prio;
    @@ -218,7 +217,7 @@ try_again:
    if (err == EPERM || err == EACCES) {
    ui__error_paranoid();
    exit(EXIT_FAILURE);
    - } else if (err == ENODEV && opts->cpu_list) {
    + } else if (err == ENODEV && opts->target.cpu_list) {
    die("No such device - did you specify"
    " an out-of-range profile CPU?\n");
    } else if (err == EINVAL) {
    @@ -578,7 +577,7 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
    perf_session__process_machines(session, tool,
    perf_event__synthesize_guest_os);

    - if (!opts->system_wide)
    + if (!opts->target.system_wide)
    perf_event__synthesize_thread_map(tool, evsel_list->threads,
    process_synthesized_event,
    machine);
    @@ -765,9 +764,9 @@ const struct option record_options[] = {
    parse_events_option),
    OPT_CALLBACK(0, "filter", &record.evlist, "filter",
    "event filter", parse_filter),
    - OPT_STRING('p', "pid", &record.opts.target_pid, "pid",
    + OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
    "record events on existing process id"),
    - OPT_STRING('t', "tid", &record.opts.target_tid, "tid",
    + OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
    "record events on existing thread id"),
    OPT_INTEGER('r', "realtime", &record.realtime_prio,
    "collect data with this RT SCHED_FIFO priority"),
    @@ -775,11 +774,11 @@ const struct option record_options[] = {
    "collect data without buffering"),
    OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
    "collect raw sample records from all opened counters"),
    - OPT_BOOLEAN('a', "all-cpus", &record.opts.system_wide,
    + OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
    "system-wide collection from all CPUs"),
    OPT_BOOLEAN('A', "append", &record.append_file,
    "append to the output file to do incremental profiling"),
    - OPT_STRING('C', "cpu", &record.opts.cpu_list, "cpu",
    + OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
    "list of cpus to monitor"),
    OPT_BOOLEAN('f', "force", &record.force,
    "overwrite existing data file (deprecated)"),
    @@ -813,7 +812,8 @@ const struct option record_options[] = {
    OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
    "monitor event in cgroup name only",
    parse_cgroups),
    - OPT_STRING('u', "uid", &record.uid_str, "user", "user to profile"),
    + OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
    + "user to profile"),

    OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
    "branch any", "sample any taken branches",
    @@ -842,8 +842,9 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)

    argc = parse_options(argc, argv, record_options, record_usage,
    PARSE_OPT_STOP_AT_NON_OPTION);
    - if (!argc && !rec->opts.target_pid && !rec->opts.target_tid &&
    - !rec->opts.system_wide && !rec->opts.cpu_list && !rec->uid_str)
    + if (!argc && !rec->opts.target.pid && !rec->opts.target.tid &&
    + !rec->opts.target.system_wide && !rec->opts.target.cpu_list &&
    + !rec->opts.target.uid_str)
    usage_with_options(record_usage, record_options);

    if (rec->force && rec->append_file) {
    @@ -856,7 +857,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
    rec->write_mode = WRITE_FORCE;
    }

    - if (nr_cgroups && !rec->opts.system_wide) {
    + if (nr_cgroups && !rec->opts.target.system_wide) {
    fprintf(stderr, "cgroup monitoring only available in"
    " system-wide mode\n");
    usage_with_options(record_usage, record_options);
    @@ -883,17 +884,19 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
    goto out_symbol_exit;
    }

    - rec->opts.uid = parse_target_uid(rec->uid_str, rec->opts.target_tid,
    - rec->opts.target_pid);
    - if (rec->uid_str != NULL && rec->opts.uid == UINT_MAX - 1)
    + rec->opts.target.uid = parse_target_uid(rec->opts.target.uid_str,
    + rec->opts.target.tid,
    + rec->opts.target.pid);
    + if (rec->opts.target.uid_str != NULL &&
    + rec->opts.target.uid == UINT_MAX - 1)
    goto out_free_fd;

    - if (rec->opts.target_pid)
    - rec->opts.target_tid = rec->opts.target_pid;
    + if (rec->opts.target.pid)
    + rec->opts.target.tid = rec->opts.target.pid;

    - if (perf_evlist__create_maps(evsel_list, rec->opts.target_pid,
    - rec->opts.target_tid, rec->opts.uid,
    - rec->opts.cpu_list) < 0)
    + if (perf_evlist__create_maps(evsel_list, rec->opts.target.pid,
    + rec->opts.target.tid, rec->opts.target.uid,
    + rec->opts.target.cpu_list) < 0)
    usage_with_options(record_usage, record_options);

    list_for_each_entry(pos, &evsel_list->entries, node) {
    diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
    index 5502a4a..27882d8 100644
    --- a/tools/perf/builtin-test.c
    +++ b/tools/perf/builtin-test.c
    @@ -1207,8 +1207,9 @@ static int test__PERF_RECORD(void)
    * perf_evlist__prepare_workload we'll fill in the only thread
    * we're monitoring, the one forked there.
    */
    - err = perf_evlist__create_maps(evlist, opts.target_pid,
    - opts.target_tid, UINT_MAX, opts.cpu_list);
    + err = perf_evlist__create_maps(evlist, opts.target.pid,
    + opts.target.tid, UINT_MAX,
    + opts.target.cpu_list);
    if (err < 0) {
    pr_debug("Not enough memory to create thread/cpu maps\n");
    goto out_delete_evlist;
    diff --git a/tools/perf/perf.h b/tools/perf/perf.h
    index 89e3355..7e226c0 100644
    --- a/tools/perf/perf.h
    +++ b/tools/perf/perf.h
    @@ -207,10 +207,17 @@ extern const char perf_version_string[];

    void pthread__unblock_sigwinch(void);

    -struct perf_record_opts {
    - const char *target_pid;
    - const char *target_tid;
    +struct perf_target {
    + const char *pid;
    + const char *tid;
    + const char *cpu_list;
    + const char *uid_str;
    uid_t uid;
    + bool system_wide;
    +};
    +
    +struct perf_record_opts {
    + struct perf_target target;
    bool call_graph;
    bool group;
    bool inherit_stat;
    @@ -223,7 +230,6 @@ struct perf_record_opts {
    bool sample_time;
    bool sample_id_all_missing;
    bool exclude_guest_missing;
    - bool system_wide;
    bool period;
    unsigned int freq;
    unsigned int mmap_pages;
    @@ -231,7 +237,6 @@ struct perf_record_opts {
    int branch_stack;
    u64 default_interval;
    u64 user_interval;
    - const char *cpu_list;
    };

    #endif
    diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
    index 1986d80..7080901 100644
    --- a/tools/perf/util/evlist.c
    +++ b/tools/perf/util/evlist.c
    @@ -827,7 +827,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
    exit(-1);
    }

    - if (!opts->system_wide && !opts->target_tid && !opts->target_pid)
    + if (!opts->target.system_wide && !opts->target.tid && !opts->target.pid)
    evlist->threads->map[0] = evlist->workload.pid;

    close(child_ready_pipe[1]);
    diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
    index 8c13dbc..d90598e 100644
    --- a/tools/perf/util/evsel.c
    +++ b/tools/perf/util/evsel.c
    @@ -106,15 +106,15 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
    if (opts->call_graph)
    attr->sample_type |= PERF_SAMPLE_CALLCHAIN;

    - if (opts->system_wide)
    + if (opts->target.system_wide)
    attr->sample_type |= PERF_SAMPLE_CPU;

    if (opts->period)
    attr->sample_type |= PERF_SAMPLE_PERIOD;

    if (!opts->sample_id_all_missing &&
    - (opts->sample_time || opts->system_wide ||
    - !opts->no_inherit || opts->cpu_list))
    + (opts->sample_time || opts->target.system_wide ||
    + !opts->no_inherit || opts->target.cpu_list))
    attr->sample_type |= PERF_SAMPLE_TIME;

    if (opts->raw_samples) {
    @@ -135,8 +135,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct perf_record_opts *opts,
    attr->mmap = track;
    attr->comm = track;

    - if (!opts->target_pid && !opts->target_tid && !opts->system_wide &&
    - (!opts->group || evsel == first)) {
    + if (!opts->target.pid && !opts->target.tid &&
    + !opts->target.system_wide && (!opts->group || evsel == first)) {
    attr->disabled = 1;
    attr->enable_on_exec = 1;
    }
    --
    1.7.9.2.358.g22243

