Diffstat (limited to 'tools/perf/builtin-stat.c')
-rw-r--r--   tools/perf/builtin-stat.c | 127
1 file changed, 61 insertions(+), 66 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index fdf5172646a5..2c38dd98f6ca 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -97,7 +97,7 @@
 #include <internal/threadmap.h>
 
 #define DEFAULT_SEPARATOR	" "
-#define FREEZE_ON_SMI_PATH	"devices/cpu/freeze_on_smi"
+#define FREEZE_ON_SMI_PATH	"bus/event_source/devices/cpu/freeze_on_smi"
 
 static void print_counters(struct timespec *ts, int argc, const char **argv);
 
@@ -108,11 +108,7 @@ static struct parse_events_option_args parse_events_option_args = {
 
 static bool all_counters_use_bpf = true;
 
-static struct target target = {
-	.uid = UINT_MAX,
-};
-
-#define METRIC_ONLY_LEN 20
+static struct target target;
 
 static volatile sig_atomic_t child_pid = -1;
 static int detailed_run = 0;
@@ -151,21 +147,6 @@ static struct perf_stat perf_stat;
 
 static volatile sig_atomic_t done = 0;
 
-static struct perf_stat_config stat_config = {
-	.aggr_mode		= AGGR_GLOBAL,
-	.aggr_level		= MAX_CACHE_LVL + 1,
-	.scale			= true,
-	.unit_width		= 4, /* strlen("unit") */
-	.run_count		= 1,
-	.metric_only_len	= METRIC_ONLY_LEN,
-	.walltime_nsecs_stats	= &walltime_nsecs_stats,
-	.ru_stats		= &ru_stats,
-	.big_num		= true,
-	.ctl_fd			= -1,
-	.ctl_fd_ack		= -1,
-	.iostat_run		= false,
-};
-
 /* Options set from the command line. */
 struct opt_aggr_mode {
 	bool node, socket, die, cluster, cache, core, thread, no_aggr;
@@ -698,8 +679,6 @@ static enum counter_recovery stat_handle_error(struct evsel *counter)
 	if (child_pid != -1)
 		kill(child_pid, SIGTERM);
 
-	tpebs_delete();
-
 	return COUNTER_FATAL;
 }
 
@@ -1071,16 +1050,6 @@ static void sig_atexit(void)
 	kill(getpid(), signr);
 }
 
-void perf_stat__set_big_num(int set)
-{
-	stat_config.big_num = (set != 0);
-}
-
-void perf_stat__set_no_csv_summary(int set)
-{
-	stat_config.no_csv_summary = (set != 0);
-}
-
 static int stat__set_big_num(const struct option *opt __maybe_unused,
 			     const char *s __maybe_unused, int unset)
 {
@@ -1396,7 +1365,7 @@ static struct aggr_cpu_id perf_stat__get_aggr(struct perf_stat_config *config,
 	struct aggr_cpu_id id;
 
 	/* per-process mode - should use global aggr mode */
-	if (cpu.cpu == -1)
+	if (cpu.cpu == -1 || cpu.cpu >= config->cpus_aggr_map->nr)
 		return get_id(config, cpu);
 
 	if (aggr_cpu_id__is_empty(&config->cpus_aggr_map->map[cpu.cpu]))
@@ -1544,11 +1513,8 @@ static int perf_stat_init_aggr_mode(void)
 	 * taking the highest cpu number to be the size of
 	 * the aggregation translate cpumap.
 	 */
-	if (!perf_cpu_map__is_any_cpu_or_is_empty(evsel_list->core.user_requested_cpus))
-		nr = perf_cpu_map__max(evsel_list->core.user_requested_cpus).cpu;
-	else
-		nr = 0;
-	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr + 1);
+	nr = perf_cpu_map__max(evsel_list->core.all_cpus).cpu + 1;
+	stat_config.cpus_aggr_map = cpu_aggr_map__empty_new(nr);
 	return stat_config.cpus_aggr_map ? 0 : -ENOMEM;
 }
 
@@ -1723,48 +1689,48 @@ static struct aggr_cpu_id perf_env__get_global_aggr_by_cpu(struct perf_cpu cpu _
 static struct aggr_cpu_id perf_stat__get_socket_file(struct perf_stat_config *config __maybe_unused,
 						     struct perf_cpu cpu)
 {
-	return perf_env__get_socket_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+	return perf_env__get_socket_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
 }
 
 static struct aggr_cpu_id perf_stat__get_die_file(struct perf_stat_config *config __maybe_unused,
 						  struct perf_cpu cpu)
 {
-	return perf_env__get_die_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+	return perf_env__get_die_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
 }
 
 static struct aggr_cpu_id perf_stat__get_cluster_file(struct perf_stat_config *config __maybe_unused,
 						      struct perf_cpu cpu)
 {
-	return perf_env__get_cluster_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+	return perf_env__get_cluster_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
 }
 
 static struct aggr_cpu_id perf_stat__get_cache_file(struct perf_stat_config *config __maybe_unused,
 						    struct perf_cpu cpu)
 {
-	return perf_env__get_cache_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+	return perf_env__get_cache_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
 }
 
 static struct aggr_cpu_id perf_stat__get_core_file(struct perf_stat_config *config __maybe_unused,
 						   struct perf_cpu cpu)
 {
-	return perf_env__get_core_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+	return perf_env__get_core_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
 }
 
 static struct aggr_cpu_id perf_stat__get_cpu_file(struct perf_stat_config *config __maybe_unused,
 						  struct perf_cpu cpu)
 {
-	return perf_env__get_cpu_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+	return perf_env__get_cpu_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
 }
 
 static struct aggr_cpu_id perf_stat__get_node_file(struct perf_stat_config *config __maybe_unused,
 						   struct perf_cpu cpu)
 {
-	return perf_env__get_node_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+	return perf_env__get_node_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
 }
 
 static struct aggr_cpu_id perf_stat__get_global_file(struct perf_stat_config *config __maybe_unused,
 						     struct perf_cpu cpu)
 {
-	return perf_env__get_global_aggr_by_cpu(cpu, &perf_stat.session->header.env);
+	return perf_env__get_global_aggr_by_cpu(cpu, perf_session__env(perf_stat.session));
 }
 
 static aggr_cpu_id_get_t aggr_mode__get_aggr_file(enum aggr_mode aggr_mode)
@@ -1823,7 +1789,7 @@ static aggr_get_id_t aggr_mode__get_id_file(enum aggr_mode aggr_mode)
 
 static int perf_stat_init_aggr_mode_file(struct perf_stat *st)
 {
-	struct perf_env *env = &st->session->header.env;
+	struct perf_env *env = perf_session__env(st->session);
 	aggr_cpu_id_get_t get_id = aggr_mode__get_aggr_file(stat_config.aggr_mode);
 	bool needs_sort = stat_config.aggr_mode != AGGR_NONE;
 
@@ -1883,7 +1849,7 @@ static int add_default_events(void)
 		 * will use this approach. To determine transaction support
 		 * on an architecture test for such a metric name.
 		 */
-		if (!metricgroup__has_metric(pmu, "transaction")) {
+		if (!metricgroup__has_metric_or_groups(pmu, "transaction")) {
 			pr_err("Missing transaction metrics\n");
 			ret = -1;
 			goto out;
@@ -1894,8 +1860,7 @@ static int add_default_events(void)
 						stat_config.metric_no_threshold,
 						stat_config.user_requested_cpu_list,
 						stat_config.system_wide,
-						stat_config.hardware_aware_grouping,
-						&stat_config.metric_events);
+						stat_config.hardware_aware_grouping);
 		goto out;
 	}
 
@@ -1917,7 +1882,7 @@ static int add_default_events(void)
 			smi_reset = true;
 		}
 
-		if (!metricgroup__has_metric(pmu, "smi")) {
+		if (!metricgroup__has_metric_or_groups(pmu, "smi")) {
 			pr_err("Missing smi metrics\n");
 			ret = -1;
 			goto out;
@@ -1932,8 +1897,7 @@ static int add_default_events(void)
 						stat_config.metric_no_threshold,
 						stat_config.user_requested_cpu_list,
 						stat_config.system_wide,
-						stat_config.hardware_aware_grouping,
-						&stat_config.metric_events);
+						stat_config.hardware_aware_grouping);
 		goto out;
 	}
 
@@ -1970,8 +1934,7 @@ static int add_default_events(void)
 					      /*metric_no_threshold=*/true,
 					      stat_config.user_requested_cpu_list,
 					      stat_config.system_wide,
-					      stat_config.hardware_aware_grouping,
-					      &stat_config.metric_events) < 0) {
+					      stat_config.hardware_aware_grouping) < 0) {
 			ret = -1;
 			goto out;
 		}
@@ -2007,7 +1970,7 @@ static int add_default_events(void)
 		 * Add TopdownL1 metrics if they exist. To minimize
 		 * multiplexing, don't request threshold computation.
 		 */
-		if (metricgroup__has_metric(pmu, "Default")) {
+		if (metricgroup__has_metric_or_groups(pmu, "Default")) {
 			struct evlist *metric_evlist = evlist__new();
 
 			if (!metric_evlist) {
@@ -2020,8 +1983,7 @@ static int add_default_events(void)
 						      /*metric_no_threshold=*/true,
 						      stat_config.user_requested_cpu_list,
 						      stat_config.system_wide,
-						      stat_config.hardware_aware_grouping,
-						      &stat_config.metric_events) < 0) {
+						      stat_config.hardware_aware_grouping) < 0) {
 				ret = -1;
 				goto out;
 			}
@@ -2030,6 +1992,9 @@ static int add_default_events(void)
 				evsel->default_metricgroup = true;
 
 			evlist__splice_list_tail(evlist, &metric_evlist->core.entries);
+			metricgroup__copy_metric_events(evlist, /*cgrp=*/NULL,
+							&evlist->metric_events,
+							&metric_evlist->metric_events);
 			evlist__delete(metric_evlist);
 		}
 	}
@@ -2084,6 +2049,9 @@ out:
 	}
 	parse_events_error__exit(&err);
 	evlist__splice_list_tail(evsel_list, &evlist->core.entries);
+	metricgroup__copy_metric_events(evsel_list, /*cgrp=*/NULL,
+					&evsel_list->metric_events,
+					&evlist->metric_events);
 	evlist__delete(evlist);
 	return ret;
 }
@@ -2144,8 +2112,9 @@ static int process_stat_round_event(struct perf_session *session,
 {
 	struct perf_record_stat_round *stat_round = &event->stat_round;
 	struct timespec tsh, *ts = NULL;
-	const char **argv = session->header.env.cmdline_argv;
-	int argc = session->header.env.nr_cmdline;
+	struct perf_env *env = perf_session__env(session);
+	const char **argv = env->cmdline_argv;
+	int argc = env->nr_cmdline;
 
 	process_counters();
 
@@ -2356,6 +2325,32 @@ static void setup_system_wide(int forks)
 	}
 }
 
+#ifdef HAVE_ARCH_X86_64_SUPPORT
+static int parse_tpebs_mode(const struct option *opt, const char *str,
+			    int unset __maybe_unused)
+{
+	enum tpebs_mode *mode = opt->value;
+
+	if (!strcasecmp("mean", str)) {
+		*mode = TPEBS_MODE__MEAN;
+		return 0;
+	}
+	if (!strcasecmp("min", str)) {
+		*mode = TPEBS_MODE__MIN;
+		return 0;
+	}
+	if (!strcasecmp("max", str)) {
+		*mode = TPEBS_MODE__MAX;
+		return 0;
+	}
+	if (!strcasecmp("last", str)) {
+		*mode = TPEBS_MODE__LAST;
+		return 0;
+	}
+	return -1;
+}
+#endif // HAVE_ARCH_X86_64_SUPPORT
+
 int cmd_stat(int argc, const char **argv)
 {
 	struct opt_aggr_mode opt_mode = {};
@@ -2460,6 +2455,9 @@ int cmd_stat(int argc, const char **argv)
 #ifdef HAVE_ARCH_X86_64_SUPPORT
 	OPT_BOOLEAN(0, "record-tpebs", &tpebs_recording,
 		    "enable recording for tpebs when retire_latency required"),
+	OPT_CALLBACK(0, "tpebs-mode", &tpebs_mode, "tpebs-mode",
+		     "Mode of TPEBS recording: mean, min or max",
+		     parse_tpebs_mode),
 #endif
 	OPT_UINTEGER(0, "td-level", &stat_config.topdown_level,
 		     "Set the metrics level for the top-down statistics (0: max level)"),
@@ -2741,8 +2739,7 @@ int cmd_stat(int argc, const char **argv)
 						stat_config.metric_no_threshold,
 						stat_config.user_requested_cpu_list,
 						stat_config.system_wide,
-						stat_config.hardware_aware_grouping,
-						&stat_config.metric_events);
+						stat_config.hardware_aware_grouping);
 		zfree(&metrics);
 
 		if (ret) {
@@ -2762,8 +2759,7 @@ int cmd_stat(int argc, const char **argv)
 			goto out;
 		}
 
-		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list,
-					  &stat_config.metric_events, true) < 0) {
+		if (evlist__expand_cgroup(evsel_list, stat_config.cgroup_list, true) < 0) {
 			parse_options_usage(stat_usage, stat_options,
 					    "for-each-cgroup", 0);
 			goto out;
@@ -2938,7 +2934,6 @@ out:
 
 	evlist__delete(evsel_list);
 
-	metricgroup__rblist_exit(&stat_config.metric_events);
 	evlist__close_control(stat_config.ctl_fd, stat_config.ctl_fd_ack, &stat_config.ctl_fd_close);
 
 	return status;
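
Note on the first hunk: FREEZE_ON_SMI_PATH stays relative to the sysfs root, and the change only switches it from the /sys/devices/cpu shortcut to the canonical /sys/bus/event_source/devices/cpu location; perf resolves the actual sysfs mount point at runtime rather than hard-coding "/sys". The following standalone sketch (not part of the patch; it assumes sysfs is mounted at /sys on an x86 machine whose core PMU exposes the attribute) simply reads the attribute via the new path:

/*
 * Standalone illustration, not perf code: read freeze_on_smi through the
 * canonical event_source path used by the updated FREEZE_ON_SMI_PATH macro.
 * Assumes sysfs is mounted at /sys and the attribute exists (Intel core PMU).
 */
#include <stdio.h>

#define FREEZE_ON_SMI_PATH "bus/event_source/devices/cpu/freeze_on_smi"

int main(void)
{
	char path[256], buf[8];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/%s", FREEZE_ON_SMI_PATH);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("freeze_on_smi = %s", buf);
	fclose(f);
	return 0;
}

On the TPEBS side, with the new OPT_CALLBACK applied, an invocation such as perf stat --record-tpebs --tpebs-mode=min should select how the sampled retire_latency values are summarized; the parser accepts mean, min, max and last, even though the help string only lists the first three.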