Diffstat (limited to 'tools/perf/util/parse-events.c')
-rw-r--r--  tools/perf/util/parse-events.c  774
1 file changed, 473 insertions(+), 301 deletions(-)
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index afeb8d815bbf..8282ddf68b98 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -7,6 +7,7 @@
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/param.h>
+#include "cpumap.h"
#include "term.h"
#include "env.h"
#include "evlist.h"
@@ -16,19 +17,22 @@
#include "string2.h"
#include "strbuf.h"
#include "debug.h"
-#include <api/fs/tracing_path.h>
#include <perf/cpumap.h>
#include <util/parse-events-bison.h>
#include <util/parse-events-flex.h>
#include "pmu.h"
#include "pmus.h"
+#include "tp_pmu.h"
#include "asm/bug.h"
+#include "ui/ui.h"
#include "util/parse-branch-options.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/bpf-filter.h"
+#include "util/stat.h"
#include "util/util.h"
#include "tracepoint.h"
+#include <api/fs/tracing_path.h>
#define MAX_NAME_LEN 100
@@ -80,77 +84,21 @@ const struct event_symbol event_symbols_hw[PERF_COUNT_HW_MAX] = {
},
};
-const struct event_symbol event_symbols_sw[PERF_COUNT_SW_MAX] = {
- [PERF_COUNT_SW_CPU_CLOCK] = {
- .symbol = "cpu-clock",
- .alias = "",
- },
- [PERF_COUNT_SW_TASK_CLOCK] = {
- .symbol = "task-clock",
- .alias = "",
- },
- [PERF_COUNT_SW_PAGE_FAULTS] = {
- .symbol = "page-faults",
- .alias = "faults",
- },
- [PERF_COUNT_SW_CONTEXT_SWITCHES] = {
- .symbol = "context-switches",
- .alias = "cs",
- },
- [PERF_COUNT_SW_CPU_MIGRATIONS] = {
- .symbol = "cpu-migrations",
- .alias = "migrations",
- },
- [PERF_COUNT_SW_PAGE_FAULTS_MIN] = {
- .symbol = "minor-faults",
- .alias = "",
- },
- [PERF_COUNT_SW_PAGE_FAULTS_MAJ] = {
- .symbol = "major-faults",
- .alias = "",
- },
- [PERF_COUNT_SW_ALIGNMENT_FAULTS] = {
- .symbol = "alignment-faults",
- .alias = "",
- },
- [PERF_COUNT_SW_EMULATION_FAULTS] = {
- .symbol = "emulation-faults",
- .alias = "",
- },
- [PERF_COUNT_SW_DUMMY] = {
- .symbol = "dummy",
- .alias = "",
- },
- [PERF_COUNT_SW_BPF_OUTPUT] = {
- .symbol = "bpf-output",
- .alias = "",
- },
- [PERF_COUNT_SW_CGROUP_SWITCHES] = {
- .symbol = "cgroup-switches",
- .alias = "",
- },
+static const char *const event_types[] = {
+ [PERF_TYPE_HARDWARE] = "hardware",
+ [PERF_TYPE_SOFTWARE] = "software",
+ [PERF_TYPE_TRACEPOINT] = "tracepoint",
+ [PERF_TYPE_HW_CACHE] = "hardware-cache",
+ [PERF_TYPE_RAW] = "raw",
+ [PERF_TYPE_BREAKPOINT] = "breakpoint",
};
-const char *event_type(int type)
+const char *event_type(size_t type)
{
- switch (type) {
- case PERF_TYPE_HARDWARE:
- return "hardware";
-
- case PERF_TYPE_SOFTWARE:
- return "software";
+ if (type >= PERF_TYPE_MAX)
+ return "unknown";
- case PERF_TYPE_TRACEPOINT:
- return "tracepoint";
-
- case PERF_TYPE_HW_CACHE:
- return "hardware-cache";
-
- default:
- break;
- }
-
- return "unknown";
+ return event_types[type];
}
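Note: the event_type() rewrite above replaces a switch with a bounds-checked lookup into a designated-initializer table, so adding a PERF_TYPE_* value only needs a new table entry. A minimal, self-contained sketch of the same pattern, using toy names rather than the perf definitions:

    #include <stdio.h>

    enum toy_type { TOY_HW, TOY_SW, TOY_TP, TOY_MAX };

    static const char *const toy_names[] = {
        [TOY_HW] = "hardware",
        [TOY_SW] = "software",
        [TOY_TP] = "tracepoint",
    };

    static const char *toy_type_name(size_t type)
    {
        /* size_t is unsigned, so a negative int argument converts to a
         * huge value and is rejected by the single comparison */
        return type < TOY_MAX ? toy_names[type] : "unknown";
    }

    int main(void)
    {
        printf("%s %s\n", toy_type_name(TOY_SW), toy_type_name(99)); /* software unknown */
        return 0;
    }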
static char *get_config_str(const struct parse_events_terms *head_terms,
@@ -178,6 +126,38 @@ static char *get_config_name(const struct parse_events_terms *head_terms)
return get_config_str(head_terms, PARSE_EVENTS__TERM_TYPE_NAME);
}
+static struct perf_cpu_map *get_config_cpu(const struct parse_events_terms *head_terms)
+{
+ struct parse_events_term *term;
+ struct perf_cpu_map *cpus = NULL;
+
+ if (!head_terms)
+ return NULL;
+
+ list_for_each_entry(term, &head_terms->terms, list) {
+ if (term->type_term == PARSE_EVENTS__TERM_TYPE_CPU) {
+ struct perf_cpu_map *term_cpus;
+
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+ term_cpus = perf_cpu_map__new_int(term->val.num);
+ } else {
+ struct perf_pmu *pmu = perf_pmus__find(term->val.str);
+
+ if (pmu && perf_cpu_map__is_empty(pmu->cpus))
+ term_cpus = pmu->is_core ? cpu_map__online() : NULL;
+ else if (pmu)
+ term_cpus = perf_cpu_map__get(pmu->cpus);
+ else
+ term_cpus = perf_cpu_map__new(term->val.str);
+ }
+ perf_cpu_map__merge(&cpus, term_cpus);
+ perf_cpu_map__put(term_cpus);
+ }
+ }
+
+ return cpus;
+}
+
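Note: get_config_cpu() backs the new "cpu" event term (presumably spelled like -e 'instructions/cpu=0/' on the command line; the exact syntax is defined by the grammar, not shown in this hunk). Each term's value, a CPU number, a PMU name, or a CPU list string, becomes a temporary CPU map that is merged into one accumulated map and then released. A toy sketch of that accumulate/merge/put shape, with plain arrays instead of refcounted perf_cpu_map and duplicates ignored:

    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        const int term_cpus[][2] = { { 0, 1 }, { 4, 5 } };  /* two cpu= terms */
        int *cpus = NULL;
        int n = 0;

        for (size_t t = 0; t < 2; t++) {
            const int *tmp = term_cpus[t];  /* temporary map for this term */
            int *grown = realloc(cpus, (n + 2) * sizeof(*cpus));

            if (!grown) {
                free(cpus);
                return 1;
            }
            cpus = grown;
            cpus[n++] = tmp[0];
            cpus[n++] = tmp[1];
            /* temporary dropped here, like perf_cpu_map__put(term_cpus) */
        }
        for (int i = 0; i < n; i++)
            printf("%d ", cpus[i]);  /* 0 1 4 5 */
        printf("\n");
        free(cpus);
        return 0;
    }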
/**
* fix_raw - For each raw term see if there is an event (aka alias) in pmu that
* matches the raw's string value. If the string value matches an
@@ -227,49 +207,98 @@ __add_event(struct list_head *list, int *idx,
struct perf_event_attr *attr,
bool init_attr,
const char *name, const char *metric_id, struct perf_pmu *pmu,
- struct list_head *config_terms, bool auto_merge_stats,
- struct perf_cpu_map *cpu_list, u64 alternate_hw_config)
+ struct list_head *config_terms, struct evsel *first_wildcard_match,
+ struct perf_cpu_map *user_cpus, u64 alternate_hw_config)
{
struct evsel *evsel;
- struct perf_cpu_map *cpus = perf_cpu_map__is_empty(cpu_list) && pmu ? pmu->cpus : cpu_list;
+ bool is_pmu_core;
+ struct perf_cpu_map *cpus, *pmu_cpus;
+ bool has_user_cpus = !perf_cpu_map__is_empty(user_cpus);
- cpus = perf_cpu_map__get(cpus);
- if (pmu)
- perf_pmu__warn_invalid_formats(pmu);
+ /*
+ * Ensure the first_wildcard_match's PMU matches that of the new event
+ * being added. Otherwise try to match with another event further down
+ * the evlist.
+ */
+ if (first_wildcard_match) {
+ struct evsel *pos = list_prev_entry(first_wildcard_match, core.node);
+
+ first_wildcard_match = NULL;
+ list_for_each_entry_continue(pos, list, core.node) {
+ if (perf_pmu__name_no_suffix_match(pos->pmu, pmu->name)) {
+ first_wildcard_match = pos;
+ break;
+ }
+ if (pos->pmu->is_core && (!pmu || pmu->is_core)) {
+ first_wildcard_match = pos;
+ break;
+ }
+ }
+ }
- if (pmu && (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX)) {
- perf_pmu__warn_invalid_config(pmu, attr->config, name,
- PERF_PMU_FORMAT_VALUE_CONFIG, "config");
- perf_pmu__warn_invalid_config(pmu, attr->config1, name,
- PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
- perf_pmu__warn_invalid_config(pmu, attr->config2, name,
- PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
- perf_pmu__warn_invalid_config(pmu, attr->config3, name,
- PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
+ if (pmu) {
+ perf_pmu__warn_invalid_formats(pmu);
+ if (attr->type == PERF_TYPE_RAW || attr->type >= PERF_TYPE_MAX) {
+ perf_pmu__warn_invalid_config(pmu, attr->config, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG, "config");
+ perf_pmu__warn_invalid_config(pmu, attr->config1, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG1, "config1");
+ perf_pmu__warn_invalid_config(pmu, attr->config2, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG2, "config2");
+ perf_pmu__warn_invalid_config(pmu, attr->config3, name,
+ PERF_PMU_FORMAT_VALUE_CONFIG3, "config3");
+ }
+ }
+ /*
+ * If a PMU wasn't given, such as for legacy events, find now that
+ * warnings won't be generated.
+ */
+ if (!pmu)
+ pmu = perf_pmus__find_by_attr(attr);
+
+ if (pmu) {
+ is_pmu_core = pmu->is_core;
+ pmu_cpus = perf_cpu_map__get(pmu->cpus);
+ if (perf_cpu_map__is_empty(pmu_cpus))
+ pmu_cpus = cpu_map__online();
+ } else {
+ is_pmu_core = (attr->type == PERF_TYPE_HARDWARE ||
+ attr->type == PERF_TYPE_HW_CACHE);
+ pmu_cpus = is_pmu_core ? cpu_map__online() : NULL;
}
+
+ if (has_user_cpus)
+ cpus = perf_cpu_map__get(user_cpus);
+ else
+ cpus = perf_cpu_map__get(pmu_cpus);
+
if (init_attr)
event_attr_init(attr);
evsel = evsel__new_idx(attr, *idx);
- if (!evsel) {
- perf_cpu_map__put(cpus);
- return NULL;
+ if (!evsel)
+ goto out_err;
+
+ if (name) {
+ evsel->name = strdup(name);
+ if (!evsel->name)
+ goto out_err;
+ }
+
+ if (metric_id) {
+ evsel->metric_id = strdup(metric_id);
+ if (!evsel->metric_id)
+ goto out_err;
}
(*idx)++;
evsel->core.cpus = cpus;
- evsel->core.own_cpus = perf_cpu_map__get(cpus);
+ evsel->core.pmu_cpus = pmu_cpus;
evsel->core.requires_cpu = pmu ? pmu->is_uncore : false;
- evsel->core.is_pmu_core = pmu ? pmu->is_core : false;
- evsel->auto_merge_stats = auto_merge_stats;
+ evsel->core.is_pmu_core = is_pmu_core;
evsel->pmu = pmu;
evsel->alternate_hw_config = alternate_hw_config;
-
- if (name)
- evsel->name = strdup(name);
-
- if (metric_id)
- evsel->metric_id = strdup(metric_id);
+ evsel->first_wildcard_match = first_wildcard_match;
if (config_terms)
list_splice_init(config_terms, &evsel->config_terms);
@@ -277,7 +306,17 @@ __add_event(struct list_head *list, int *idx,
if (list)
list_add_tail(&evsel->core.node, list);
+ if (has_user_cpus)
+ evsel__warn_user_requested_cpus(evsel, user_cpus);
+
return evsel;
+out_err:
+ perf_cpu_map__put(cpus);
+ perf_cpu_map__put(pmu_cpus);
+ zfree(&evsel->name);
+ zfree(&evsel->metric_id);
+ free(evsel);
+ return NULL;
}
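Note: the first_wildcard_match fixup in __add_event() rewinds one node and walks forward (list_prev_entry plus list_for_each_entry_continue) so the candidate itself is reconsidered, stopping at the first event whose PMU matches the one being added. An equivalent array-based sketch of that "resume from candidate" scan, with toy types and the suffix-insensitive and core-PMU matching omitted:

    #include <stdio.h>
    #include <string.h>

    struct toy_evsel { const char *pmu; };

    static int rematch(const struct toy_evsel *evs, int n, int candidate,
                       const char *pmu)
    {
        for (int i = candidate; i < n; i++) {
            if (strcmp(evs[i].pmu, pmu) == 0)
                return i;
        }
        return -1;  /* no earlier wildcard event on this PMU */
    }

    int main(void)
    {
        const struct toy_evsel evs[] = { {"cpu_core"}, {"cpu_atom"}, {"cpu_core"} };

        printf("%d\n", rematch(evs, 3, /*candidate=*/1, "cpu_core")); /* 2 */
        return 0;
    }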
struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
@@ -286,7 +325,7 @@ struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
{
return __add_event(/*list=*/NULL, &idx, attr, /*init_attr=*/false, name,
metric_id, pmu, /*config_terms=*/NULL,
- /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
+ /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
/*alternate_hw_config=*/PERF_COUNT_HW_MAX);
}
@@ -297,7 +336,7 @@ static int add_event(struct list_head *list, int *idx,
{
return __add_event(list, idx, attr, /*init_attr*/true, name, metric_id,
/*pmu=*/NULL, config_terms,
- /*auto_merge_stats=*/false, /*cpu_list=*/NULL,
+ /*first_wildcard_match=*/NULL, /*cpu_list=*/NULL,
alternate_hw_config) ? 0 : -ENOMEM;
}
@@ -422,7 +461,7 @@ bool parse_events__filter_pmu(const struct parse_events_state *parse_state,
static int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, struct perf_pmu *pmu,
const struct parse_events_terms *const_parsed_terms,
- bool auto_merge_stats, u64 alternate_hw_config);
+ struct evsel *first_wildcard_match, u64 alternate_hw_config);
int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
struct parse_events_state *parse_state,
@@ -432,11 +471,13 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
bool found_supported = false;
const char *config_name = get_config_name(parsed_terms);
const char *metric_id = get_config_metric_id(parsed_terms);
+ struct perf_cpu_map *cpus = get_config_cpu(parsed_terms);
+ int ret = 0;
+ struct evsel *first_wildcard_match = NULL;
- while ((pmu = perf_pmus__scan(pmu)) != NULL) {
+ while ((pmu = perf_pmus__scan_for_event(pmu, name)) != NULL) {
LIST_HEAD(config_terms);
struct perf_event_attr attr;
- int ret;
if (parse_events__filter_pmu(parse_state, pmu))
continue;
@@ -448,10 +489,13 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
*/
ret = parse_events_add_pmu(parse_state, list, pmu,
parsed_terms,
- perf_pmu__auto_merge_stats(pmu),
+ first_wildcard_match,
/*alternate_hw_config=*/PERF_COUNT_HW_MAX);
if (ret)
- return ret;
+ goto out_err;
+ if (first_wildcard_match == NULL)
+ first_wildcard_match =
+ container_of(list->prev, struct evsel, core.node);
continue;
}
@@ -471,25 +515,32 @@ int parse_events_add_cache(struct list_head *list, int *idx, const char *name,
if (parsed_terms) {
if (config_attr(&attr, parsed_terms, parse_state->error,
- config_term_common))
- return -EINVAL;
-
- if (get_config_terms(parsed_terms, &config_terms))
- return -ENOMEM;
+ config_term_common)) {
+ ret = -EINVAL;
+ goto out_err;
+ }
+ if (get_config_terms(parsed_terms, &config_terms)) {
+ ret = -ENOMEM;
+ goto out_err;
+ }
}
if (__add_event(list, idx, &attr, /*init_attr*/true, config_name ?: name,
- metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
- /*cpu_list=*/NULL,
- /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
- return -ENOMEM;
+ metric_id, pmu, &config_terms, first_wildcard_match,
+ cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) == NULL)
+ ret = -ENOMEM;
+ if (first_wildcard_match == NULL)
+ first_wildcard_match = container_of(list->prev, struct evsel, core.node);
free_config_terms(&config_terms);
+ if (ret)
+ goto out_err;
}
+out_err:
+ perf_cpu_map__put(cpus);
return found_supported ? 0 : -EINVAL;
}
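Note: the error handling in parse_events_add_cache() is reworked from early returns into a single goto out_err path, so the accumulated CPU map is always released. A minimal sketch of that shape, with a toy resource standing in for the map:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    static int do_work(int fail)
    {
        char *res = malloc(16);
        int ret = 0;

        if (!res)
            return -ENOMEM;
        if (fail) {
            ret = -EINVAL;
            goto out_err;
        }
        printf("worked\n");
    out_err:
        free(res);  /* single cleanup point, like perf_cpu_map__put(cpus) */
        return ret;
    }

    int main(void)
    {
        return do_work(0) == 0 && do_work(1) == -EINVAL ? 0 : 1;
    }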
-#ifdef HAVE_LIBTRACEEVENT
static void tracepoint_error(struct parse_events_error *e, int err,
const char *sys, const char *name, int column)
{
@@ -548,103 +599,84 @@ static int add_tracepoint(struct parse_events_state *parse_state,
return 0;
}
-static int add_tracepoint_multi_event(struct parse_events_state *parse_state,
- struct list_head *list,
- const char *sys_name, const char *evt_name,
- struct parse_events_error *err,
- struct parse_events_terms *head_config, YYLTYPE *loc)
-{
- char *evt_path;
- struct dirent *evt_ent;
- DIR *evt_dir;
- int ret = 0, found = 0;
-
- evt_path = get_events_file(sys_name);
- if (!evt_path) {
- tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
- return -1;
- }
- evt_dir = opendir(evt_path);
- if (!evt_dir) {
- put_events_file(evt_path);
- tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
- return -1;
- }
+struct add_tracepoint_multi_args {
+ struct parse_events_state *parse_state;
+ struct list_head *list;
+ const char *sys_glob;
+ const char *evt_glob;
+ struct parse_events_error *err;
+ struct parse_events_terms *head_config;
+ YYLTYPE *loc;
+ int found;
+};
- while (!ret && (evt_ent = readdir(evt_dir))) {
- if (!strcmp(evt_ent->d_name, ".")
- || !strcmp(evt_ent->d_name, "..")
- || !strcmp(evt_ent->d_name, "enable")
- || !strcmp(evt_ent->d_name, "filter"))
- continue;
+static int add_tracepoint_multi_event_cb(void *state, const char *sys_name, const char *evt_name)
+{
+ struct add_tracepoint_multi_args *args = state;
+ int ret;
- if (!strglobmatch(evt_ent->d_name, evt_name))
- continue;
+ if (!strglobmatch(evt_name, args->evt_glob))
+ return 0;
- found++;
+ args->found++;
+ ret = add_tracepoint(args->parse_state, args->list, sys_name, evt_name,
+ args->err, args->head_config, args->loc);
- ret = add_tracepoint(parse_state, list, sys_name, evt_ent->d_name,
- err, head_config, loc);
- }
+ return ret;
+}
- if (!found) {
- tracepoint_error(err, ENOENT, sys_name, evt_name, loc->first_column);
- ret = -1;
+static int add_tracepoint_multi_event(struct add_tracepoint_multi_args *args, const char *sys_name)
+{
+ if (strpbrk(args->evt_glob, "*?") == NULL) {
+ /* Not a glob. */
+ args->found++;
+ return add_tracepoint(args->parse_state, args->list, sys_name, args->evt_glob,
+ args->err, args->head_config, args->loc);
}
- put_events_file(evt_path);
- closedir(evt_dir);
- return ret;
+ return tp_pmu__for_each_tp_event(sys_name, args, add_tracepoint_multi_event_cb);
}
-static int add_tracepoint_event(struct parse_events_state *parse_state,
- struct list_head *list,
- const char *sys_name, const char *evt_name,
- struct parse_events_error *err,
- struct parse_events_terms *head_config, YYLTYPE *loc)
+static int add_tracepoint_multi_sys_cb(void *state, const char *sys_name)
{
- return strpbrk(evt_name, "*?") ?
- add_tracepoint_multi_event(parse_state, list, sys_name, evt_name,
- err, head_config, loc) :
- add_tracepoint(parse_state, list, sys_name, evt_name,
- err, head_config, loc);
+ struct add_tracepoint_multi_args *args = state;
+
+ if (!strglobmatch(sys_name, args->sys_glob))
+ return 0;
+
+ return add_tracepoint_multi_event(args, sys_name);
}
static int add_tracepoint_multi_sys(struct parse_events_state *parse_state,
struct list_head *list,
- const char *sys_name, const char *evt_name,
+ const char *sys_glob, const char *evt_glob,
struct parse_events_error *err,
struct parse_events_terms *head_config, YYLTYPE *loc)
{
- struct dirent *events_ent;
- DIR *events_dir;
- int ret = 0;
+ struct add_tracepoint_multi_args args = {
+ .parse_state = parse_state,
+ .list = list,
+ .sys_glob = sys_glob,
+ .evt_glob = evt_glob,
+ .err = err,
+ .head_config = head_config,
+ .loc = loc,
+ .found = 0,
+ };
+ int ret;
- events_dir = tracing_events__opendir();
- if (!events_dir) {
- tracepoint_error(err, errno, sys_name, evt_name, loc->first_column);
- return -1;
+ if (strpbrk(sys_glob, "*?") == NULL) {
+ /* Not a glob. */
+ ret = add_tracepoint_multi_event(&args, sys_glob);
+ } else {
+ ret = tp_pmu__for_each_tp_sys(&args, add_tracepoint_multi_sys_cb);
}
-
- while (!ret && (events_ent = readdir(events_dir))) {
- if (!strcmp(events_ent->d_name, ".")
- || !strcmp(events_ent->d_name, "..")
- || !strcmp(events_ent->d_name, "enable")
- || !strcmp(events_ent->d_name, "header_event")
- || !strcmp(events_ent->d_name, "header_page"))
- continue;
-
- if (!strglobmatch(events_ent->d_name, sys_name))
- continue;
-
- ret = add_tracepoint_event(parse_state, list, events_ent->d_name,
- evt_name, err, head_config, loc);
+ if (args.found == 0) {
+ tracepoint_error(err, ENOENT, sys_glob, evt_glob, loc->first_column);
+ return -ENOENT;
}
-
- closedir(events_dir);
return ret;
}
-#endif /* HAVE_LIBTRACEEVENT */
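Note: the tracepoint refactor above replaces open-coded readdir() loops with callback iterators (tp_pmu__for_each_tp_sys()/tp_pmu__for_each_tp_event()) plus strglobmatch(). A self-contained sketch of the same shape; fnmatch(3) stands in for perf's strglobmatch(), which has its own glob rules, and the real event callback also receives the system name:

    #include <fnmatch.h>
    #include <stdio.h>

    typedef int (*tp_cb)(void *state, const char *name);

    /* Visit every name, stopping early on a non-zero return. */
    static int for_each_name(const char *const *names, int n, void *state, tp_cb cb)
    {
        for (int i = 0; i < n; i++) {
            int ret = cb(state, names[i]);

            if (ret)
                return ret;
        }
        return 0;
    }

    struct match_args { const char *glob; int found; };

    static int match_cb(void *state, const char *name)
    {
        struct match_args *args = state;

        if (fnmatch(args->glob, name, 0) == 0) {
            args->found++;
            printf("matched %s\n", name);
        }
        return 0;
    }

    int main(void)
    {
        const char *const evts[] = { "sched_switch", "sched_wakeup", "sys_exit" };
        struct match_args args = { .glob = "sched_*", .found = 0 };

        for_each_name(evts, 3, &args, match_cb);
        return args.found ? 0 : 1;
    }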
size_t default_breakpoint_len(void)
{
@@ -795,11 +827,13 @@ const char *parse_events__term_type_str(enum parse_events__term_type term_type)
[PARSE_EVENTS__TERM_TYPE_DRV_CFG] = "driver-config",
[PARSE_EVENTS__TERM_TYPE_PERCORE] = "percore",
[PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT] = "aux-output",
+ [PARSE_EVENTS__TERM_TYPE_AUX_ACTION] = "aux-action",
[PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE] = "aux-sample-size",
[PARSE_EVENTS__TERM_TYPE_METRIC_ID] = "metric-id",
[PARSE_EVENTS__TERM_TYPE_RAW] = "raw",
[PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE] = "legacy-cache",
[PARSE_EVENTS__TERM_TYPE_HARDWARE] = "hardware",
+ [PARSE_EVENTS__TERM_TYPE_CPU] = "cpu",
};
if ((unsigned int)term_type >= __PARSE_EVENTS__TERM_TYPE_NR)
return "unknown term";
@@ -829,6 +863,7 @@ config_term_avail(enum parse_events__term_type term_type, struct parse_events_er
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_PERIOD:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
return true;
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_SAMPLE_FREQ:
@@ -844,6 +879,7 @@ config_term_avail(enum parse_events__term_type term_type, struct parse_events_er
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
@@ -963,6 +999,9 @@ do { \
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
CHECK_TYPE_VAL(NUM);
break;
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
+ CHECK_TYPE_VAL(STR);
+ break;
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
CHECK_TYPE_VAL(NUM);
if (term->val.num > UINT_MAX) {
@@ -972,6 +1011,32 @@ do { \
return -EINVAL;
}
break;
+ case PARSE_EVENTS__TERM_TYPE_CPU: {
+ struct perf_cpu_map *map;
+
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM) {
+ if (term->val.num >= (u64)cpu__max_present_cpu().cpu) {
+ parse_events_error__handle(err, term->err_val,
+ strdup("too big"),
+ /*help=*/NULL);
+ return -EINVAL;
+ }
+ break;
+ }
+ assert(term->type_val == PARSE_EVENTS__TERM_TYPE_STR);
+ if (perf_pmus__find(term->val.str) != NULL)
+ break;
+
+ map = perf_cpu_map__new(term->val.str);
+ if (!map) {
+ parse_events_error__handle(err, term->err_val,
+ strdup("not a valid PMU or CPU number"),
+ /*help=*/NULL);
+ return -EINVAL;
+ }
+ perf_cpu_map__put(map);
+ break;
+ }
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_USER:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
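Note: the TERM_TYPE_CPU case above validates the new term in two ways: a numeric value must be below the maximum present CPU, and a string must name a known PMU or parse as a CPU list. A toy version of that decision, where pmu_exists() and cpu_list_ok() are stand-ins for perf_pmus__find() and perf_cpu_map__new():

    #include <stdio.h>
    #include <string.h>

    #define MAX_PRESENT_CPU 8   /* stand-in for cpu__max_present_cpu() */

    static int pmu_exists(const char *s)
    {
        return strcmp(s, "cpu_core") == 0;  /* pretend one PMU exists */
    }

    static int cpu_list_ok(const char *s)
    {
        return *s >= '0' && *s <= '9';      /* crude "0", "0-3", "1,3" check */
    }

    static const char *check_cpu_term(const char *s, long long num)
    {
        if (!s) /* numeric term */
            return num >= MAX_PRESENT_CPU ? "too big" : "ok";
        if (pmu_exists(s))
            return "ok (PMU)";
        return cpu_list_ok(s) ? "ok (CPU list)" : "not a valid PMU or CPU number";
    }

    int main(void)
    {
        printf("%s\n", check_cpu_term(NULL, 99));       /* too big */
        printf("%s\n", check_cpu_term("cpu_core", 0));  /* ok (PMU) */
        printf("%s\n", check_cpu_term("0-3", 0));       /* ok (CPU list) */
        return 0;
    }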
@@ -1066,7 +1131,6 @@ static int config_term_pmu(struct perf_event_attr *attr,
return config_term_common(attr, term, err);
}
-#ifdef HAVE_LIBTRACEEVENT
static int config_term_tracepoint(struct perf_event_attr *attr,
struct parse_events_term *term,
struct parse_events_error *err)
@@ -1081,6 +1145,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
case PARSE_EVENTS__TERM_TYPE_OVERWRITE:
case PARSE_EVENTS__TERM_TYPE_NOOVERWRITE:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
return config_term_common(attr, term, err);
case PARSE_EVENTS__TERM_TYPE_USER:
@@ -1099,6 +1164,7 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
default:
if (err) {
parse_events_error__handle(err, term->err_term,
@@ -1111,7 +1177,6 @@ static int config_term_tracepoint(struct perf_event_attr *attr,
return 0;
}
-#endif
static int config_attr(struct perf_event_attr *attr,
const struct parse_events_terms *head,
@@ -1217,6 +1282,9 @@ do { \
ADD_CONFIG_TERM_VAL(AUX_OUTPUT, aux_output,
term->val.num ? 1 : 0, term->weak);
break;
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
+ ADD_CONFIG_TERM_STR(AUX_ACTION, term->val.str, term->weak);
+ break;
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
ADD_CONFIG_TERM_VAL(AUX_SAMPLE_SIZE, aux_sample_size,
term->val.num, term->weak);
@@ -1231,6 +1299,7 @@ do { \
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
default:
break;
}
@@ -1279,11 +1348,13 @@ static int get_config_chgs(struct perf_pmu *pmu, struct parse_events_terms *head
case PARSE_EVENTS__TERM_TYPE_DRV_CFG:
case PARSE_EVENTS__TERM_TYPE_PERCORE:
case PARSE_EVENTS__TERM_TYPE_AUX_OUTPUT:
+ case PARSE_EVENTS__TERM_TYPE_AUX_ACTION:
case PARSE_EVENTS__TERM_TYPE_AUX_SAMPLE_SIZE:
case PARSE_EVENTS__TERM_TYPE_METRIC_ID:
case PARSE_EVENTS__TERM_TYPE_RAW:
case PARSE_EVENTS__TERM_TYPE_LEGACY_CACHE:
case PARSE_EVENTS__TERM_TYPE_HARDWARE:
+ case PARSE_EVENTS__TERM_TYPE_CPU:
default:
break;
}
@@ -1303,7 +1374,7 @@ int parse_events_add_tracepoint(struct parse_events_state *parse_state,
struct parse_events_terms *head_config, void *loc_)
{
YYLTYPE *loc = loc_;
-#ifdef HAVE_LIBTRACEEVENT
+
if (head_config) {
struct perf_event_attr attr;
@@ -1312,32 +1383,20 @@ int parse_events_add_tracepoint(struct parse_events_state *parse_state,
return -EINVAL;
}
- if (strpbrk(sys, "*?"))
- return add_tracepoint_multi_sys(parse_state, list, sys, event,
- err, head_config, loc);
- else
- return add_tracepoint_event(parse_state, list, sys, event,
- err, head_config, loc);
-#else
- (void)parse_state;
- (void)list;
- (void)sys;
- (void)event;
- (void)head_config;
- parse_events_error__handle(err, loc->first_column, strdup("unsupported tracepoint"),
- strdup("libtraceevent is necessary for tracepoint support"));
- return -1;
-#endif
+ return add_tracepoint_multi_sys(parse_state, list, sys, event,
+ err, head_config, loc);
}
static int __parse_events_add_numeric(struct parse_events_state *parse_state,
struct list_head *list,
struct perf_pmu *pmu, u32 type, u32 extended_type,
- u64 config, const struct parse_events_terms *head_config)
+ u64 config, const struct parse_events_terms *head_config,
+ struct evsel *first_wildcard_match)
{
struct perf_event_attr attr;
LIST_HEAD(config_terms);
const char *name, *metric_id;
+ struct perf_cpu_map *cpus;
int ret;
memset(&attr, 0, sizeof(attr));
@@ -1359,10 +1418,11 @@ static int __parse_events_add_numeric(struct parse_events_state *parse_state,
name = get_config_name(head_config);
metric_id = get_config_metric_id(head_config);
+ cpus = get_config_cpu(head_config);
ret = __add_event(list, &parse_state->idx, &attr, /*init_attr*/true, name,
- metric_id, pmu, &config_terms, /*auto_merge_stats=*/false,
- /*cpu_list=*/NULL, /*alternate_hw_config=*/PERF_COUNT_HW_MAX
- ) == NULL ? -ENOMEM : 0;
+ metric_id, pmu, &config_terms, first_wildcard_match,
+ cpus, /*alternate_hw_config=*/PERF_COUNT_HW_MAX) ? 0 : -ENOMEM;
+ perf_cpu_map__put(cpus);
free_config_terms(&config_terms);
return ret;
}
@@ -1378,6 +1438,7 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
/* Wildcards on numeric values are only supported by core PMUs. */
if (wildcard && perf_pmus__supports_extended_type()) {
+ struct evsel *first_wildcard_match = NULL;
while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
int ret;
@@ -1387,15 +1448,20 @@ int parse_events_add_numeric(struct parse_events_state *parse_state,
ret = __parse_events_add_numeric(parse_state, list, pmu,
type, pmu->type,
- config, head_config);
+ config, head_config,
+ first_wildcard_match);
if (ret)
return ret;
+ if (first_wildcard_match == NULL)
+ first_wildcard_match =
+ container_of(list->prev, struct evsel, core.node);
}
if (found_supported)
return 0;
}
return __parse_events_add_numeric(parse_state, list, perf_pmus__find_by_type(type),
- type, /*extended_type=*/0, config, head_config);
+ type, /*extended_type=*/0, config, head_config,
+ /*first_wildcard_match=*/NULL);
}
static bool config_term_percore(struct list_head *config_terms)
@@ -1413,7 +1479,7 @@ static bool config_term_percore(struct list_head *config_terms)
static int parse_events_add_pmu(struct parse_events_state *parse_state,
struct list_head *list, struct perf_pmu *pmu,
const struct parse_events_terms *const_parsed_terms,
- bool auto_merge_stats, u64 alternate_hw_config)
+ struct evsel *first_wildcard_match, u64 alternate_hw_config)
{
struct perf_event_attr attr;
struct perf_pmu_info info;
@@ -1422,6 +1488,7 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
LIST_HEAD(config_terms);
struct parse_events_terms parsed_terms;
bool alias_rewrote_terms = false;
+ struct perf_cpu_map *term_cpu = NULL;
if (verbose > 1) {
struct strbuf sb;
@@ -1449,7 +1516,7 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel = __add_event(list, &parse_state->idx, &attr,
/*init_attr=*/true, /*name=*/NULL,
/*metric_id=*/NULL, pmu,
- /*config_terms=*/NULL, auto_merge_stats,
+ /*config_terms=*/NULL, first_wildcard_match,
/*cpu_list=*/NULL, alternate_hw_config);
return evsel ? 0 : -ENOMEM;
}
@@ -1516,11 +1583,12 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
return -EINVAL;
}
+ term_cpu = get_config_cpu(&parsed_terms);
evsel = __add_event(list, &parse_state->idx, &attr, /*init_attr=*/true,
get_config_name(&parsed_terms),
get_config_metric_id(&parsed_terms), pmu,
- &config_terms, auto_merge_stats, /*cpu_list=*/NULL,
- alternate_hw_config);
+ &config_terms, first_wildcard_match, term_cpu, alternate_hw_config);
+ perf_cpu_map__put(term_cpu);
if (!evsel) {
parse_events_terms__exit(&parsed_terms);
return -ENOMEM;
@@ -1537,6 +1605,10 @@ static int parse_events_add_pmu(struct parse_events_state *parse_state,
evsel->scale = info.scale;
evsel->per_pkg = info.per_pkg;
evsel->snapshot = info.snapshot;
+ evsel->retirement_latency.mean = info.retirement_latency_mean;
+ evsel->retirement_latency.min = info.retirement_latency_min;
+ evsel->retirement_latency.max = info.retirement_latency_max;
+
return 0;
}
@@ -1552,6 +1624,7 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
int ok = 0;
const char *config;
struct parse_events_terms parsed_terms;
+ struct evsel *first_wildcard_match = NULL;
*listp = NULL;
@@ -1583,8 +1656,7 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
INIT_LIST_HEAD(list);
- while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- bool auto_merge_stats;
+ while ((pmu = perf_pmus__scan_for_event(pmu, event_name)) != NULL) {
if (parse_events__filter_pmu(parse_state, pmu))
continue;
@@ -1592,9 +1664,8 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
if (!perf_pmu__have_event(pmu, event_name))
continue;
- auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
if (!parse_events_add_pmu(parse_state, list, pmu,
- &parsed_terms, auto_merge_stats, hw_config)) {
+ &parsed_terms, first_wildcard_match, hw_config)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
@@ -1603,11 +1674,13 @@ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
strbuf_release(&sb);
ok++;
}
+ if (first_wildcard_match == NULL)
+ first_wildcard_match = container_of(list->prev, struct evsel, core.node);
}
if (parse_state->fake_pmu) {
if (!parse_events_add_pmu(parse_state, list, perf_pmus__fake_pmu(), &parsed_terms,
- /*auto_merge_stats=*/true, hw_config)) {
+ first_wildcard_match, hw_config)) {
struct strbuf sb;
strbuf_init(&sb, /*hint=*/ 0);
@@ -1638,6 +1711,7 @@ int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state
struct perf_pmu *pmu;
int ok = 0;
char *help;
+ struct evsel *first_wildcard_match = NULL;
*listp = malloc(sizeof(**listp));
if (!*listp)
@@ -1648,32 +1722,35 @@ int parse_events_multi_pmu_add_or_add_pmu(struct parse_events_state *parse_state
/* Attempt to add to list assuming event_or_pmu is a PMU name. */
pmu = perf_pmus__find(event_or_pmu);
if (pmu && !parse_events_add_pmu(parse_state, *listp, pmu, const_parsed_terms,
- /*auto_merge_stats=*/false,
+ first_wildcard_match,
/*alternate_hw_config=*/PERF_COUNT_HW_MAX))
return 0;
if (parse_state->fake_pmu) {
if (!parse_events_add_pmu(parse_state, *listp, perf_pmus__fake_pmu(),
const_parsed_terms,
- /*auto_merge_stats=*/false,
+ first_wildcard_match,
/*alternate_hw_config=*/PERF_COUNT_HW_MAX))
return 0;
}
pmu = NULL;
/* Failed to add, try wildcard expansion of event_or_pmu as a PMU name. */
- while ((pmu = perf_pmus__scan(pmu)) != NULL) {
- if (!parse_events__filter_pmu(parse_state, pmu) &&
- perf_pmu__match(pmu, event_or_pmu)) {
- bool auto_merge_stats = perf_pmu__auto_merge_stats(pmu);
-
- if (!parse_events_add_pmu(parse_state, *listp, pmu,
- const_parsed_terms,
- auto_merge_stats,
- /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
- ok++;
- parse_state->wild_card_pmus = true;
- }
+ while ((pmu = perf_pmus__scan_matching_wildcard(pmu, event_or_pmu)) != NULL) {
+
+ if (parse_events__filter_pmu(parse_state, pmu))
+ continue;
+
+ if (!parse_events_add_pmu(parse_state, *listp, pmu,
+ const_parsed_terms,
+ first_wildcard_match,
+ /*alternate_hw_config=*/PERF_COUNT_HW_MAX)) {
+ ok++;
+ parse_state->wild_card_pmus = true;
+ }
+ if (first_wildcard_match == NULL) {
+ first_wildcard_match =
+ container_of((*listp)->prev, struct evsel, core.node);
}
}
if (ok)
@@ -1731,13 +1808,11 @@ static int parse_events__modifier_list(struct parse_events_state *parse_state,
int eH = group ? evsel->core.attr.exclude_host : 0;
int eG = group ? evsel->core.attr.exclude_guest : 0;
int exclude = eu | ek | eh;
- int exclude_GH = group ? evsel->exclude_GH : 0;
+ int exclude_GH = eG | eH;
if (mod.user) {
if (!exclude)
exclude = eu = ek = eh = 1;
- if (!exclude_GH && !perf_guest && exclude_GH_default)
- eG = 1;
eu = 0;
}
if (mod.kernel) {
@@ -1760,6 +1835,13 @@ static int parse_events__modifier_list(struct parse_events_state *parse_state,
exclude_GH = eG = eH = 1;
eH = 0;
}
+ if (!exclude_GH && exclude_GH_default) {
+ if (perf_host)
+ eG = 1;
+ else if (perf_guest)
+ eH = 1;
+ }
+
evsel->core.attr.exclude_user = eu;
evsel->core.attr.exclude_kernel = ek;
evsel->core.attr.exclude_hv = eh;
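Note: the modifier change above moves the guest/host defaulting after all modifiers are applied: when an event asks for neither G (guest) nor H (host), the side perf is not running on is excluded. A toy of that logic, where perf_host/perf_guest mimic perf's globals and exclude_GH_default is folded in as true for the sketch:

    #include <stdio.h>

    static void default_gh(int exclude_GH, int perf_host, int perf_guest,
                           int *eG, int *eH)
    {
        if (!exclude_GH) {
            if (perf_host)
                *eG = 1;    /* count host only */
            else if (perf_guest)
                *eH = 1;    /* count guest only */
        }
    }

    int main(void)
    {
        int eG = 0, eH = 0;

        default_gh(/*exclude_GH=*/0, /*perf_host=*/1, /*perf_guest=*/0, &eG, &eH);
        printf("exclude_guest=%d exclude_host=%d\n", eG, eH); /* 1 0 */
        return 0;
    }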
@@ -1978,59 +2060,78 @@ static int evlist__cmp(void *_fg_idx, const struct list_head *l, const struct li
int *force_grouped_idx = _fg_idx;
int lhs_sort_idx, rhs_sort_idx, ret;
const char *lhs_pmu_name, *rhs_pmu_name;
- bool lhs_has_group, rhs_has_group;
/*
- * First sort by grouping/leader. Read the leader idx only if the evsel
- * is part of a group, by default ungrouped events will be sorted
- * relative to grouped events based on where the first ungrouped event
- * occurs. If both events don't have a group we want to fall-through to
- * the arch specific sorting, that can reorder and fix things like
- * Intel's topdown events.
+ * Get the indexes of the 2 events to sort. If the events are
+ * in groups then the leader's index is used otherwise the
+ * event's index is used. An index may be forced for events that
+ * must be in the same group, namely Intel topdown events.
*/
- if (lhs_core->leader != lhs_core || lhs_core->nr_members > 1) {
- lhs_has_group = true;
- lhs_sort_idx = lhs_core->leader->idx;
+ if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)) {
+ lhs_sort_idx = *force_grouped_idx;
} else {
- lhs_has_group = false;
- lhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(lhs)
- ? *force_grouped_idx
- : lhs_core->idx;
- }
- if (rhs_core->leader != rhs_core || rhs_core->nr_members > 1) {
- rhs_has_group = true;
- rhs_sort_idx = rhs_core->leader->idx;
+ bool lhs_has_group = lhs_core->leader != lhs_core || lhs_core->nr_members > 1;
+
+ lhs_sort_idx = lhs_has_group ? lhs_core->leader->idx : lhs_core->idx;
+ }
+ if (*force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)) {
+ rhs_sort_idx = *force_grouped_idx;
} else {
- rhs_has_group = false;
- rhs_sort_idx = *force_grouped_idx != -1 && arch_evsel__must_be_in_group(rhs)
- ? *force_grouped_idx
- : rhs_core->idx;
+ bool rhs_has_group = rhs_core->leader != rhs_core || rhs_core->nr_members > 1;
+
+ rhs_sort_idx = rhs_has_group ? rhs_core->leader->idx : rhs_core->idx;
}
+ /* If the indices differ then respect the insertion order. */
if (lhs_sort_idx != rhs_sort_idx)
return lhs_sort_idx - rhs_sort_idx;
- /* Group by PMU if there is a group. Groups can't span PMUs. */
- if (lhs_has_group && rhs_has_group) {
- lhs_pmu_name = lhs->group_pmu_name;
- rhs_pmu_name = rhs->group_pmu_name;
- ret = strcmp(lhs_pmu_name, rhs_pmu_name);
- if (ret)
- return ret;
- }
+ /*
+ * Ignoring forcing, lhs_sort_idx == rhs_sort_idx so lhs and rhs should
+ * be in the same group. Events in the same group need to be ordered by
+ * their grouping PMU name as the group will be broken to ensure only
+ * events on the same PMU are programmed together.
+ *
+ * With forcing the lhs_sort_idx == rhs_sort_idx shows that one or both
+ * events are being forced to be at force_group_index. If only one event
+ * is being forced then the other event is the group leader of the group
+ * we're trying to force the event into. Ensure for the force grouped
+ * case that the PMU name ordering is also respected.
+ */
+ lhs_pmu_name = lhs->group_pmu_name;
+ rhs_pmu_name = rhs->group_pmu_name;
+ ret = strcmp(lhs_pmu_name, rhs_pmu_name);
+ if (ret)
+ return ret;
- /* Architecture specific sorting. */
+ /*
+ * Architecture specific sorting, by default sort events in the same
+ * group with the same PMU by their insertion index. On Intel topdown
+ * constraints must be adhered to - slots first, etc.
+ */
return arch_evlist__cmp(lhs, rhs);
}
+int __weak arch_evlist__add_required_events(struct list_head *list __always_unused)
+{
+ return 0;
+}
+
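Note: evlist__cmp() above is a multi-key comparator: insertion/leader index first, then the group PMU name, then the arch-specific tiebreak. A runnable sketch of the first two keys with qsort(3); the arch_evlist__cmp() tiebreak and list_sort()'s stability are omitted:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct toy_ev { int sort_idx; const char *pmu; };

    /* Order first by index, then by PMU name so a group never spans PMUs. */
    static int toy_cmp(const void *l, const void *r)
    {
        const struct toy_ev *a = l, *b = r;

        if (a->sort_idx != b->sort_idx)
            return a->sort_idx - b->sort_idx;
        return strcmp(a->pmu, b->pmu);
    }

    int main(void)
    {
        struct toy_ev evs[] = {
            { 1, "cpu_core" }, { 0, "cpu_atom" }, { 1, "cpu_atom" },
        };

        qsort(evs, 3, sizeof(evs[0]), toy_cmp);
        for (int i = 0; i < 3; i++)
            printf("%d:%s\n", evs[i].sort_idx, evs[i].pmu);
        /* 0:cpu_atom 1:cpu_atom 1:cpu_core */
        return 0;
    }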
static int parse_events__sort_events_and_fix_groups(struct list_head *list)
{
int idx = 0, force_grouped_idx = -1;
struct evsel *pos, *cur_leader = NULL;
struct perf_evsel *cur_leaders_grp = NULL;
- bool idx_changed = false, cur_leader_force_grouped = false;
+ bool idx_changed = false;
int orig_num_leaders = 0, num_leaders = 0;
int ret;
+ struct evsel *force_grouped_leader = NULL;
+ bool last_event_was_forced_leader = false;
+
+ /* On x86 topdown metrics events require a slots event. */
+ ret = arch_evlist__add_required_events(list);
+ if (ret)
+ return ret;
/*
* Compute index to insert ungrouped events at. Place them where the
@@ -2053,10 +2154,13 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
*/
pos->core.idx = idx++;
- /* Remember an index to sort all forced grouped events together to. */
- if (force_grouped_idx == -1 && pos == pos_leader && pos->core.nr_members < 2 &&
- arch_evsel__must_be_in_group(pos))
- force_grouped_idx = pos->core.idx;
+ /*
+ * Remember an index to sort all forced grouped events
+ * together to. Use the group leader as some events
+ * must appear first within the group.
+ */
+ if (force_grouped_idx == -1 && arch_evsel__must_be_in_group(pos))
+ force_grouped_idx = pos_leader->core.idx;
}
/* Sort events. */
@@ -2084,31 +2188,66 @@ static int parse_events__sort_events_and_fix_groups(struct list_head *list)
* Set the group leader respecting the given groupings and that
* groups can't span PMUs.
*/
- if (!cur_leader)
+ if (!cur_leader) {
cur_leader = pos;
+ cur_leaders_grp = &pos->core;
+ if (pos_force_grouped)
+ force_grouped_leader = pos;
+ }
cur_leader_pmu_name = cur_leader->group_pmu_name;
- if ((cur_leaders_grp != pos->core.leader &&
- (!pos_force_grouped || !cur_leader_force_grouped)) ||
- strcmp(cur_leader_pmu_name, pos_pmu_name)) {
- /* Event is for a different group/PMU than last. */
+ if (strcmp(cur_leader_pmu_name, pos_pmu_name)) {
+ /* PMU changed so the group/leader must change. */
cur_leader = pos;
- /*
- * Remember the leader's group before it is overwritten,
- * so that later events match as being in the same
- * group.
- */
cur_leaders_grp = pos->core.leader;
+ if (pos_force_grouped && force_grouped_leader == NULL)
+ force_grouped_leader = pos;
+ } else if (cur_leaders_grp != pos->core.leader) {
+ bool split_even_if_last_leader_was_forced = true;
+
/*
- * Avoid forcing events into groups with events that
- * don't need to be in the group.
+ * Event is for a different group. If the last event was
+ * the forced group leader then subsequent group events
+ * and forced events should be in the same group. If
+ * there are no other forced group events then the
+ * forced group leader wasn't really being forced into a
+ * group, it just set arch_evsel__must_be_in_group, and
+ * we don't want the group to split here.
*/
- cur_leader_force_grouped = pos_force_grouped;
+ if (force_grouped_idx != -1 && last_event_was_forced_leader) {
+ struct evsel *pos2 = pos;
+ /*
+ * Search the whole list as the group leaders
+ * aren't currently valid.
+ */
+ list_for_each_entry_continue(pos2, list, core.node) {
+ if (pos->core.leader == pos2->core.leader &&
+ arch_evsel__must_be_in_group(pos2)) {
+ split_even_if_last_leader_was_forced = false;
+ break;
+ }
+ }
+ }
+ if (!last_event_was_forced_leader || split_even_if_last_leader_was_forced) {
+ if (pos_force_grouped) {
+ if (force_grouped_leader) {
+ cur_leader = force_grouped_leader;
+ cur_leaders_grp = force_grouped_leader->core.leader;
+ } else {
+ cur_leader = force_grouped_leader = pos;
+ cur_leaders_grp = &pos->core;
+ }
+ } else {
+ cur_leader = pos;
+ cur_leaders_grp = pos->core.leader;
+ }
+ }
}
if (pos_leader != cur_leader) {
/* The leader changed so update it. */
evsel__set_leader(pos, cur_leader);
}
+ last_event_was_forced_leader = (force_grouped_leader == pos);
}
list_for_each_entry(pos, list, core.node) {
struct evsel *pos_leader = evsel__leader(pos);
@@ -2147,14 +2286,23 @@ int __parse_events(struct evlist *evlist, const char *str, const char *pmu_filte
if (ret2 < 0)
return ret;
- if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus)
- pr_warning("WARNING: events were regrouped to match PMUs\n");
-
/*
* Add list to the evlist even with errors to allow callers to clean up.
*/
evlist__splice_list_tail(evlist, &parse_state.list);
+ if (ret2 && warn_if_reordered && !parse_state.wild_card_pmus) {
+ pr_warning("WARNING: events were regrouped to match PMUs\n");
+
+ if (verbose > 0) {
+ struct strbuf sb = STRBUF_INIT;
+
+ evlist__uniquify_evsel_names(evlist, &stat_config);
+ evlist__format_evsels(evlist, &sb, 2048);
+ pr_debug("evlist after sorting/fixing: '%s'\n", sb.buf);
+ strbuf_release(&sb);
+ }
+ }
if (!ret) {
struct evsel *last;
@@ -2407,12 +2555,17 @@ foreach_evsel_in_last_glob(struct evlist *evlist,
return 0;
}
+/* Will a tracepoint filter work for str or should a BPF filter be used? */
+static bool is_possible_tp_filter(const char *str)
+{
+ return strstr(str, "uid") == NULL;
+}
+
static int set_filter(struct evsel *evsel, const void *arg)
{
const char *str = arg;
- bool found = false;
int nr_addr_filters = 0;
- struct perf_pmu *pmu = NULL;
+ struct perf_pmu *pmu;
if (evsel == NULL) {
fprintf(stderr,
@@ -2420,7 +2573,7 @@ static int set_filter(struct evsel *evsel, const void *arg)
return -1;
}
- if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
+ if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT && is_possible_tp_filter(str)) {
if (evsel__append_tp_filter(evsel, str) < 0) {
fprintf(stderr,
"not enough memory to hold filter string\n");
@@ -2430,16 +2583,11 @@ static int set_filter(struct evsel *evsel, const void *arg)
return 0;
}
- while ((pmu = perf_pmus__scan(pmu)) != NULL)
- if (pmu->type == evsel->core.attr.type) {
- found = true;
- break;
- }
-
- if (found)
+ pmu = evsel__find_pmu(evsel);
+ if (pmu) {
perf_pmu__scan_file(pmu, "nr_addr_filters",
"%d", &nr_addr_filters);
-
+ }
if (!nr_addr_filters)
return perf_bpf_filter__parse(&evsel->bpf_filters, str);
@@ -2461,6 +2609,30 @@ int parse_filter(const struct option *opt, const char *str,
(const void *)str);
}
+int parse_uid_filter(struct evlist *evlist, uid_t uid)
+{
+ struct option opt = {
+ .value = &evlist,
+ };
+ char buf[128];
+ int ret;
+
+ snprintf(buf, sizeof(buf), "uid == %d", uid);
+ ret = parse_filter(&opt, buf, /*unset=*/0);
+ if (ret) {
+ if (use_browser >= 1) {
+ /*
+ * Use ui__warning so a pop up appears above the
+ * underlying BPF error message.
+ */
+ ui__warning("Failed to add UID filtering that uses BPF filtering.\n");
+ } else {
+ fprintf(stderr, "Failed to add UID filtering that uses BPF filtering.\n");
+ }
+ }
+ return ret;
+}
+
static int add_exclude_perf_filter(struct evsel *evsel,
const void *arg __maybe_unused)
{