Diffstat (limited to 'tools/perf/arch/x86')
-rw-r--r-- | tools/perf/arch/x86/tests/dwarf-unwind.c |  2
-rw-r--r-- | tools/perf/arch/x86/tests/insn-x86.c     |  4
-rw-r--r-- | tools/perf/arch/x86/util/auxtrace.c      |  4
-rw-r--r-- | tools/perf/arch/x86/util/event.c         | 13
-rw-r--r-- | tools/perf/arch/x86/util/evlist.c        | 45
-rw-r--r-- | tools/perf/arch/x86/util/intel-pt.c      | 72
-rw-r--r-- | tools/perf/arch/x86/util/iostat.c        |  7
-rw-r--r-- | tools/perf/arch/x86/util/kvm-stat.c      | 15
-rw-r--r-- | tools/perf/arch/x86/util/pmu.c           | 21
-rw-r--r-- | tools/perf/arch/x86/util/topdown.c       | 78
-rw-r--r-- | tools/perf/arch/x86/util/topdown.h       |  1
11 files changed, 89 insertions(+), 173 deletions(-)
diff --git a/tools/perf/arch/x86/tests/dwarf-unwind.c b/tools/perf/arch/x86/tests/dwarf-unwind.c
index a54dea7c112f..497593be80f2 100644
--- a/tools/perf/arch/x86/tests/dwarf-unwind.c
+++ b/tools/perf/arch/x86/tests/dwarf-unwind.c
@@ -33,7 +33,7 @@ static int sample_ustack(struct perf_sample *sample,
 		return -1;
 	}
 
-	stack_size = map->end - sp;
+	stack_size = map__end(map) - sp;
 	stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
 
 	memcpy(buf, (void *) sp, stack_size);
diff --git a/tools/perf/arch/x86/tests/insn-x86.c b/tools/perf/arch/x86/tests/insn-x86.c
index 94b490c434d0..735257d205b5 100644
--- a/tools/perf/arch/x86/tests/insn-x86.c
+++ b/tools/perf/arch/x86/tests/insn-x86.c
@@ -29,6 +29,8 @@ struct test_data test_data_64[] = {
 #include "insn-x86-dat-64.c"
 	{{0x0f, 0x01, 0xee}, 3, 0, NULL, NULL, "0f 01 ee \trdpkru"},
 	{{0x0f, 0x01, 0xef}, 3, 0, NULL, NULL, "0f 01 ef \twrpkru"},
+	{{0xf2, 0x0f, 0x01, 0xca}, 4, 0, "erets", "indirect", "f2 0f 01 ca \terets"},
+	{{0xf3, 0x0f, 0x01, 0xca}, 4, 0, "eretu", "indirect", "f3 0f 01 ca \teretu"},
 	{{0}, 0, 0, NULL, NULL, NULL},
 };
 
@@ -49,6 +51,8 @@ static int get_op(const char *op_str)
 		{"syscall", INTEL_PT_OP_SYSCALL},
 		{"sysret", INTEL_PT_OP_SYSRET},
 		{"vmentry", INTEL_PT_OP_VMENTRY},
+		{"erets", INTEL_PT_OP_ERETS},
+		{"eretu", INTEL_PT_OP_ERETU},
 		{NULL, 0},
 	};
 	struct val_data *val;
diff --git a/tools/perf/arch/x86/util/auxtrace.c b/tools/perf/arch/x86/util/auxtrace.c
index 3da506e13f49..330d03216b0e 100644
--- a/tools/perf/arch/x86/util/auxtrace.c
+++ b/tools/perf/arch/x86/util/auxtrace.c
@@ -26,11 +26,7 @@ struct auxtrace_record *auxtrace_record__init_intel(struct evlist *evlist,
 	bool found_bts = false;
 
 	intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
-	if (intel_pt_pmu)
-		intel_pt_pmu->auxtrace = true;
 	intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
-	if (intel_bts_pmu)
-		intel_bts_pmu->auxtrace = true;
 
 	evlist__for_each_entry(evlist, evsel) {
 		if (intel_pt_pmu && evsel->core.attr.type == intel_pt_pmu->type)
diff --git a/tools/perf/arch/x86/util/event.c b/tools/perf/arch/x86/util/event.c
index e4288d09f3a0..5741ffe47312 100644
--- a/tools/perf/arch/x86/util/event.c
+++ b/tools/perf/arch/x86/util/event.c
@@ -19,7 +19,7 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
 				       struct machine *machine)
 {
 	int rc = 0;
-	struct map *pos;
+	struct map_rb_node *pos;
 	struct maps *kmaps = machine__kernel_maps(machine);
 	union perf_event *event = zalloc(sizeof(event->mmap) +
 					 machine->id_hdr_size);
@@ -33,11 +33,12 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
 	maps__for_each_entry(kmaps, pos) {
 		struct kmap *kmap;
 		size_t size;
+		struct map *map = pos->map;
 
-		if (!__map__is_extra_kernel_map(pos))
+		if (!__map__is_extra_kernel_map(map))
 			continue;
 
-		kmap = map__kmap(pos);
+		kmap = map__kmap(map);
 
 		size = sizeof(event->mmap) - sizeof(event->mmap.filename) +
 		       PERF_ALIGN(strlen(kmap->name) + 1, sizeof(u64)) +
@@ -58,9 +59,9 @@ int perf_event__synthesize_extra_kmaps(struct perf_tool *tool,
 
 		event->mmap.header.size = size;
 
-		event->mmap.start = pos->start;
-		event->mmap.len = pos->end - pos->start;
-		event->mmap.pgoff = pos->pgoff;
+		event->mmap.start = map__start(map);
+		event->mmap.len = map__size(map);
+		event->mmap.pgoff = map__pgoff(map);
 		event->mmap.pid = machine->pid;
 
 		strlcpy(event->mmap.filename, kmap->name, PATH_MAX);
diff --git a/tools/perf/arch/x86/util/evlist.c b/tools/perf/arch/x86/util/evlist.c
index cb59ce9b9638..d4193479a364 100644
--- a/tools/perf/arch/x86/util/evlist.c
+++ b/tools/perf/arch/x86/util/evlist.c
@@ -59,35 +59,28 @@ int arch_evlist__add_default_attrs(struct evlist *evlist,
 				   struct perf_event_attr *attrs,
 				   size_t nr_attrs)
 {
-	if (nr_attrs)
-		return ___evlist__add_default_attrs(evlist, attrs, nr_attrs);
+	if (!nr_attrs)
+		return 0;
 
-	return topdown_parse_events(evlist);
+	return ___evlist__add_default_attrs(evlist, attrs, nr_attrs);
 }
 
-struct evsel *arch_evlist__leader(struct list_head *list)
+int arch_evlist__cmp(const struct evsel *lhs, const struct evsel *rhs)
 {
-	struct evsel *evsel, *first, *slots = NULL;
-	bool has_topdown = false;
-
-	first = list_first_entry(list, struct evsel, core.node);
-
-	if (!topdown_sys_has_perf_metrics())
-		return first;
-
-	/* If there is a slots event and a topdown event then the slots event comes first. */
-	__evlist__for_each_entry(list, evsel) {
-		if (evsel->pmu_name && !strncmp(evsel->pmu_name, "cpu", 3) && evsel->name) {
-			if (strcasestr(evsel->name, "slots")) {
-				slots = evsel;
-				if (slots == first)
-					return first;
-			}
-			if (strcasestr(evsel->name, "topdown"))
-				has_topdown = true;
-			if (slots && has_topdown)
-				return slots;
-		}
+	if (topdown_sys_has_perf_metrics() &&
+	    (!lhs->pmu_name || !strncmp(lhs->pmu_name, "cpu", 3))) {
+		/* Ensure the topdown slots comes first. */
+		if (strcasestr(lhs->name, "slots"))
+			return -1;
+		if (strcasestr(rhs->name, "slots"))
+			return 1;
+		/* Followed by topdown events. */
+		if (strcasestr(lhs->name, "topdown") && !strcasestr(rhs->name, "topdown"))
+			return -1;
+		if (!strcasestr(lhs->name, "topdown") && strcasestr(rhs->name, "topdown"))
+			return 1;
 	}
-	return first;
+
+	/* Default ordering by insertion index. */
+	return lhs->core.idx - rhs->core.idx;
 }
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 1e39a034cee9..17336da08b58 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -194,16 +194,19 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
 	int pos = 0;
 	u64 config;
 	char c;
+	int dirfd;
+
+	dirfd = perf_pmu__event_source_devices_fd();
 
 	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");
 
-	if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
-				&mtc) != 1)
+	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc", "%d",
+				   &mtc) != 1)
 		mtc = 1;
 
 	if (mtc) {
-		if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
-					&mtc_periods) != 1)
+		if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/mtc_periods", "%x",
+					   &mtc_periods) != 1)
 			mtc_periods = 0;
 		if (mtc_periods) {
 			mtc_period = intel_pt_pick_bit(mtc_periods, 3);
@@ -212,13 +215,13 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
 		}
 	}
 
-	if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
-				&psb_cyc) != 1)
+	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_cyc", "%d",
+				   &psb_cyc) != 1)
 		psb_cyc = 1;
 
 	if (psb_cyc && mtc_periods) {
-		if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
-					&psb_periods) != 1)
+		if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "caps/psb_periods", "%x",
+					   &psb_periods) != 1)
 			psb_periods = 0;
 		if (psb_periods) {
 			psb_period = intel_pt_pick_bit(psb_periods, 3);
@@ -227,8 +230,8 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
 		}
 	}
 
-	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
-	    perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1)
+	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
+	    perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/branch", "%c", &c) == 1)
 		pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");
 
 	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);
@@ -236,6 +239,7 @@ static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
 
 	intel_pt_parse_terms(intel_pt_pmu->name, &intel_pt_pmu->format, buf, &config);
 
+	close(dirfd);
 	return config;
 }
 
@@ -488,7 +492,7 @@ static void intel_pt_valid_str(char *str, size_t len, u64 valid)
 	}
 }
 
-static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
+static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu, int dirfd,
 				    const char *caps, const char *name,
 				    const char *supported, u64 config)
 {
@@ -498,11 +502,11 @@ static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
 	u64 bits;
 	int ok;
 
-	if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
+	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, caps, "%llx", &valid) != 1)
 		valid = 0;
 
 	if (supported &&
-	    perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
+	    perf_pmu__scan_file_at(intel_pt_pmu, dirfd, supported, "%d", &ok) == 1 && !ok)
 		valid = 0;
 
 	valid |= 1;
@@ -531,56 +535,45 @@ out_err:
 static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
 				    struct evsel *evsel)
 {
-	int err;
+	int err, dirfd;
 	char c;
 
 	if (!evsel)
 		return 0;
 
+	dirfd = perf_pmu__event_source_devices_fd();
+	if (dirfd < 0)
+		return dirfd;
+
 	/*
 	 * If supported, force pass-through config term (pt=1) even if user
 	 * sets pt=0, which avoids senseless kernel errors.
 	 */
-	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
+	if (perf_pmu__scan_file_at(intel_pt_pmu, dirfd, "format/pt", "%c", &c) == 1 &&
 	    !(evsel->core.attr.config & 1)) {
 		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
 		evsel->core.attr.config |= 1;
 	}
 
-	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
+	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/cycle_thresholds",
 				       "cyc_thresh", "caps/psb_cyc",
 				       evsel->core.attr.config);
 	if (err)
-		return err;
+		goto out;
 
-	err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
+	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/mtc_periods",
 				       "mtc_period", "caps/mtc",
 				       evsel->core.attr.config);
 	if (err)
-		return err;
+		goto out;
 
-	return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
+	err = intel_pt_val_config_term(intel_pt_pmu, dirfd, "caps/psb_periods",
 				       "psb_period", "caps/psb_cyc",
 				       evsel->core.attr.config);
-}
 
-static void intel_pt_config_sample_mode(struct perf_pmu *intel_pt_pmu,
-					struct evsel *evsel)
-{
-	u64 user_bits = 0, bits;
-	struct evsel_config_term *term = evsel__get_config_term(evsel, CFG_CHG);
-
-	if (term)
-		user_bits = term->val.cfg_chg;
-
-	bits = perf_pmu__format_bits(&intel_pt_pmu->format, "psb_period");
-
-	/* Did user change psb_period */
-	if (bits & user_bits)
-		return;
-
-	/* Set psb_period to 0 */
-	evsel->core.attr.config &= ~bits;
+out:
+	close(dirfd);
+	return err;
 }
 
 static void intel_pt_min_max_sample_sz(struct evlist *evlist,
@@ -674,7 +667,8 @@ static int intel_pt_recording_options(struct auxtrace_record *itr,
 			return 0;
 
 	if (opts->auxtrace_sample_mode)
-		intel_pt_config_sample_mode(intel_pt_pmu, intel_pt_evsel);
+		evsel__set_config_if_unset(intel_pt_pmu, intel_pt_evsel,
+					   "psb_period", 0);
 
 	err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
 	if (err)
diff --git a/tools/perf/arch/x86/util/iostat.c b/tools/perf/arch/x86/util/iostat.c
index 7eb0a7b00b95..df7b5dfcc26a 100644
--- a/tools/perf/arch/x86/util/iostat.c
+++ b/tools/perf/arch/x86/util/iostat.c
@@ -10,6 +10,7 @@
 #include <api/fs/fs.h>
 #include <linux/kernel.h>
 #include <linux/err.h>
+#include <linux/zalloc.h>
 #include <limits.h>
 #include <stdio.h>
 #include <string.h>
@@ -100,8 +101,8 @@ static void iio_root_ports_list_free(struct iio_root_ports_list *list)
 
 	if (list) {
 		for (idx = 0; idx < list->nr_entries; idx++)
-			free(list->rps[idx]);
-		free(list->rps);
+			zfree(&list->rps[idx]);
+		zfree(&list->rps);
 		free(list);
 	}
 }
@@ -390,7 +391,7 @@ void iostat_release(struct evlist *evlist)
 	evlist__for_each_entry(evlist, evsel) {
 		if (rp != evsel->priv) {
 			rp = evsel->priv;
-			free(evsel->priv);
+			zfree(&evsel->priv);
 		}
 	}
 }
diff --git a/tools/perf/arch/x86/util/kvm-stat.c b/tools/perf/arch/x86/util/kvm-stat.c
index c5dd54f6ef5e..424716518b75 100644
--- a/tools/perf/arch/x86/util/kvm-stat.c
+++ b/tools/perf/arch/x86/util/kvm-stat.c
@@ -18,7 +18,6 @@ static struct kvm_events_ops exit_events = {
 };
 
 const char *vcpu_id_str = "vcpu_id";
-const int decode_str_len = 20;
 const char *kvm_exit_reason = "exit_reason";
 const char *kvm_entry_trace = "kvm:kvm_entry";
 const char *kvm_exit_trace = "kvm:kvm_exit";
@@ -47,7 +46,7 @@ static bool mmio_event_begin(struct evsel *evsel,
 		return true;
 
 	/* MMIO write begin event in kernel. */
-	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
+	if (evsel__name_is(evsel, "kvm:kvm_mmio") &&
 	    evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
 		mmio_event_get_key(evsel, sample, key);
 		return true;
@@ -64,7 +63,7 @@ static bool mmio_event_end(struct evsel *evsel, struct perf_sample *sample,
 		return true;
 
 	/* MMIO read end event in kernel.*/
-	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
+	if (evsel__name_is(evsel, "kvm:kvm_mmio") &&
 	    evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
 		mmio_event_get_key(evsel, sample, key);
 		return true;
@@ -77,7 +76,7 @@ static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
 				  struct event_key *key,
 				  char *decode)
 {
-	scnprintf(decode, decode_str_len, "%#lx:%s",
+	scnprintf(decode, KVM_EVENT_NAME_LEN, "%#lx:%s",
 		  (unsigned long)key->key,
 		  key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
 }
@@ -102,7 +101,7 @@ static bool ioport_event_begin(struct evsel *evsel,
 			       struct perf_sample *sample,
 			       struct event_key *key)
 {
-	if (!strcmp(evsel->name, "kvm:kvm_pio")) {
+	if (evsel__name_is(evsel, "kvm:kvm_pio")) {
 		ioport_event_get_key(evsel, sample, key);
 		return true;
 	}
@@ -121,7 +120,7 @@ static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
 				    struct event_key *key,
 				    char *decode)
 {
-	scnprintf(decode, decode_str_len, "%#llx:%s",
+	scnprintf(decode, KVM_EVENT_NAME_LEN, "%#llx:%s",
 		  (unsigned long long)key->key,
 		  key->info ? "POUT" : "PIN");
 }
@@ -146,7 +145,7 @@ static bool msr_event_begin(struct evsel *evsel,
 			    struct perf_sample *sample,
 			    struct event_key *key)
 {
-	if (!strcmp(evsel->name, "kvm:kvm_msr")) {
+	if (evsel__name_is(evsel, "kvm:kvm_msr")) {
 		msr_event_get_key(evsel, sample, key);
 		return true;
 	}
@@ -165,7 +164,7 @@ static void msr_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
 				 struct event_key *key,
 				 char *decode)
 {
-	scnprintf(decode, decode_str_len, "%#llx:%s",
+	scnprintf(decode, KVM_EVENT_NAME_LEN, "%#llx:%s",
 		  (unsigned long long)key->key,
 		  key->info ? "W" : "R");
 }
diff --git a/tools/perf/arch/x86/util/pmu.c b/tools/perf/arch/x86/util/pmu.c
index 358340b34243..3c0de3370d7e 100644
--- a/tools/perf/arch/x86/util/pmu.c
+++ b/tools/perf/arch/x86/util/pmu.c
@@ -27,10 +27,14 @@ static bool cached_list;
 struct perf_event_attr *perf_pmu__get_default_config(struct perf_pmu *pmu __maybe_unused)
 {
 #ifdef HAVE_AUXTRACE_SUPPORT
-	if (!strcmp(pmu->name, INTEL_PT_PMU_NAME))
+	if (!strcmp(pmu->name, INTEL_PT_PMU_NAME)) {
+		pmu->auxtrace = true;
 		return intel_pt_pmu_default_config(pmu);
-	if (!strcmp(pmu->name, INTEL_BTS_PMU_NAME))
+	}
+	if (!strcmp(pmu->name, INTEL_BTS_PMU_NAME)) {
+		pmu->auxtrace = true;
 		pmu->selectable = true;
+	}
 #endif
 	return NULL;
 }
@@ -67,7 +71,7 @@ out_delete:
 
 static int setup_pmu_alias_list(void)
 {
-	char path[PATH_MAX];
+	int fd, dirfd;
 	DIR *dir;
 	struct dirent *dent;
 	struct pmu_alias *pmu_alias;
@@ -75,10 +79,11 @@ static int setup_pmu_alias_list(void)
 	FILE *file;
 	int ret = -ENOMEM;
 
-	if (!perf_pmu__event_source_devices_scnprintf(path, sizeof(path)))
+	dirfd = perf_pmu__event_source_devices_fd();
+	if (dirfd < 0)
 		return -1;
 
-	dir = opendir(path);
+	dir = fdopendir(dirfd);
 	if (!dir)
 		return -errno;
 
@@ -87,11 +92,11 @@ static int setup_pmu_alias_list(void)
 		    !strcmp(dent->d_name, ".."))
 			continue;
 
-		perf_pmu__pathname_scnprintf(path, sizeof(path), dent->d_name, "alias");
-		if (!file_available(path))
+		fd = perf_pmu__pathname_fd(dirfd, dent->d_name, "alias", O_RDONLY);
+		if (fd < 0)
 			continue;
 
-		file = fopen(path, "r");
+		file = fdopen(fd, "r");
 		if (!file)
 			continue;
 
diff --git a/tools/perf/arch/x86/util/topdown.c b/tools/perf/arch/x86/util/topdown.c
index 54810f9acd6f..9ad5e5c7bd27 100644
--- a/tools/perf/arch/x86/util/topdown.c
+++ b/tools/perf/arch/x86/util/topdown.c
@@ -1,19 +1,11 @@
 // SPDX-License-Identifier: GPL-2.0
-#include <stdio.h>
 #include "api/fs/fs.h"
+#include "util/evsel.h"
 #include "util/pmu.h"
 #include "util/topdown.h"
-#include "util/evlist.h"
-#include "util/debug.h"
-#include "util/pmu-hybrid.h"
 #include "topdown.h"
 #include "evsel.h"
 
-#define TOPDOWN_L1_EVENTS	"{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound}"
-#define TOPDOWN_L1_EVENTS_CORE	"{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/}"
-#define TOPDOWN_L2_EVENTS	"{slots,topdown-retiring,topdown-bad-spec,topdown-fe-bound,topdown-be-bound,topdown-heavy-ops,topdown-br-mispredict,topdown-fetch-lat,topdown-mem-bound}"
-#define TOPDOWN_L2_EVENTS_CORE	"{slots,cpu_core/topdown-retiring/,cpu_core/topdown-bad-spec/,cpu_core/topdown-fe-bound/,cpu_core/topdown-be-bound/,cpu_core/topdown-heavy-ops/,cpu_core/topdown-br-mispredict/,cpu_core/topdown-fetch-lat/,cpu_core/topdown-mem-bound/}"
-
 /* Check whether there is a PMU which supports the perf metrics. */
 bool topdown_sys_has_perf_metrics(void)
 {
@@ -38,30 +30,6 @@ bool topdown_sys_has_perf_metrics(void)
 	return has_perf_metrics;
 }
 
-/*
- * Check whether we can use a group for top down.
- * Without a group may get bad results due to multiplexing.
- */
-bool arch_topdown_check_group(bool *warn)
-{
-	int n;
-
-	if (sysctl__read_int("kernel/nmi_watchdog", &n) < 0)
-		return false;
-	if (n > 0) {
-		*warn = true;
-		return false;
-	}
-	return true;
-}
-
-void arch_topdown_group_warn(void)
-{
-	fprintf(stderr,
-		"nmi_watchdog enabled with topdown. May give wrong results.\n"
-		"Disable with echo 0 > /proc/sys/kernel/nmi_watchdog\n");
-}
-
 #define TOPDOWN_SLOTS	0x0400
 
 /*
@@ -70,7 +38,6 @@ void arch_topdown_group_warn(void)
  * Only Topdown metric supports sample-read. The slots
  * event must be the leader of the topdown group.
  */
-
 bool arch_topdown_sample_read(struct evsel *leader)
 {
 	if (!evsel__sys_has_perf_metrics(leader))
@@ -81,46 +48,3 @@ bool arch_topdown_sample_read(struct evsel *leader)
 
 	return false;
 }
-
-const char *arch_get_topdown_pmu_name(struct evlist *evlist, bool warn)
-{
-	const char *pmu_name;
-
-	if (!perf_pmu__has_hybrid())
-		return "cpu";
-
-	if (!evlist->hybrid_pmu_name) {
-		if (warn)
-			pr_warning("WARNING: default to use cpu_core topdown events\n");
-		evlist->hybrid_pmu_name = perf_pmu__hybrid_type_to_pmu("core");
-	}
-
-	pmu_name = evlist->hybrid_pmu_name;
-
-	return pmu_name;
-}
-
-int topdown_parse_events(struct evlist *evlist)
-{
-	const char *topdown_events;
-	const char *pmu_name;
-
-	if (!topdown_sys_has_perf_metrics())
-		return 0;
-
-	pmu_name = arch_get_topdown_pmu_name(evlist, false);
-
-	if (pmu_have_event(pmu_name, "topdown-heavy-ops")) {
-		if (!strcmp(pmu_name, "cpu_core"))
-			topdown_events = TOPDOWN_L2_EVENTS_CORE;
-		else
-			topdown_events = TOPDOWN_L2_EVENTS;
-	} else {
-		if (!strcmp(pmu_name, "cpu_core"))
-			topdown_events = TOPDOWN_L1_EVENTS_CORE;
-		else
-			topdown_events = TOPDOWN_L1_EVENTS;
-	}
-
-	return parse_event(evlist, topdown_events);
-}
diff --git a/tools/perf/arch/x86/util/topdown.h b/tools/perf/arch/x86/util/topdown.h
index 7eb81f042838..46bf9273e572 100644
--- a/tools/perf/arch/x86/util/topdown.h
+++ b/tools/perf/arch/x86/util/topdown.h
@@ -3,6 +3,5 @@
 #define _TOPDOWN_H 1
 
 bool topdown_sys_has_perf_metrics(void);
-int topdown_parse_events(struct evlist *evlist);
 
 #endif