Diffstat (limited to 'tools/perf/util/tool_pmu.c')
 -rw-r--r--  tools/perf/util/tool_pmu.c | 90
 1 file changed, 70 insertions(+), 20 deletions(-)
diff --git a/tools/perf/util/tool_pmu.c b/tools/perf/util/tool_pmu.c
index 4fb097578479..d99e699e646d 100644
--- a/tools/perf/util/tool_pmu.c
+++ b/tools/perf/util/tool_pmu.c
@@ -62,7 +62,8 @@ int tool_pmu__num_skip_events(void)
 
 const char *tool_pmu__event_to_str(enum tool_pmu_event ev)
 {
-	if (ev > TOOL_PMU__EVENT_NONE && ev < TOOL_PMU__EVENT_MAX)
+	if ((ev > TOOL_PMU__EVENT_NONE && ev < TOOL_PMU__EVENT_MAX) &&
+	    !tool_pmu__skip_event(tool_pmu__event_names[ev]))
 		return tool_pmu__event_names[ev];
 
 	return NULL;
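
Note for callers: tool_pmu__event_to_str() now also returns NULL for events that are skipped on the current build, not just for out-of-range values, so enumeration loops have to tolerate a NULL name. A minimal caller sketch, purely illustrative and not part of the patch (the loop and printf are assumptions):

	/* Hypothetical caller: list only the tool events usable in this build. */
	for (int i = TOOL_PMU__EVENT_NONE + 1; i < TOOL_PMU__EVENT_MAX; i++) {
		const char *name = tool_pmu__event_to_str(i);

		if (!name)	/* out of range or skipped, e.g. missing build dependency */
			continue;
		printf("tool/%s/\n", name);
	}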
@@ -331,7 +332,7 @@ static bool has_pmem(void)
 	return has_pmem;
 }
 
-bool tool_pmu__read_event(enum tool_pmu_event ev, u64 *result)
+bool tool_pmu__read_event(enum tool_pmu_event ev, struct evsel *evsel, u64 *result)
 {
 	const struct cpu_topology *topology;
 
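
The new evsel parameter lets the read be scoped to the CPUs the evsel was opened on; passing NULL keeps the old system-wide behaviour. A hedged example call (pr_debug and the surrounding code are assumptions, not part of the patch):

	u64 val = 0;

	if (tool_pmu__read_event(TOOL_PMU__EVENT_NUM_CPUS, /*evsel=*/NULL, &val))
		pr_debug("num_cpus = %llu\n", (unsigned long long)val);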
@@ -346,17 +347,60 @@ bool tool_pmu__read_event(enum tool_pmu_event ev, u64 *result)
 		return true;
 	case TOOL_PMU__EVENT_NUM_CPUS:
-		*result = cpu__max_present_cpu().cpu;
+		if (!evsel || perf_cpu_map__is_empty(evsel->core.cpus)) {
+			/* No evsel to be specific to. */
+			*result = cpu__max_present_cpu().cpu;
+		} else if (!perf_cpu_map__has_any_cpu(evsel->core.cpus)) {
+			/* Evsel just has specific CPUs. */
+			*result = perf_cpu_map__nr(evsel->core.cpus);
+		} else {
+			/*
+			 * "Any CPU" event that can be scheduled on any CPU in
+			 * the PMU's cpumask. The PMU cpumask should be saved in
+			 * pmu_cpus. If not present fall back to max.
+			 */
+			if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus))
+				*result = perf_cpu_map__nr(evsel->core.pmu_cpus);
+			else
+				*result = cpu__max_present_cpu().cpu;
+		}
 		return true;
 	case TOOL_PMU__EVENT_NUM_CPUS_ONLINE: {
 		struct perf_cpu_map *online = cpu_map__online();
 
-		if (online) {
+		if (!online)
+			return false;
+
+		if (!evsel || perf_cpu_map__is_empty(evsel->core.cpus)) {
+			/* No evsel to be specific to. */
 			*result = perf_cpu_map__nr(online);
-			return true;
+		} else if (!perf_cpu_map__has_any_cpu(evsel->core.cpus)) {
+			/* Evsel just has specific CPUs. */
+			struct perf_cpu_map *tmp =
+				perf_cpu_map__intersect(online, evsel->core.cpus);
+
+			*result = perf_cpu_map__nr(tmp);
+			perf_cpu_map__put(tmp);
+		} else {
+			/*
+			 * "Any CPU" event that can be scheduled on any CPU in
+			 * the PMU's cpumask. The PMU cpumask should be saved in
+			 * pmu_cpus, if not present then just the online cpu
+			 * mask.
+			 */
+			if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus)) {
+				struct perf_cpu_map *tmp =
+					perf_cpu_map__intersect(online, evsel->core.pmu_cpus);
+
+				*result = perf_cpu_map__nr(tmp);
+				perf_cpu_map__put(tmp);
+			} else {
+				*result = perf_cpu_map__nr(online);
+			}
 		}
-		return false;
+		perf_cpu_map__put(online);
+		return true;
 	}
 	case TOOL_PMU__EVENT_NUM_DIES:
 		topology = online_topology();
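
Both events now make the same three-way decision: with no evsel (or an empty CPU map) the old system-wide value is used, an explicit CPU list is counted directly, and an "any CPU" event prefers the PMU's own cpumask when it was recorded in pmu_cpus. A hedged sketch of that selection for num_cpus, factored into a standalone helper purely for illustration (the helper name is not in the patch):

	/* Illustrative only: mirrors the TOOL_PMU__EVENT_NUM_CPUS branches above. */
	static u64 num_cpus_for_evsel(struct evsel *evsel)
	{
		/* No evsel to be specific to: fall back to the maximum present CPU. */
		if (!evsel || perf_cpu_map__is_empty(evsel->core.cpus))
			return cpu__max_present_cpu().cpu;

		/* Evsel has an explicit CPU list: count exactly those CPUs. */
		if (!perf_cpu_map__has_any_cpu(evsel->core.cpus))
			return perf_cpu_map__nr(evsel->core.cpus);

		/* "Any CPU" event: use the PMU cpumask when known, else the maximum. */
		if (!perf_cpu_map__is_empty(evsel->core.pmu_cpus))
			return perf_cpu_map__nr(evsel->core.pmu_cpus);

		return cpu__max_present_cpu().cpu;
	}

In the num_cpus_online case the same branches additionally intersect with the online map, and each temporary map from perf_cpu_map__intersect() is released with perf_cpu_map__put() to keep reference counts balanced.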
@@ -415,7 +459,7 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
 			old_count = perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
 		val = 0;
 		if (cpu_map_idx == 0 && thread == 0) {
-			if (!tool_pmu__read_event(ev, &val)) {
+			if (!tool_pmu__read_event(ev, evsel, &val)) {
 				count->lost++;
 				val = 0;
 			}
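
As before, the value is only materialised once, at cpu_map_idx 0 and thread 0, with a failed read recorded via count->lost rather than returned as an error; the only change here is forwarding the evsel. A hedged restatement of that pattern as a helper (the function name is made up for illustration):

	static u64 tool_event_read_once(enum tool_pmu_event ev, struct evsel *evsel,
					int cpu_map_idx, int thread,
					struct perf_counts_values *count)
	{
		u64 val = 0;

		/* Attribute the whole value to the first CPU/thread index. */
		if (cpu_map_idx == 0 && thread == 0 &&
		    !tool_pmu__read_event(ev, evsel, &val)) {
			count->lost++;	/* read failed: report 0 and count it as lost */
			val = 0;
		}
		return val;
	}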
@@ -484,22 +528,28 @@ int evsel__tool_pmu_read(struct evsel *evsel, int cpu_map_idx, int thread)
 		delta_start *= 1000000000 / ticks_per_sec;
 	}
 	count->val = delta_start;
-	count->ena = count->run = delta_start;
 	count->lost = 0;
+	/*
+	 * The values of enabled and running must make a ratio of 100%. The
+	 * exact values don't matter as long as they are non-zero to avoid
+	 * issues with evsel__count_has_error.
+	 */
+	count->ena++;
+	count->run++;
 	return 0;
 }
 
-struct perf_pmu *perf_pmus__tool_pmu(void)
+struct perf_pmu *tool_pmu__new(void)
 {
-	static struct perf_pmu tool = {
-		.name = "tool",
-		.type = PERF_PMU_TYPE_TOOL,
-		.aliases = LIST_HEAD_INIT(tool.aliases),
-		.caps = LIST_HEAD_INIT(tool.caps),
-		.format = LIST_HEAD_INIT(tool.format),
-	};
-	if (!tool.events_table)
-		tool.events_table = find_core_events_table("common", "common");
-
-	return &tool;
+	struct perf_pmu *tool = zalloc(sizeof(struct perf_pmu));
+
+	if (!tool)
+		return NULL;
+
+	if (perf_pmu__init(tool, PERF_PMU_TYPE_TOOL, "tool") != 0) {
+		perf_pmu__delete(tool);
+		return NULL;
+	}
+	tool->events_table = find_core_events_table("common", "common");
+	return tool;
 }
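
Two notes on this final hunk. The enabled/running change only needs ena and run to end up equal and non-zero, so downstream stat code sees a 100% enabled/running ratio and neither scales the value nor flags it as a partially counted event. A hedged sketch of the property being relied on (loosely modelled on, not copied from, evsel__count_has_error()):

	/* Illustrative only: a non-zero value with ena or run of zero looks broken. */
	static bool looks_partially_counted(const struct perf_counts_values *count)
	{
		return count->val != 0 && (count->ena == 0 || count->run == 0);
	}

The tool PMU is also no longer a function-local static: tool_pmu__new() allocates it, so creation can fail and the caller owns the result, releasing it with perf_pmu__delete(). A hedged usage sketch, assuming the caller keeps PMUs on a list (the helper below is illustrative, not part of the patch):

	static int add_tool_pmu(struct list_head *pmus)
	{
		struct perf_pmu *tool = tool_pmu__new();

		if (!tool)
			return -ENOMEM;
		list_add_tail(&tool->list, pmus);	/* perf_pmu instances are list-managed */
		return 0;
	}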