author    Namhyung Kim <namhyung@kernel.org>    2022-10-18 05:02:16 +0300
committer Arnaldo Carvalho de Melo <acme@redhat.com>    2022-10-27 22:37:25 +0300
commit    f976bc6b6bfc9b14eeaf9a8859191c8f85c253dc (patch)
tree      d31c7bce986a7920a96fd94052c9bc3492c2ca24 /tools/perf
parent    1f297a6eb2bd90663518cbb6e9e2a3b2add34b73 (diff)
download  linux-f976bc6b6bfc9b14eeaf9a8859191c8f85c253dc.tar.xz
perf stat: Aggregate events using evsel->stats->aggr
Add logic to aggregate counter values into the new evsel->stats->aggr.
It is not used yet, so shadow stats are not updated, but a later patch
will convert the existing code to use it. With that, we no longer need
to handle AGGR_GLOBAL specially; it can use the same logic as counts,
prev_counts and aggr_counts.

Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Athira Jajeev <atrajeev@linux.vnet.ibm.com>
Cc: Ian Rogers <irogers@google.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Link: https://lore.kernel.org/r/20221018020227.85905-10-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
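For orientation, here is a minimal standalone sketch of the aggregation idea the patch introduces in process_counter_values(): each per-CPU reading is mapped to an aggregation id and summed into that slot, and a bad reading poisons the whole slot. The types and the cpu_to_aggr_id() helper below are simplified stand-ins, not the real perf structures (those live in tools/perf/util/stat.h and use struct aggr_cpu_id / config->aggr_get_id()):

/* Simplified sketch; the real code uses struct perf_counts_values,
 * struct perf_stat_aggr and aggr_cpu_id lookups from the perf tool. */
#include <stdbool.h>
#include <stdio.h>

struct counts_values { unsigned long long val, ena, run; };

struct stat_aggr {
	struct counts_values counts;	/* summed counter values     */
	int nr;				/* how many CPUs contributed */
	bool failed;			/* any contribution was bad  */
};

/* Hypothetical mapping of a CPU to its aggregation id (e.g. socket). */
static int cpu_to_aggr_id(int cpu) { return cpu / 4; /* 4 CPUs per socket */ }

static void aggregate(struct stat_aggr *aggr, int nr_aggr,
		      int cpu, const struct counts_values *count)
{
	int id = cpu_to_aggr_id(cpu);

	for (int i = 0; i < nr_aggr; i++) {
		if (i != id)
			continue;

		aggr[i].nr++;

		/* A bad reading marks the whole aggregate as failed,
		 * mirroring the interval-mode handling in the patch. */
		if (count->ena == 0 || count->run == 0) {
			aggr[i].counts.val = 0;
			aggr[i].counts.ena = 0;
			aggr[i].counts.run = 0;
			aggr[i].failed = true;
		}

		if (!aggr[i].failed) {
			aggr[i].counts.val += count->val;
			aggr[i].counts.ena += count->ena;
			aggr[i].counts.run += count->run;
		}
		break;
	}
}

int main(void)
{
	struct stat_aggr aggr[2] = { 0 };
	struct counts_values c0 = { 100, 10, 10 }, c5 = { 200, 10, 10 };

	aggregate(aggr, 2, 0, &c0);	/* CPU 0 -> aggregate 0 */
	aggregate(aggr, 2, 5, &c5);	/* CPU 5 -> aggregate 1 */
	printf("aggr0=%llu aggr1=%llu\n", aggr[0].counts.val, aggr[1].counts.val);
	return 0;
}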
Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/builtin-stat.c                               |  3
-rw-r--r--  tools/perf/util/evsel.c                                 |  9
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c  |  6
-rw-r--r--  tools/perf/util/stat.c                                  | 46
4 files changed, 41 insertions(+), 23 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 92a8e4512f98..abede56d79b6 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -963,9 +963,6 @@ try_again_reset:
init_stats(&walltime_nsecs_stats);
update_stats(&walltime_nsecs_stats, t1 - t0);
- if (stat_config.aggr_mode == AGGR_GLOBAL)
- evlist__save_aggr_prev_raw_counts(evsel_list);
-
evlist__copy_prev_raw_counts(evsel_list);
evlist__reset_prev_raw_counts(evsel_list);
perf_stat__reset_shadow_per_stat(&rt_stat);
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c9fef26d0702..cdde5b5f8ad2 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1526,13 +1526,8 @@ void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
if (!evsel->prev_raw_counts)
return;
- if (cpu_map_idx == -1) {
- tmp = evsel->prev_raw_counts->aggr;
- evsel->prev_raw_counts->aggr = *count;
- } else {
- tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
- *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
- }
+ tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
+ *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;
count->val = count->val - tmp.val;
count->ena = count->ena - tmp.ena;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 1f2040f36d4e..7bc8559dce6a 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -1653,12 +1653,6 @@ static void python_process_stat(struct perf_stat_config *config,
struct perf_cpu_map *cpus = counter->core.cpus;
int cpu, thread;
- if (config->aggr_mode == AGGR_GLOBAL) {
- process_stat(counter, (struct perf_cpu){ .cpu = -1 }, -1, tstamp,
- &counter->counts->aggr);
- return;
- }
-
for (thread = 0; thread < threads->nr; thread++) {
for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
process_stat(counter, perf_cpu_map__cpu(cpus, cpu),
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 374149628507..99874254809d 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -387,6 +387,7 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
struct perf_counts_values *count)
{
struct perf_counts_values *aggr = &evsel->counts->aggr;
+ struct perf_stat_evsel *ps = evsel->stats;
static struct perf_counts_values zero;
bool skip = false;
@@ -398,6 +399,44 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
if (skip)
count = &zero;
+ if (!evsel->snapshot)
+ evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
+ perf_counts_values__scale(count, config->scale, NULL);
+
+ if (ps->aggr) {
+ struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
+ struct aggr_cpu_id aggr_id = config->aggr_get_id(config, cpu);
+ struct perf_stat_aggr *ps_aggr;
+ int i;
+
+ for (i = 0; i < ps->nr_aggr; i++) {
+ if (!aggr_cpu_id__equal(&aggr_id, &config->aggr_map->map[i]))
+ continue;
+
+ ps_aggr = &ps->aggr[i];
+ ps_aggr->nr++;
+
+ /*
+ * When any result is bad, make them all to give
+ * consistent output in interval mode.
+ */
+ if (count->ena == 0 || count->run == 0 ||
+ evsel->counts->scaled == -1) {
+ ps_aggr->counts.val = 0;
+ ps_aggr->counts.ena = 0;
+ ps_aggr->counts.run = 0;
+ ps_aggr->failed = true;
+ }
+
+ if (!ps_aggr->failed) {
+ ps_aggr->counts.val += count->val;
+ ps_aggr->counts.ena += count->ena;
+ ps_aggr->counts.run += count->run;
+ }
+ break;
+ }
+ }
+
switch (config->aggr_mode) {
case AGGR_THREAD:
case AGGR_CORE:
@@ -405,9 +444,6 @@ process_counter_values(struct perf_stat_config *config, struct evsel *evsel,
case AGGR_SOCKET:
case AGGR_NODE:
case AGGR_NONE:
- if (!evsel->snapshot)
- evsel__compute_deltas(evsel, cpu_map_idx, thread, count);
- perf_counts_values__scale(count, config->scale, NULL);
if ((config->aggr_mode == AGGR_NONE) && (!evsel->percore)) {
perf_stat__update_shadow_stats(evsel, count->val,
cpu_map_idx, &rt_stat);
@@ -469,10 +505,6 @@ int perf_stat_process_counter(struct perf_stat_config *config,
if (config->aggr_mode != AGGR_GLOBAL)
return 0;
- if (!counter->snapshot)
- evsel__compute_deltas(counter, -1, -1, aggr);
- perf_counts_values__scale(aggr, config->scale, &counter->counts->scaled);
-
update_stats(&ps->res_stats, *count);
if (verbose > 0) {