Diffstat (limited to 'tools/perf/util/bpf_ftrace.c')
 tools/perf/util/bpf_ftrace.c | 104 +++++++++++++++++++++++++++-------
 1 file changed, 81 insertions(+), 23 deletions(-)
diff --git a/tools/perf/util/bpf_ftrace.c b/tools/perf/util/bpf_ftrace.c
index 06d1c4018407..0cb02412043c 100644
--- a/tools/perf/util/bpf_ftrace.c
+++ b/tools/perf/util/bpf_ftrace.c
@@ -11,6 +11,7 @@
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"
+#include "util/stat.h"
#include "util/bpf_skel/func_latency.skel.h"
@@ -20,15 +21,26 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
int fd, err;
int i, ncpus = 1, ntasks = 1;
- struct filter_entry *func;
+ struct filter_entry *func = NULL;
- if (!list_is_singular(&ftrace->filters)) {
- pr_err("ERROR: %s target function(s).\n",
- list_empty(&ftrace->filters) ? "No" : "Too many");
- return -1;
- }
+ if (!list_empty(&ftrace->filters)) {
+ if (!list_is_singular(&ftrace->filters)) {
+ pr_err("ERROR: Too many target functions.\n");
+ return -1;
+ }
+ func = list_first_entry(&ftrace->filters, struct filter_entry, list);
+ } else {
+ int count = 0;
+ struct list_head *pos;
- func = list_first_entry(&ftrace->filters, struct filter_entry, list);
+ list_for_each(pos, &ftrace->event_pair)
+ count++;
+
+ if (count != 2) {
+ pr_err("ERROR: Needs two target events.\n");
+ return -1;
+ }
+ }
skel = func_latency_bpf__open();
if (!skel) {
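
The hunk above splits target validation into two modes: a non-empty filter
list must contain exactly one function (the existing kprobe path), while an
empty list now requires exactly two entries on ftrace->event_pair. A minimal
standalone sketch of that rule, with plain counters instead of the kernel
list API (illustrative only, not perf code):

	/* sketch: accept one target function, or exactly two events */
	#include <stdio.h>

	static int validate_targets(int nr_funcs, int nr_events)
	{
		if (nr_funcs > 0) {
			if (nr_funcs != 1) {
				fprintf(stderr, "ERROR: Too many target functions.\n");
				return -1;
			}
			return 0;	/* single-function kprobe mode */
		}
		if (nr_events != 2) {
			fprintf(stderr, "ERROR: Needs two target events.\n");
			return -1;
		}
		return 0;		/* begin/end event-pair mode */
	}

	int main(void)
	{
		printf("%d\n", validate_targets(1, 0));	/* 0: one function */
		printf("%d\n", validate_targets(0, 2));	/* 0: event pair */
		printf("%d\n", validate_targets(0, 1));	/* -1: needs two */
		return 0;
	}
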
@@ -36,6 +48,13 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
return -1;
}
+ skel->rodata->bucket_range = ftrace->bucket_range;
+ skel->rodata->min_latency = ftrace->min_latency;
+ skel->rodata->bucket_num = ftrace->bucket_num;
+ if (ftrace->bucket_range && ftrace->bucket_num) {
+ bpf_map__set_max_entries(skel->maps.latency, ftrace->bucket_num);
+ }
+
/* don't need to set cpu filter for system-wide mode */
if (ftrace->target.cpu_list) {
ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
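
Both the rodata assignments and the map resize above have to land between
func_latency_bpf__open() and the later load: read-only constants and map
geometry are frozen once the object is loaded. A generic libbpf sketch of
the same ordering (object path assumed; "latency" is the map name from the
patch):

	/* sketch: size a map between open and load */
	#include <bpf/libbpf.h>

	int load_with_sized_map(const char *path, unsigned int nbuckets)
	{
		struct bpf_object *obj = bpf_object__open_file(path, NULL);
		struct bpf_map *map;
		int err = -1;

		if (!obj)
			return -1;

		map = bpf_object__find_map_by_name(obj, "latency");

		/* max_entries may only change before load; afterwards the
		 * kernel has already created the map and this fails */
		if (map)
			err = bpf_map__set_max_entries(map, nbuckets);
		if (!err)
			err = bpf_object__load(obj);

		bpf_object__close(obj);
		return err;
	}
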
@@ -83,20 +102,46 @@ int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
}
}
- skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
- false, func->name);
- if (IS_ERR(skel->links.func_begin)) {
- pr_err("Failed to attach fentry program\n");
- err = PTR_ERR(skel->links.func_begin);
- goto out;
- }
+ skel->bss->min = INT64_MAX;
- skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
- true, func->name);
- if (IS_ERR(skel->links.func_end)) {
- pr_err("Failed to attach fexit program\n");
- err = PTR_ERR(skel->links.func_end);
- goto out;
+ if (func) {
+ skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
+ false, func->name);
+ if (IS_ERR(skel->links.func_begin)) {
+ pr_err("Failed to attach fentry program\n");
+ err = PTR_ERR(skel->links.func_begin);
+ goto out;
+ }
+
+ skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
+ true, func->name);
+ if (IS_ERR(skel->links.func_end)) {
+ pr_err("Failed to attach fexit program\n");
+ err = PTR_ERR(skel->links.func_end);
+ goto out;
+ }
+ } else {
+ struct filter_entry *event;
+
+ event = list_first_entry(&ftrace->event_pair, struct filter_entry, list);
+
+ skel->links.event_begin = bpf_program__attach_raw_tracepoint(skel->progs.event_begin,
+ event->name);
+ if (IS_ERR(skel->links.event_begin)) {
+ pr_err("Failed to attach first tracepoint program\n");
+ err = PTR_ERR(skel->links.event_begin);
+ goto out;
+ }
+
+ event = list_next_entry(event, list);
+
+ skel->links.event_end = bpf_program__attach_raw_tracepoint(skel->progs.event_end,
+ event->name);
+ if (IS_ERR(skel->links.event_end)) {
+ pr_err("Failed to attach second tracepoint program\n");
+ err = PTR_ERR(skel->links.event_end);
+ goto out;
+ }
}
/* XXX: we don't actually use this fd - just for poll() */
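
The attach logic above now has two legs: the single-function mode keeps the
kprobe/kretprobe pair, while the event-pair mode attaches one raw tracepoint
program per event, taking the first list entry as "begin" and the second as
"end". A small sketch of the raw-tracepoint leg (error convention hedged:
libbpf >= 1.0 returns NULL and sets errno, while the perf tree wraps errors
and tests them with IS_ERR as seen above):

	/* sketch: attach one program to one leg of the event pair */
	#include <errno.h>
	#include <stdio.h>
	#include <bpf/libbpf.h>

	static struct bpf_link *attach_leg(struct bpf_program *prog,
					   const char *name)
	{
		/* raw tracepoints take the bare event name (for example
		 * "sched_switch"), not the "subsys:event" form */
		struct bpf_link *link =
			bpf_program__attach_raw_tracepoint(prog, name);

		if (!link)
			fprintf(stderr, "attach %s failed: %d\n", name, -errno);
		return link;
	}
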
@@ -118,8 +163,8 @@ int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
return 0;
}
-int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
- int buckets[])
+int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace,
+ int buckets[], struct stats *stats)
{
int i, fd, err;
u32 idx;
@@ -132,7 +177,7 @@ int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
if (hist == NULL)
return -ENOMEM;
- for (idx = 0; idx < NUM_BUCKET; idx++) {
+ for (idx = 0; idx < skel->rodata->bucket_num; idx++) {
err = bpf_map_lookup_elem(fd, &idx, hist);
if (err) {
buckets[idx] = 0;
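
The lookup in the loop above reads a per-CPU map, so a single call fills one
slot per possible CPU and the caller sums them into one bucket. A sketch of
that read, assuming an 8-byte counter value per CPU:

	/* sketch: sum one bucket of a BPF_MAP_TYPE_PERCPU_ARRAY */
	#include <errno.h>
	#include <stdlib.h>
	#include <bpf/bpf.h>
	#include <bpf/libbpf.h>

	static int read_bucket(int map_fd, __u32 idx, long long *sum)
	{
		int i, ncpus = libbpf_num_possible_cpus();
		long long *vals;

		if (ncpus < 0)
			return ncpus;
		vals = calloc(ncpus, sizeof(*vals));
		if (!vals)
			return -ENOMEM;

		/* the kernel copies one 8-byte-aligned slot per possible CPU;
		 * a failed lookup just means the bucket was never written */
		if (bpf_map_lookup_elem(map_fd, &idx, vals) == 0) {
			for (i = 0; i < ncpus; i++)
				*sum += vals[i];
		}
		free(vals);
		return 0;
	}
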
@@ -143,6 +188,19 @@ int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
buckets[idx] += hist[i];
}
+ if (skel->bss->count) {
+ stats->mean = skel->bss->total / skel->bss->count;
+ stats->n = skel->bss->count;
+ stats->max = skel->bss->max;
+ stats->min = skel->bss->min;
+
+ if (!ftrace->use_nsec) {
+ stats->mean /= 1000;
+ stats->max /= 1000;
+ stats->min /= 1000;
+ }
+ }
+
free(hist);
return 0;
}
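
The final hunk fills the new stats from the BPF globals and converts from
nanoseconds to microseconds unless use_nsec is set. Note that total / count
is an integer division performed before the value reaches the stats field,
and the max/min divisions truncate. A standalone sketch of the unit
handling, with illustrative values:

	/* sketch: ns -> usec reporting, mirroring the conversion above */
	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t total = 2500501, count = 2;	/* ns, illustrative */
		uint64_t max = 2000333, min = 500167;	/* ns */
		double mean = (double)(total / count);	/* 1250250 ns, fraction lost */
		int use_nsec = 0;

		if (!use_nsec) {	/* default report unit is usec */
			mean /= 1000.0;	/* 1250.25 usec */
			max /= 1000;	/* 2000 usec, remainder truncated */
			min /= 1000;	/* 500 usec, remainder truncated */
		}
		printf("mean %.2f max %" PRIu64 " min %" PRIu64 "\n",
		       mean, max, min);
		return 0;
	}
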