Diffstat (limited to 'tools/perf/builtin-trace.c')
-rw-r--r-- | tools/perf/builtin-trace.c | 101
1 file changed, 0 insertions, 101 deletions
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 071e7598391f..543c379d2a57 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -122,7 +122,6 @@ struct trace {
 	struct syscalltbl	*sctbl;
 	struct {
 		struct syscall  *table;
-		struct bpf_map  *map;
 		struct { // per syscall BPF_MAP_TYPE_PROG_ARRAY
 			struct bpf_map  *sys_enter,
 					*sys_exit;
@@ -1225,16 +1224,6 @@ struct syscall {
 };
 
 /*
- * Must match what is in the BPF program:
- *
- * tools/perf/examples/bpf/augmented_raw_syscalls.c
- */
-struct bpf_map_syscall_entry {
-	bool	enabled;
-	u16	string_args_len[RAW_SYSCALL_ARGS_NUM];
-};
-
-/*
  * We need to have this 'calculated' boolean because in some cases we really
  * don't know what is the duration of a syscall, for instance, when we start
  * a session and some threads are waiting for a syscall to finish, say 'poll',
@@ -3259,7 +3248,6 @@ static void trace__set_bpf_map_filtered_pids(struct trace *trace)
 
 static void trace__set_bpf_map_syscalls(struct trace *trace)
 {
-	trace->syscalls.map = trace__find_bpf_map_by_name(trace, "syscalls");
 	trace->syscalls.prog_array.sys_enter = trace__find_bpf_map_by_name(trace, "syscalls_sys_enter");
 	trace->syscalls.prog_array.sys_exit  = trace__find_bpf_map_by_name(trace, "syscalls_sys_exit");
 }
@@ -3339,80 +3327,6 @@ static int trace__bpf_prog_sys_exit_fd(struct trace *trace, int id)
 	return sc ? bpf_program__fd(sc->bpf_prog.sys_exit) : bpf_program__fd(trace->syscalls.unaugmented_prog);
 }
 
-static void trace__init_bpf_map_syscall_args(struct trace *trace, int id, struct bpf_map_syscall_entry *entry)
-{
-	struct syscall *sc = trace__syscall_info(trace, NULL, id);
-	int arg = 0;
-
-	if (sc == NULL)
-		goto out;
-
-	for (; arg < sc->nr_args; ++arg) {
-		entry->string_args_len[arg] = 0;
-		if (sc->arg_fmt[arg].scnprintf == SCA_FILENAME) {
-			/* Should be set like strace -s strsize */
-			entry->string_args_len[arg] = PATH_MAX;
-		}
-	}
-out:
-	for (; arg < 6; ++arg)
-		entry->string_args_len[arg] = 0;
-}
-static int trace__set_ev_qualifier_bpf_filter(struct trace *trace)
-{
-	int fd = bpf_map__fd(trace->syscalls.map);
-	struct bpf_map_syscall_entry value = {
-		.enabled = !trace->not_ev_qualifier,
-	};
-	int err = 0;
-	size_t i;
-
-	for (i = 0; i < trace->ev_qualifier_ids.nr; ++i) {
-		int key = trace->ev_qualifier_ids.entries[i];
-
-		if (value.enabled) {
-			trace__init_bpf_map_syscall_args(trace, key, &value);
-			trace__init_syscall_bpf_progs(trace, key);
-		}
-
-		err = bpf_map_update_elem(fd, &key, &value, BPF_EXIST);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-static int __trace__init_syscalls_bpf_map(struct trace *trace, bool enabled)
-{
-	int fd = bpf_map__fd(trace->syscalls.map);
-	struct bpf_map_syscall_entry value = {
-		.enabled = enabled,
-	};
-	int err = 0, key;
-
-	for (key = 0; key < trace->sctbl->syscalls.nr_entries; ++key) {
-		if (enabled)
-			trace__init_bpf_map_syscall_args(trace, key, &value);
-
-		err = bpf_map_update_elem(fd, &key, &value, BPF_ANY);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-static int trace__init_syscalls_bpf_map(struct trace *trace)
-{
-	bool enabled = true;
-
-	if (trace->ev_qualifier_ids.nr)
-		enabled = trace->not_ev_qualifier;
-
-	return __trace__init_syscalls_bpf_map(trace, enabled);
-}
-
 static struct bpf_program *trace__find_usable_bpf_prog_entry(struct trace *trace, struct syscall *sc)
 {
 	struct tep_format_field *field, *candidate_field;
@@ -3627,16 +3541,6 @@ static void trace__set_bpf_map_syscalls(struct trace *trace __maybe_unused)
 {
 }
 
-static int trace__set_ev_qualifier_bpf_filter(struct trace *trace __maybe_unused)
-{
-	return 0;
-}
-
-static int trace__init_syscalls_bpf_map(struct trace *trace __maybe_unused)
-{
-	return 0;
-}
-
 static struct bpf_program *trace__find_bpf_program_by_title(struct trace *trace __maybe_unused,
 							     const char *name __maybe_unused)
 {
@@ -3670,8 +3574,6 @@ static bool trace__only_augmented_syscalls_evsels(struct trace *trace)
 
 static int trace__set_ev_qualifier_filter(struct trace *trace)
 {
-	if (trace->syscalls.map)
-		return trace__set_ev_qualifier_bpf_filter(trace);
 	if (trace->syscalls.events.sys_enter)
 		return trace__set_ev_qualifier_tp_filter(trace);
 	return 0;
@@ -4045,9 +3947,6 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
 	if (err < 0)
 		goto out_error_mem;
 
-	if (trace->syscalls.map)
-		trace__init_syscalls_bpf_map(trace);
-
 	if (trace->syscalls.prog_array.sys_enter)
 		trace__init_syscalls_bpf_prog_array_maps(trace);
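For context, here is a minimal userspace sketch, not taken from this commit, of how a per-syscall BPF_MAP_TYPE_PROG_ARRAY such as syscalls_sys_enter/syscalls_sys_exit can be populated with program file descriptors through libbpf; this is the mechanism that remains once the 'syscalls' map and its bpf_map_syscall_entry filter are gone. The function populate_prog_array() and its parameters are illustrative assumptions; only bpf_map__fd(), bpf_program__fd() and bpf_map_update_elem() are real libbpf calls.

#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/*
 * Sketch: store the fd of a per-syscall BPF program at the slot for its
 * syscall id, so the generic sys_enter/sys_exit handlers can tail-call it.
 * Names are illustrative; this is not the exact perf code.
 */
static int populate_prog_array(struct bpf_map *prog_array,
			       struct bpf_program *prog, int syscall_id)
{
	int map_fd  = bpf_map__fd(prog_array);	/* e.g. syscalls_sys_enter */
	int prog_fd = bpf_program__fd(prog);	/* per-syscall augmenter */

	if (map_fd < 0 || prog_fd < 0)
		return -1;

	/* key = syscall id, value = program fd to tail-call for that id */
	return bpf_map_update_elem(map_fd, &syscall_id, &prog_fd, BPF_ANY);
}

A likely usage pattern, hinted at by trace__bpf_prog_sys_exit_fd() above, is to write an "unaugmented" fallback program fd into every slot first and then override the slots of syscalls that have a dedicated augmenter, which is presumably what trace__init_syscalls_bpf_prog_array_maps() does in the remaining code.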