Diffstat (limited to 'tools')
-rw-r--r--  tools/perf/util/bpf_kwork.c                |  38
-rw-r--r--  tools/perf/util/bpf_skel/kwork_trace.bpf.c | 150
2 files changed, 187 insertions, 1 deletion
diff --git a/tools/perf/util/bpf_kwork.c b/tools/perf/util/bpf_kwork.c
index 433bfadd3af1..a7af3ca5f503 100644
--- a/tools/perf/util/bpf_kwork.c
+++ b/tools/perf/util/bpf_kwork.c
@@ -62,9 +62,45 @@ void perf_kwork__trace_finish(void)
 	skel->bss->enabled = 0;
 }
 
+static int get_work_name_from_map(struct work_key *key, char **ret_name)
+{
+	char name[MAX_KWORKNAME] = { 0 };
+	int fd = bpf_map__fd(skel->maps.perf_kwork_names);
+
+	*ret_name = NULL;
+
+	if (fd < 0) {
+		pr_debug("Invalid names map fd\n");
+		return 0;
+	}
+
+	if ((bpf_map_lookup_elem(fd, key, name) == 0) && (strlen(name) != 0)) {
+		*ret_name = strdup(name);
+		if (*ret_name == NULL) {
+			pr_err("Failed to copy work name\n");
+			return -1;
+		}
+	}
+
+	return 0;
+}
+
+static void irq_load_prepare(struct perf_kwork *kwork)
+{
+	if (kwork->report == KWORK_REPORT_RUNTIME) {
+		bpf_program__set_autoload(skel->progs.report_irq_handler_entry, true);
+		bpf_program__set_autoload(skel->progs.report_irq_handler_exit, true);
+	}
+}
+
+static struct kwork_class_bpf kwork_irq_bpf = {
+	.load_prepare  = irq_load_prepare,
+	.get_work_name = get_work_name_from_map,
+};
+
 static struct kwork_class_bpf *
 kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
-	[KWORK_CLASS_IRQ]       = NULL,
+	[KWORK_CLASS_IRQ]       = &kwork_irq_bpf,
 	[KWORK_CLASS_SOFTIRQ]   = NULL,
 	[KWORK_CLASS_WORKQUEUE] = NULL,
 };
diff --git a/tools/perf/util/bpf_skel/kwork_trace.bpf.c b/tools/perf/util/bpf_skel/kwork_trace.bpf.c
index 36112be831e3..1925407d1c16 100644
--- a/tools/perf/util/bpf_skel/kwork_trace.bpf.c
+++ b/tools/perf/util/bpf_skel/kwork_trace.bpf.c
@@ -71,4 +71,154 @@ int enabled = 0;
 int has_cpu_filter = 0;
 int has_name_filter = 0;
 
+static __always_inline int local_strncmp(const char *s1,
+					 unsigned int sz, const char *s2)
+{
+	int ret = 0;
+	unsigned int i;
+
+	for (i = 0; i < sz; i++) {
+		ret = (unsigned char)s1[i] - (unsigned char)s2[i];
+		if (ret || !s1[i] || !s2[i])
+			break;
+	}
+
+	return ret;
+}
+
+static __always_inline int trace_event_match(struct work_key *key, char *name)
+{
+	__u8 *cpu_val;
+	char *name_val;
+	__u32 zero = 0;
+	__u32 cpu = bpf_get_smp_processor_id();
+
+	if (!enabled)
+		return 0;
+
+	if (has_cpu_filter) {
+		cpu_val = bpf_map_lookup_elem(&perf_kwork_cpu_filter, &cpu);
+		if (!cpu_val)
+			return 0;
+	}
+
+	if (has_name_filter && (name != NULL)) {
+		name_val = bpf_map_lookup_elem(&perf_kwork_name_filter, &zero);
+		if (name_val &&
+		    (local_strncmp(name_val, MAX_KWORKNAME, name) != 0)) {
+			return 0;
+		}
+	}
+
+	return 1;
+}
+
+static __always_inline void do_update_time(void *map, struct work_key *key,
+					   __u64 time_start, __u64 time_end)
+{
+	struct report_data zero, *data;
+	__s64 delta = time_end - time_start;
+
+	if (delta < 0)
+		return;
+
+	data = bpf_map_lookup_elem(map, key);
+	if (!data) {
+		__builtin_memset(&zero, 0, sizeof(zero));
+		bpf_map_update_elem(map, key, &zero, BPF_NOEXIST);
+		data = bpf_map_lookup_elem(map, key);
+		if (!data)
+			return;
+	}
+
+	if ((delta > data->max_time) ||
+	    (data->max_time == 0)) {
+		data->max_time       = delta;
+		data->max_time_start = time_start;
+		data->max_time_end   = time_end;
+	}
+
+	data->total_time += delta;
+	data->nr++;
+}
+
+static __always_inline void do_update_timestart(void *map, struct work_key *key)
+{
+	__u64 ts = bpf_ktime_get_ns();
+
+	bpf_map_update_elem(map, key, &ts, BPF_ANY);
+}
+
+static __always_inline void do_update_timeend(void *report_map, void *time_map,
+					      struct work_key *key)
+{
+	__u64 *time = bpf_map_lookup_elem(time_map, key);
+
+	if (time) {
+		bpf_map_delete_elem(time_map, key);
+		do_update_time(report_map, key, *time, bpf_ktime_get_ns());
+	}
+}
+
+static __always_inline void do_update_name(void *map,
+					   struct work_key *key, char *name)
+{
+	if (!bpf_map_lookup_elem(map, key))
+		bpf_map_update_elem(map, key, name, BPF_ANY);
+}
+
+static __always_inline int update_timestart_and_name(void *time_map,
+						     void *names_map,
+						     struct work_key *key,
+						     char *name)
+{
+	if (!trace_event_match(key, name))
+		return 0;
+
+	do_update_timestart(time_map, key);
+	do_update_name(names_map, key, name);
+
+	return 0;
+}
+
+static __always_inline int update_timeend(void *report_map,
+					  void *time_map, struct work_key *key)
+{
+	if (!trace_event_match(key, NULL))
+		return 0;
+
+	do_update_timeend(report_map, time_map, key);
+
+	return 0;
+}
+
+SEC("tracepoint/irq/irq_handler_entry")
+int report_irq_handler_entry(struct trace_event_raw_irq_handler_entry *ctx)
+{
+	char name[MAX_KWORKNAME];
+	struct work_key key = {
+		.type = KWORK_CLASS_IRQ,
+		.cpu  = bpf_get_smp_processor_id(),
+		.id   = (__u64)ctx->irq,
+	};
+	void *name_addr = (void *)ctx + (ctx->__data_loc_name & 0xffff);
+
+	bpf_probe_read_kernel_str(name, sizeof(name), name_addr);
+
+	return update_timestart_and_name(&perf_kwork_time,
+					 &perf_kwork_names, &key, name);
+}
+
+SEC("tracepoint/irq/irq_handler_exit")
+int report_irq_handler_exit(struct trace_event_raw_irq_handler_exit *ctx)
+{
+	struct work_key key = {
+		.type = KWORK_CLASS_IRQ,
+		.cpu  = bpf_get_smp_processor_id(),
+		.id   = (__u64)ctx->irq,
+	};
+
+	return update_timeend(&perf_kwork_report, &perf_kwork_time, &key);
+}
+
 char LICENSE[] SEC("license") = "Dual BSD/GPL";
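
Note on the data flow: the BPF programs above fill perf_kwork_time with per-key entry timestamps, perf_kwork_names with the IRQ handler names, and perf_kwork_report with accumulated report_data. The sketch below is a minimal, hypothetical user-space consumer, not part of this patch: dump_irq_report(), the locally mirrored struct definitions, and the MAX_KWORKNAME value are illustrative assumptions, while the map names and field layout come from the diff above. It walks the report map with plain libbpf calls and resolves names the same way get_work_name_from_map() does.

/* Hypothetical illustration only -- not code from this patch. */
#include <stdio.h>
#include <linux/types.h>
#include <bpf/bpf.h>

#define MAX_KWORKNAME 128	/* must match the BPF-side definition; value assumed here */

/* Mirrors the key/value layout used by the BPF programs in kwork_trace.bpf.c. */
struct work_key {
	__u32 type;
	__u32 cpu;
	__u64 id;
};

struct report_data {
	__u64 nr;
	__u64 total_time;
	__u64 max_time;
	__u64 max_time_start;
	__u64 max_time_end;
};

/*
 * report_fd/names_fd would come from bpf_map__fd() on the skeleton's
 * perf_kwork_report and perf_kwork_names maps, as bpf_kwork.c does.
 */
static void dump_irq_report(int report_fd, int names_fd)
{
	char name[MAX_KWORKNAME];
	struct report_data data;
	struct work_key key, prev;
	void *prev_key = NULL;

	/* Walk every (class, cpu, id) entry accumulated by update_timeend(). */
	while (bpf_map_get_next_key(report_fd, prev_key, &key) == 0) {
		prev = key;
		prev_key = &prev;

		if (bpf_map_lookup_elem(report_fd, &key, &data) != 0)
			continue;

		/* Name lookup can fail; fall back to the raw IRQ id. */
		if (bpf_map_lookup_elem(names_fd, &key, name) != 0)
			snprintf(name, sizeof(name), "irq:%llu",
				 (unsigned long long)key.id);

		printf("cpu%u %-24s nr=%llu total=%lluns max=%lluns\n",
		       key.cpu, name,
		       (unsigned long long)data.nr,
		       (unsigned long long)data.total_time,
		       (unsigned long long)data.max_time);
	}
}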