diff options

| author | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-26 21:25:21 +0300 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2018-08-26 21:25:21 +0300 |
| commit | d207ea8e74ff45be0838afa12bdd2492fa9dc8bc (patch) | |
| tree | 97cfb3ed5c1bb42790e98e62b823526f61000b9f | /tools/perf/util |
| parent | 2a8a2b7c49d6eb5f3348892c4676267376cfd40b (diff) | |
| parent | 66e5db4a1ccc64f278653bc69dc406d184dc750a (diff) | |
| download | linux-d207ea8e74ff45be0838afa12bdd2492fa9dc8bc.tar.xz | |
Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf updates from Thomas Gleixner:
"Kernel:
- Improve kallsyms coverage
- Add x86 entry trampolines to kcore
- Fix ARM SPE handling
- Correct PPC event post processing
Tools:
- Make the build system more robust
- Small fixes and enhancements all over the place
- Update kernel ABI header copies
- Preparatory work for converting libtraceevent to a shared library
- License cleanups"
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (100 commits)
tools arch: Update arch/x86/lib/memcpy_64.S copy used in 'perf bench mem memcpy'
tools arch x86: Update tools's copy of cpufeatures.h
perf python: Fix pyrf_evlist__read_on_cpu() interface
perf mmap: Store real cpu number in 'struct perf_mmap'
perf tools: Remove ext from struct kmod_path
perf tools: Add gzip_is_compressed function
perf tools: Add lzma_is_compressed function
perf tools: Add is_compressed callback to compressions array
perf tools: Move the temp file processing into decompress_kmodule
perf tools: Use compression id in decompress_kmodule()
perf tools: Store compression id into struct dso
perf tools: Add compression id into 'struct kmod_path'
perf tools: Make is_supported_compression() static
perf tools: Make decompress_to_file() function static
perf tools: Get rid of dso__needs_decompress() call in __open_dso()
perf tools: Get rid of dso__needs_decompress() call in symbol__disassemble()
perf tools: Get rid of dso__needs_decompress() call in read_object_code()
tools lib traceevent: Change to SPDX License format
perf llvm: Allow passing options to llc in addition to clang
perf parser: Improve error message for PMU address filters
...
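
The "perf llvm: Allow passing options to llc in addition to clang" entry above corresponds to the llvm-utils hunks later in this diff: when an `opts` value is configured, clang is run with `-emit-llvm` and its output is piped through `llc -march=bpf <opts> -filetype=obj -o -`. A sketch of the corresponding ~/.perfconfig entry; the `-mattr=dwarfris` value is only the example cited in the llvm-utils.h comment, not a recommendation:

```
[llvm]
	# Extra options passed to llc; when set, clang's bitcode is piped
	# through "llc -march=bpf <opts> -filetype=obj -o -".
	opts = -mattr=dwarfris
```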
Diffstat (limited to 'tools/perf/util')
42 files changed, 1634 insertions, 360 deletions
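
A large part of the annotate.c/annotate.h churn below replaces the single per-event `percent` value with a `percent[PERCENT_MAX]` array, so the displayed percentage can be based on hits or period, measured against the symbol's own totals ("local") or against all non-filtered samples / the total period ("global"). The base is chosen by a small parser that accepts strings such as "period-local" in either order; it is presumably wired to a command-line option elsewhere in the series, which is not shown in these hunks. A minimal standalone sketch of that parsing, condensed from the hunks below (the `main()` driver is illustrative only):

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum {
	PERCENT_HITS_LOCAL,
	PERCENT_HITS_GLOBAL,
	PERCENT_PERIOD_LOCAL,
	PERCENT_PERIOD_GLOBAL,
	PERCENT_MAX,
};

/* Map a "<base>", "<scope>" pair to a percent type, or (unsigned int)-1. */
static unsigned int parse_percent_type(const char *str1, const char *str2)
{
	unsigned int type = (unsigned int) -1;

	if (!strcmp("period", str1)) {
		if (!strcmp("local", str2))
			type = PERCENT_PERIOD_LOCAL;
		else if (!strcmp("global", str2))
			type = PERCENT_PERIOD_GLOBAL;
	}

	if (!strcmp("hits", str1)) {
		if (!strcmp("local", str2))
			type = PERCENT_HITS_LOCAL;
		else if (!strcmp("global", str2))
			type = PERCENT_HITS_GLOBAL;
	}

	return type;
}

int main(int argc, char **argv)
{
	char *str1 = strdup(argc > 1 ? argv[1] : "period-local");
	unsigned int type = (unsigned int) -1;
	char *str2;

	if (!str1)
		return 1;

	str2 = strchr(str1, '-');
	if (str2) {
		*str2++ = 0;
		type = parse_percent_type(str1, str2);
		if (type == (unsigned int) -1)	/* also accept "local-period" etc. */
			type = parse_percent_type(str2, str1);
	}

	printf("percent type: %u (PERCENT_MAX is %u)\n", type, (unsigned int) PERCENT_MAX);
	free(str1);
	return 0;
}
```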
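Further down, dso.c stops relying on the file extension alone: each `compressions[]` entry gains an `is_compressed()` callback, the detected compression id is cached in `struct dso`, and `decompress_kmodule()` simply opens the file as-is when it turns out not to be compressed (e.g. an already-uncompressed ~/.debug object found for an .xz module). The callbacks are plain magic-byte checks; here is a self-contained sketch of that idea. The xz magic matches the lzma.c hunk in this diff; the gzip magic (0x1f 0x8b) is the standard one and is an assumption here, since the gzip hunk is not part of this section.

```c
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Read the first 'len' bytes of 'path' and compare them against 'magic'. */
static bool file_has_magic(const char *path, const uint8_t *magic, size_t len)
{
	uint8_t buf[8] = { 0 };
	ssize_t rc;
	int fd;

	if (len > sizeof(buf))
		return false;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return false;

	rc = read(fd, buf, len);
	close(fd);
	return rc == (ssize_t) len && !memcmp(buf, magic, len);
}

static bool lzma_is_compressed(const char *path)
{
	/* xz magic bytes, as used in the lzma.c hunk below */
	static const uint8_t magic[6] = { 0xFD, '7', 'z', 'X', 'Z', 0x00 };
	return file_has_magic(path, magic, sizeof(magic));
}

static bool gzip_is_compressed(const char *path)
{
	/* standard gzip magic; the corresponding hunk is not shown here */
	static const uint8_t magic[2] = { 0x1F, 0x8B };
	return file_has_magic(path, magic, sizeof(magic));
}

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "module.ko.xz";

	printf("%s: gzip=%d xz=%d\n", path,
	       gzip_is_compressed(path), lzma_is_compressed(path));
	return 0;
}
```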
diff --git a/tools/perf/util/Build b/tools/perf/util/Build index b604ef334dc9..7efe15b9618d 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -87,6 +87,7 @@ libperf-$(CONFIG_AUXTRACE) += intel-pt.o libperf-$(CONFIG_AUXTRACE) += intel-bts.o libperf-$(CONFIG_AUXTRACE) += arm-spe.o libperf-$(CONFIG_AUXTRACE) += arm-spe-pkt-decoder.o +libperf-$(CONFIG_AUXTRACE) += s390-cpumsf.o ifdef CONFIG_LIBOPENCSD libperf-$(CONFIG_AUXTRACE) += cs-etm.o diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index f91775b4bc3c..20061cf42288 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -49,6 +49,7 @@ struct annotation_options annotation__default_options = { .jump_arrows = true, .annotate_src = true, .offset_level = ANNOTATION__OFFSET_JUMP_TARGETS, + .percent_type = PERCENT_PERIOD_LOCAL, }; static regex_t file_lineno; @@ -1108,7 +1109,7 @@ annotation_line__new(struct annotate_args *args, size_t privsize) if (perf_evsel__is_group_event(evsel)) nr = evsel->nr_members; - size += sizeof(al->samples[0]) * nr; + size += sizeof(al->data[0]) * nr; al = zalloc(size); if (al) { @@ -1117,7 +1118,7 @@ annotation_line__new(struct annotate_args *args, size_t privsize) al->offset = args->offset; al->line = strdup(args->line); al->line_nr = args->line_nr; - al->samples_nr = nr; + al->data_nr = nr; } return al; @@ -1297,7 +1298,8 @@ static int disasm_line__print(struct disasm_line *dl, u64 start, int addr_fmt_wi static int annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start, struct perf_evsel *evsel, u64 len, int min_pcnt, int printed, - int max_lines, struct annotation_line *queue, int addr_fmt_width) + int max_lines, struct annotation_line *queue, int addr_fmt_width, + int percent_type) { struct disasm_line *dl = container_of(al, struct disasm_line, al); static const char *prev_line; @@ -1309,15 +1311,18 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start const char *color; struct annotation *notes = symbol__annotation(sym); - for (i = 0; i < al->samples_nr; i++) { - struct annotation_data *sample = &al->samples[i]; + for (i = 0; i < al->data_nr; i++) { + double percent; - if (sample->percent > max_percent) - max_percent = sample->percent; + percent = annotation_data__percent(&al->data[i], + percent_type); + + if (percent > max_percent) + max_percent = percent; } - if (al->samples_nr > nr_percent) - nr_percent = al->samples_nr; + if (al->data_nr > nr_percent) + nr_percent = al->data_nr; if (max_percent < min_pcnt) return -1; @@ -1330,7 +1335,8 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start if (queue == al) break; annotation_line__print(queue, sym, start, evsel, len, - 0, 0, 1, NULL, addr_fmt_width); + 0, 0, 1, NULL, addr_fmt_width, + percent_type); } } @@ -1351,18 +1357,20 @@ annotation_line__print(struct annotation_line *al, struct symbol *sym, u64 start } for (i = 0; i < nr_percent; i++) { - struct annotation_data *sample = &al->samples[i]; + struct annotation_data *data = &al->data[i]; + double percent; - color = get_percent_color(sample->percent); + percent = annotation_data__percent(data, percent_type); + color = get_percent_color(percent); if (symbol_conf.show_total_period) color_fprintf(stdout, color, " %11" PRIu64, - sample->he.period); + data->he.period); else if (symbol_conf.show_nr_samples) color_fprintf(stdout, color, " %7" PRIu64, - sample->he.nr_samples); + data->he.nr_samples); else - color_fprintf(stdout, color, " %7.2f", sample->percent); + 
color_fprintf(stdout, color, " %7.2f", percent); } printf(" : "); @@ -1621,6 +1629,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) char symfs_filename[PATH_MAX]; struct kcore_extract kce; bool delete_extract = false; + bool decomp = false; int stdout_fd[2]; int lineno = 0; int nline; @@ -1654,6 +1663,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) tmp, sizeof(tmp)) < 0) goto out; + decomp = true; strcpy(symfs_filename, tmp); } @@ -1740,7 +1750,7 @@ out_free_command: out_remove_tmp: close(stdout_fd[0]); - if (dso__needs_decompress(dso)) + if (decomp) unlink(symfs_filename); if (delete_extract) @@ -1753,34 +1763,45 @@ out_close_stdout: goto out_free_command; } -static void calc_percent(struct sym_hist *hist, - struct annotation_data *sample, +static void calc_percent(struct sym_hist *sym_hist, + struct hists *hists, + struct annotation_data *data, s64 offset, s64 end) { unsigned int hits = 0; u64 period = 0; while (offset < end) { - hits += hist->addr[offset].nr_samples; - period += hist->addr[offset].period; + hits += sym_hist->addr[offset].nr_samples; + period += sym_hist->addr[offset].period; ++offset; } - if (hist->nr_samples) { - sample->he.period = period; - sample->he.nr_samples = hits; - sample->percent = 100.0 * hits / hist->nr_samples; + if (sym_hist->nr_samples) { + data->he.period = period; + data->he.nr_samples = hits; + data->percent[PERCENT_HITS_LOCAL] = 100.0 * hits / sym_hist->nr_samples; } + + if (hists->stats.nr_non_filtered_samples) + data->percent[PERCENT_HITS_GLOBAL] = 100.0 * hits / hists->stats.nr_non_filtered_samples; + + if (sym_hist->period) + data->percent[PERCENT_PERIOD_LOCAL] = 100.0 * period / sym_hist->period; + + if (hists->stats.total_period) + data->percent[PERCENT_PERIOD_GLOBAL] = 100.0 * period / hists->stats.total_period; } static void annotation__calc_percent(struct annotation *notes, - struct perf_evsel *evsel, s64 len) + struct perf_evsel *leader, s64 len) { struct annotation_line *al, *next; + struct perf_evsel *evsel; list_for_each_entry(al, ¬es->src->source, node) { s64 end; - int i; + int i = 0; if (al->offset == -1) continue; @@ -1788,14 +1809,17 @@ static void annotation__calc_percent(struct annotation *notes, next = annotation_line__next(al, ¬es->src->source); end = next ? 
next->offset : len; - for (i = 0; i < al->samples_nr; i++) { - struct annotation_data *sample; - struct sym_hist *hist; + for_each_group_evsel(evsel, leader) { + struct hists *hists = evsel__hists(evsel); + struct annotation_data *data; + struct sym_hist *sym_hist; + + BUG_ON(i >= al->data_nr); - hist = annotation__histogram(notes, evsel->idx + i); - sample = &al->samples[i]; + sym_hist = annotation__histogram(notes, evsel->idx); + data = &al->data[i++]; - calc_percent(hist, sample, al->offset, end); + calc_percent(sym_hist, hists, data, al->offset, end); } } } @@ -1846,7 +1870,8 @@ int symbol__annotate(struct symbol *sym, struct map *map, return symbol__disassemble(sym, &args); } -static void insert_source_line(struct rb_root *root, struct annotation_line *al) +static void insert_source_line(struct rb_root *root, struct annotation_line *al, + struct annotation_options *opts) { struct annotation_line *iter; struct rb_node **p = &root->rb_node; @@ -1859,8 +1884,10 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al) ret = strcmp(iter->path, al->path); if (ret == 0) { - for (i = 0; i < al->samples_nr; i++) - iter->samples[i].percent_sum += al->samples[i].percent; + for (i = 0; i < al->data_nr; i++) { + iter->data[i].percent_sum += annotation_data__percent(&al->data[i], + opts->percent_type); + } return; } @@ -1870,8 +1897,10 @@ static void insert_source_line(struct rb_root *root, struct annotation_line *al) p = &(*p)->rb_right; } - for (i = 0; i < al->samples_nr; i++) - al->samples[i].percent_sum = al->samples[i].percent; + for (i = 0; i < al->data_nr; i++) { + al->data[i].percent_sum = annotation_data__percent(&al->data[i], + opts->percent_type); + } rb_link_node(&al->rb_node, parent, p); rb_insert_color(&al->rb_node, root); @@ -1881,10 +1910,10 @@ static int cmp_source_line(struct annotation_line *a, struct annotation_line *b) { int i; - for (i = 0; i < a->samples_nr; i++) { - if (a->samples[i].percent_sum == b->samples[i].percent_sum) + for (i = 0; i < a->data_nr; i++) { + if (a->data[i].percent_sum == b->data[i].percent_sum) continue; - return a->samples[i].percent_sum > b->samples[i].percent_sum; + return a->data[i].percent_sum > b->data[i].percent_sum; } return 0; @@ -1949,8 +1978,8 @@ static void print_summary(struct rb_root *root, const char *filename) int i; al = rb_entry(node, struct annotation_line, rb_node); - for (i = 0; i < al->samples_nr; i++) { - percent = al->samples[i].percent_sum; + for (i = 0; i < al->data_nr; i++) { + percent = al->data[i].percent_sum; color = get_percent_color(percent); color_fprintf(stdout, color, " %7.2f", percent); @@ -2029,10 +2058,12 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, evsel_name = buf; } - graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples)\n", + graph_dotted_len = printf(" %-*.*s| Source code & Disassembly of %s for %s (%" PRIu64 " samples, " + "percent: %s)\n", width, width, symbol_conf.show_total_period ? "Period" : symbol_conf.show_nr_samples ? 
"Samples" : "Percent", - d_filename, evsel_name, h->nr_samples); + d_filename, evsel_name, h->nr_samples, + percent_type_str(opts->percent_type)); printf("%-*.*s----\n", graph_dotted_len, graph_dotted_len, graph_dotted_line); @@ -2052,7 +2083,7 @@ int symbol__annotate_printf(struct symbol *sym, struct map *map, err = annotation_line__print(pos, sym, start, evsel, len, opts->min_pcnt, printed, opts->max_lines, - queue, addr_fmt_width); + queue, addr_fmt_width, opts->percent_type); switch (err) { case 0: @@ -2129,10 +2160,11 @@ static void FILE__write_graph(void *fp, int graph) fputs(s, fp); } -int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp) +static int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp, + struct annotation_options *opts) { struct annotation *notes = symbol__annotation(sym); - struct annotation_write_ops ops = { + struct annotation_write_ops wops = { .first_line = true, .obj = fp, .set_color = FILE__set_color, @@ -2146,15 +2178,16 @@ int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp) list_for_each_entry(al, ¬es->src->source, node) { if (annotation_line__filter(al, notes)) continue; - annotation_line__write(al, notes, &ops); + annotation_line__write(al, notes, &wops, opts); fputc('\n', fp); - ops.first_line = false; + wops.first_line = false; } return 0; } -int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel) +int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel, + struct annotation_options *opts) { const char *ev_name = perf_evsel__name(evsel); char buf[1024]; @@ -2176,7 +2209,7 @@ int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel) fprintf(fp, "%s() %s\nEvent: %s\n\n", ms->sym->name, ms->map->dso->long_name, ev_name); - symbol__annotate_fprintf2(ms->sym, fp); + symbol__annotate_fprintf2(ms->sym, fp, opts); fclose(fp); err = 0; @@ -2346,7 +2379,8 @@ void annotation__update_column_widths(struct annotation *notes) } static void annotation__calc_lines(struct annotation *notes, struct map *map, - struct rb_root *root) + struct rb_root *root, + struct annotation_options *opts) { struct annotation_line *al; struct rb_root tmp_root = RB_ROOT; @@ -2355,13 +2389,14 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map, double percent_max = 0.0; int i; - for (i = 0; i < al->samples_nr; i++) { - struct annotation_data *sample; + for (i = 0; i < al->data_nr; i++) { + double percent; - sample = &al->samples[i]; + percent = annotation_data__percent(&al->data[i], + opts->percent_type); - if (sample->percent > percent_max) - percent_max = sample->percent; + if (percent > percent_max) + percent_max = percent; } if (percent_max <= 0.5) @@ -2369,18 +2404,19 @@ static void annotation__calc_lines(struct annotation *notes, struct map *map, al->path = get_srcline(map->dso, notes->start + al->offset, NULL, false, true, notes->start + al->offset); - insert_source_line(&tmp_root, al); + insert_source_line(&tmp_root, al, opts); } resort_source_line(root, &tmp_root); } static void symbol__calc_lines(struct symbol *sym, struct map *map, - struct rb_root *root) + struct rb_root *root, + struct annotation_options *opts) { struct annotation *notes = symbol__annotation(sym); - annotation__calc_lines(notes, map, root); + annotation__calc_lines(notes, map, root, opts); } int symbol__tty_annotate2(struct symbol *sym, struct map *map, @@ -2389,7 +2425,7 @@ int symbol__tty_annotate2(struct symbol *sym, struct map *map, { struct dso *dso = map->dso; struct rb_root source_line = 
RB_ROOT; - struct annotation *notes = symbol__annotation(sym); + struct hists *hists = evsel__hists(evsel); char buf[1024]; if (symbol__annotate2(sym, map, evsel, opts, NULL) < 0) @@ -2397,13 +2433,14 @@ int symbol__tty_annotate2(struct symbol *sym, struct map *map, if (opts->print_lines) { srcline_full_filename = opts->full_path; - symbol__calc_lines(sym, map, &source_line); + symbol__calc_lines(sym, map, &source_line, opts); print_summary(&source_line, dso->long_name); } - annotation__scnprintf_samples_period(notes, buf, sizeof(buf), evsel); - fprintf(stdout, "%s\n%s() %s\n", buf, sym->name, dso->long_name); - symbol__annotate_fprintf2(sym, stdout); + hists__scnprintf_title(hists, buf, sizeof(buf)); + fprintf(stdout, "%s, [percent: %s]\n%s() %s\n", + buf, percent_type_str(opts->percent_type), sym->name, dso->long_name); + symbol__annotate_fprintf2(sym, stdout, opts); annotated_source__purge(symbol__annotation(sym)->src); @@ -2424,7 +2461,7 @@ int symbol__tty_annotate(struct symbol *sym, struct map *map, if (opts->print_lines) { srcline_full_filename = opts->full_path; - symbol__calc_lines(sym, map, &source_line); + symbol__calc_lines(sym, map, &source_line, opts); print_summary(&source_line, dso->long_name); } @@ -2441,14 +2478,21 @@ bool ui__has_annotation(void) } -double annotation_line__max_percent(struct annotation_line *al, struct annotation *notes) +static double annotation_line__max_percent(struct annotation_line *al, + struct annotation *notes, + unsigned int percent_type) { double percent_max = 0.0; int i; for (i = 0; i < notes->nr_events; i++) { - if (al->samples[i].percent > percent_max) - percent_max = al->samples[i].percent; + double percent; + + percent = annotation_data__percent(&al->data[i], + percent_type); + + if (percent > percent_max) + percent_max = percent; } return percent_max; @@ -2487,7 +2531,7 @@ call_like: static void __annotation_line__write(struct annotation_line *al, struct annotation *notes, bool first_line, bool current_entry, bool change_color, int width, - void *obj, + void *obj, unsigned int percent_type, int (*obj__set_color)(void *obj, int color), void (*obj__set_percent_color)(void *obj, double percent, bool current), int (*obj__set_jumps_percent_color)(void *obj, int nr, bool current), @@ -2495,7 +2539,7 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati void (*obj__write_graph)(void *obj, int graph)) { - double percent_max = annotation_line__max_percent(al, notes); + double percent_max = annotation_line__max_percent(al, notes, percent_type); int pcnt_width = annotation__pcnt_width(notes), cycles_width = annotation__cycles_width(notes); bool show_title = false; @@ -2514,15 +2558,18 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati int i; for (i = 0; i < notes->nr_events; i++) { - obj__set_percent_color(obj, al->samples[i].percent, current_entry); + double percent; + + percent = annotation_data__percent(&al->data[i], percent_type); + + obj__set_percent_color(obj, percent, current_entry); if (notes->options->show_total_period) { - obj__printf(obj, "%11" PRIu64 " ", al->samples[i].he.period); + obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period); } else if (notes->options->show_nr_samples) { obj__printf(obj, "%6" PRIu64 " ", - al->samples[i].he.nr_samples); + al->data[i].he.nr_samples); } else { - obj__printf(obj, "%6.2f ", - al->samples[i].percent); + obj__printf(obj, "%6.2f ", percent); } } } else { @@ -2640,13 +2687,15 @@ print_addr: } void annotation_line__write(struct 
annotation_line *al, struct annotation *notes, - struct annotation_write_ops *ops) + struct annotation_write_ops *wops, + struct annotation_options *opts) { - __annotation_line__write(al, notes, ops->first_line, ops->current_entry, - ops->change_color, ops->width, ops->obj, - ops->set_color, ops->set_percent_color, - ops->set_jumps_percent_color, ops->printf, - ops->write_graph); + __annotation_line__write(al, notes, wops->first_line, wops->current_entry, + wops->change_color, wops->width, wops->obj, + opts->percent_type, + wops->set_color, wops->set_percent_color, + wops->set_jumps_percent_color, wops->printf, + wops->write_graph); } int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *evsel, @@ -2688,46 +2737,6 @@ out_free_offsets: return -1; } -int __annotation__scnprintf_samples_period(struct annotation *notes, - char *bf, size_t size, - struct perf_evsel *evsel, - bool show_freq) -{ - const char *ev_name = perf_evsel__name(evsel); - char buf[1024], ref[30] = " show reference callgraph, "; - char sample_freq_str[64] = ""; - unsigned long nr_samples = 0; - int nr_members = 1; - bool enable_ref = false; - u64 nr_events = 0; - char unit; - int i; - - if (perf_evsel__is_group_event(evsel)) { - perf_evsel__group_desc(evsel, buf, sizeof(buf)); - ev_name = buf; - nr_members = evsel->nr_members; - } - - for (i = 0; i < nr_members; i++) { - struct sym_hist *ah = annotation__histogram(notes, evsel->idx + i); - - nr_samples += ah->nr_samples; - nr_events += ah->period; - } - - if (symbol_conf.show_ref_callgraph && strstr(ev_name, "call-graph=no")) - enable_ref = true; - - if (show_freq) - scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->attr.sample_freq); - - nr_samples = convert_unit(nr_samples, &unit); - return scnprintf(bf, size, "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64, - nr_samples, unit, evsel->nr_members > 1 ? "s" : "", - ev_name, sample_freq_str, enable_ref ? 
ref : " ", nr_events); -} - #define ANNOTATION__CFG(n) \ { .name = #n, .value = &annotation__default_options.n, } @@ -2792,3 +2801,55 @@ void annotation_config__init(void) annotation__default_options.show_total_period = symbol_conf.show_total_period; annotation__default_options.show_nr_samples = symbol_conf.show_nr_samples; } + +static unsigned int parse_percent_type(char *str1, char *str2) +{ + unsigned int type = (unsigned int) -1; + + if (!strcmp("period", str1)) { + if (!strcmp("local", str2)) + type = PERCENT_PERIOD_LOCAL; + else if (!strcmp("global", str2)) + type = PERCENT_PERIOD_GLOBAL; + } + + if (!strcmp("hits", str1)) { + if (!strcmp("local", str2)) + type = PERCENT_HITS_LOCAL; + else if (!strcmp("global", str2)) + type = PERCENT_HITS_GLOBAL; + } + + return type; +} + +int annotate_parse_percent_type(const struct option *opt, const char *_str, + int unset __maybe_unused) +{ + struct annotation_options *opts = opt->value; + unsigned int type; + char *str1, *str2; + int err = -1; + + str1 = strdup(_str); + if (!str1) + return -ENOMEM; + + str2 = strchr(str1, '-'); + if (!str2) + goto out; + + *str2++ = 0; + + type = parse_percent_type(str1, str2); + if (type == (unsigned int) -1) + type = parse_percent_type(str2, str1); + if (type != (unsigned int) -1) { + opts->percent_type = type; + err = 0; + } + +out: + free(str1); + return err; +} diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index a4c0d91907e6..005a5fe8a8c6 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -11,6 +11,7 @@ #include <linux/list.h> #include <linux/rbtree.h> #include <pthread.h> +#include <asm/bug.h> struct ins_ops; @@ -82,6 +83,7 @@ struct annotation_options { int context; const char *objdump_path; const char *disassembler_style; + unsigned int percent_type; }; enum { @@ -101,8 +103,16 @@ struct sym_hist_entry { u64 period; }; +enum { + PERCENT_HITS_LOCAL, + PERCENT_HITS_GLOBAL, + PERCENT_PERIOD_LOCAL, + PERCENT_PERIOD_GLOBAL, + PERCENT_MAX, +}; + struct annotation_data { - double percent; + double percent[PERCENT_MAX]; double percent_sum; struct sym_hist_entry he; }; @@ -122,8 +132,8 @@ struct annotation_line { char *path; u32 idx; int idx_asm; - int samples_nr; - struct annotation_data samples[0]; + int data_nr; + struct annotation_data data[0]; }; struct disasm_line { @@ -134,6 +144,27 @@ struct disasm_line { struct annotation_line al; }; +static inline double annotation_data__percent(struct annotation_data *data, + unsigned int which) +{ + return which < PERCENT_MAX ? data->percent[which] : -1; +} + +static inline const char *percent_type_str(unsigned int type) +{ + static const char *str[PERCENT_MAX] = { + "local hits", + "global hits", + "local period", + "global period", + }; + + if (WARN_ON(type >= PERCENT_MAX)) + return "N/A"; + + return str[type]; +} + static inline struct disasm_line *disasm_line(struct annotation_line *al) { return al ? 
container_of(al, struct disasm_line, al) : NULL; @@ -169,22 +200,15 @@ struct annotation_write_ops { void (*write_graph)(void *obj, int graph); }; -double annotation_line__max_percent(struct annotation_line *al, struct annotation *notes); void annotation_line__write(struct annotation_line *al, struct annotation *notes, - struct annotation_write_ops *ops); + struct annotation_write_ops *ops, + struct annotation_options *opts); int __annotation__scnprintf_samples_period(struct annotation *notes, char *bf, size_t size, struct perf_evsel *evsel, bool show_freq); -static inline int annotation__scnprintf_samples_period(struct annotation *notes, - char *bf, size_t size, - struct perf_evsel *evsel) -{ - return __annotation__scnprintf_samples_period(notes, bf, size, evsel, true); -} - int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw); size_t disasm__fprintf(struct list_head *head, FILE *fp); void symbol__calc_percent(struct symbol *sym, struct perf_evsel *evsel); @@ -340,12 +364,12 @@ int symbol__strerror_disassemble(struct symbol *sym, struct map *map, int symbol__annotate_printf(struct symbol *sym, struct map *map, struct perf_evsel *evsel, struct annotation_options *options); -int symbol__annotate_fprintf2(struct symbol *sym, FILE *fp); void symbol__annotate_zero_histogram(struct symbol *sym, int evidx); void symbol__annotate_decay_histogram(struct symbol *sym, int evidx); void annotated_source__purge(struct annotated_source *as); -int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel); +int map_symbol__annotation_dump(struct map_symbol *ms, struct perf_evsel *evsel, + struct annotation_options *opts); bool ui__has_annotation(void); @@ -373,4 +397,6 @@ static inline int symbol__tui_annotate(struct symbol *sym __maybe_unused, void annotation_config__init(void); +int annotate_parse_percent_type(const struct option *opt, const char *_str, + int unset); #endif /* __PERF_ANNOTATE_H */ diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index d056447520a2..db1511359c5e 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -56,6 +56,7 @@ #include "intel-pt.h" #include "intel-bts.h" #include "arm-spe.h" +#include "s390-cpumsf.h" #include "sane_ctype.h" #include "symbol/kallsyms.h" @@ -202,6 +203,9 @@ static int auxtrace_queues__grow(struct auxtrace_queues *queues, for (i = 0; i < queues->nr_queues; i++) { list_splice_tail(&queues->queue_array[i].head, &queue_array[i].head); + queue_array[i].tid = queues->queue_array[i].tid; + queue_array[i].cpu = queues->queue_array[i].cpu; + queue_array[i].set = queues->queue_array[i].set; queue_array[i].priv = queues->queue_array[i].priv; } @@ -920,6 +924,8 @@ int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused, return arm_spe_process_auxtrace_info(event, session); case PERF_AUXTRACE_CS_ETM: return cs_etm__process_auxtrace_info(event, session); + case PERF_AUXTRACE_S390_CPUMSF: + return s390_cpumsf_process_auxtrace_info(event, session); case PERF_AUXTRACE_UNKNOWN: default: return -EINVAL; diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h index e731f55da072..71fc3bd74299 100644 --- a/tools/perf/util/auxtrace.h +++ b/tools/perf/util/auxtrace.h @@ -44,6 +44,7 @@ enum auxtrace_type { PERF_AUXTRACE_INTEL_BTS, PERF_AUXTRACE_CS_ETM, PERF_AUXTRACE_ARM_SPE, + PERF_AUXTRACE_S390_CPUMSF, }; enum itrace_period_type { diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c index 3d02ae38ec56..47aac41349a2 100644 --- 
a/tools/perf/util/bpf-loader.c +++ b/tools/perf/util/bpf-loader.c @@ -1529,13 +1529,13 @@ int bpf__apply_obj_config(void) bpf_object__for_each_safe(obj, objtmp) \ bpf_map__for_each(pos, obj) -#define bpf__for_each_stdout_map(pos, obj, objtmp) \ +#define bpf__for_each_map_named(pos, obj, objtmp, name) \ bpf__for_each_map(pos, obj, objtmp) \ if (bpf_map__name(pos) && \ - (strcmp("__bpf_stdout__", \ + (strcmp(name, \ bpf_map__name(pos)) == 0)) -int bpf__setup_stdout(struct perf_evlist *evlist) +struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const char *name) { struct bpf_map_priv *tmpl_priv = NULL; struct bpf_object *obj, *tmp; @@ -1544,11 +1544,11 @@ int bpf__setup_stdout(struct perf_evlist *evlist) int err; bool need_init = false; - bpf__for_each_stdout_map(map, obj, tmp) { + bpf__for_each_map_named(map, obj, tmp, name) { struct bpf_map_priv *priv = bpf_map__priv(map); if (IS_ERR(priv)) - return -BPF_LOADER_ERRNO__INTERNAL; + return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL); /* * No need to check map type: type should have been @@ -1561,49 +1561,61 @@ int bpf__setup_stdout(struct perf_evlist *evlist) } if (!need_init) - return 0; + return NULL; if (!tmpl_priv) { - err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/", - NULL); + char *event_definition = NULL; + + if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0) + return ERR_PTR(-ENOMEM); + + err = parse_events(evlist, event_definition, NULL); + free(event_definition); + if (err) { - pr_debug("ERROR: failed to create bpf-output event\n"); - return -err; + pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name); + return ERR_PTR(-err); } evsel = perf_evlist__last(evlist); } - bpf__for_each_stdout_map(map, obj, tmp) { + bpf__for_each_map_named(map, obj, tmp, name) { struct bpf_map_priv *priv = bpf_map__priv(map); if (IS_ERR(priv)) - return -BPF_LOADER_ERRNO__INTERNAL; + return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL); if (priv) continue; if (tmpl_priv) { priv = bpf_map_priv__clone(tmpl_priv); if (!priv) - return -ENOMEM; + return ERR_PTR(-ENOMEM); err = bpf_map__set_priv(map, priv, bpf_map_priv__clear); if (err) { bpf_map_priv__clear(map, priv); - return err; + return ERR_PTR(err); } } else if (evsel) { struct bpf_map_op *op; op = bpf_map__add_newop(map, NULL); if (IS_ERR(op)) - return PTR_ERR(op); + return ERR_PTR(PTR_ERR(op)); op->op_type = BPF_MAP_OP_SET_EVSEL; op->v.evsel = evsel; } } - return 0; + return evsel; +} + +int bpf__setup_stdout(struct perf_evlist *evlist) +{ + struct perf_evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__"); + return IS_ERR(evsel) ? 
PTR_ERR(evsel) : 0; } #define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START) @@ -1780,8 +1792,8 @@ int bpf__strerror_apply_obj_config(int err, char *buf, size_t size) return 0; } -int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused, - int err, char *buf, size_t size) +int bpf__strerror_setup_output_event(struct perf_evlist *evlist __maybe_unused, + int err, char *buf, size_t size) { bpf__strerror_head(err, buf, size); bpf__strerror_end(buf, size); diff --git a/tools/perf/util/bpf-loader.h b/tools/perf/util/bpf-loader.h index 5d3aefd6fae7..62d245a90e1d 100644 --- a/tools/perf/util/bpf-loader.h +++ b/tools/perf/util/bpf-loader.h @@ -43,6 +43,7 @@ enum bpf_loader_errno { __BPF_LOADER_ERRNO__END, }; +struct perf_evsel; struct bpf_object; struct parse_events_term; #define PERF_BPF_PROBE_GROUP "perf_bpf_probe" @@ -82,9 +83,8 @@ int bpf__apply_obj_config(void); int bpf__strerror_apply_obj_config(int err, char *buf, size_t size); int bpf__setup_stdout(struct perf_evlist *evlist); -int bpf__strerror_setup_stdout(struct perf_evlist *evlist, int err, - char *buf, size_t size); - +struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const char *name); +int bpf__strerror_setup_output_event(struct perf_evlist *evlist, int err, char *buf, size_t size); #else #include <errno.h> @@ -138,6 +138,12 @@ bpf__setup_stdout(struct perf_evlist *evlist __maybe_unused) return 0; } +static inline struct perf_evsel * +bpf__setup_output_event(struct perf_evlist *evlist __maybe_unused, const char *name __maybe_unused) +{ + return NULL; +} + static inline int __bpf_strerror(char *buf, size_t size) { @@ -193,11 +199,16 @@ bpf__strerror_apply_obj_config(int err __maybe_unused, } static inline int -bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused, - int err __maybe_unused, char *buf, - size_t size) +bpf__strerror_setup_output_event(struct perf_evlist *evlist __maybe_unused, + int err __maybe_unused, char *buf, size_t size) { return __bpf_strerror(buf, size); } + #endif + +static inline int bpf__strerror_setup_stdout(struct perf_evlist *evlist, int err, char *buf, size_t size) +{ + return bpf__strerror_setup_output_event(evlist, err, buf, size); +} #endif diff --git a/tools/perf/util/compress.h b/tools/perf/util/compress.h index ecca688a25fb..892e92e7e7fc 100644 --- a/tools/perf/util/compress.h +++ b/tools/perf/util/compress.h @@ -4,10 +4,12 @@ #ifdef HAVE_ZLIB_SUPPORT int gzip_decompress_to_file(const char *input, int output_fd); +bool gzip_is_compressed(const char *input); #endif #ifdef HAVE_LZMA_SUPPORT int lzma_decompress_to_file(const char *input, int output_fd); +bool lzma_is_compressed(const char *input); #endif #endif /* PERF_COMPRESS_H */ diff --git a/tools/perf/util/data-convert-bt.c b/tools/perf/util/data-convert-bt.c index 5744c12641a5..abd38abf1d91 100644 --- a/tools/perf/util/data-convert-bt.c +++ b/tools/perf/util/data-convert-bt.c @@ -310,8 +310,8 @@ static int add_tracepoint_field_value(struct ctf_writer *cw, if (flags & FIELD_IS_DYNAMIC) { unsigned long long tmp_val; - tmp_val = pevent_read_number(fmtf->event->pevent, - data + offset, len); + tmp_val = tep_read_number(fmtf->event->pevent, + data + offset, len); offset = tmp_val; len = offset >> 16; offset &= 0xffff; @@ -353,7 +353,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw, else { unsigned long long value_int; - value_int = pevent_read_number( + value_int = tep_read_number( fmtf->event->pevent, data + offset + i * len, len); diff --git a/tools/perf/util/dso.c 
b/tools/perf/util/dso.c index 51cf82cf1882..bbed90e5d9bb 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -189,28 +189,34 @@ int dso__read_binary_type_filename(const struct dso *dso, return ret; } +enum { + COMP_ID__NONE = 0, +}; + static const struct { const char *fmt; int (*decompress)(const char *input, int output); + bool (*is_compressed)(const char *input); } compressions[] = { + [COMP_ID__NONE] = { .fmt = NULL, }, #ifdef HAVE_ZLIB_SUPPORT - { "gz", gzip_decompress_to_file }, + { "gz", gzip_decompress_to_file, gzip_is_compressed }, #endif #ifdef HAVE_LZMA_SUPPORT - { "xz", lzma_decompress_to_file }, + { "xz", lzma_decompress_to_file, lzma_is_compressed }, #endif - { NULL, NULL }, + { NULL, NULL, NULL }, }; -bool is_supported_compression(const char *ext) +static int is_supported_compression(const char *ext) { unsigned i; - for (i = 0; compressions[i].fmt; i++) { + for (i = 1; compressions[i].fmt; i++) { if (!strcmp(ext, compressions[i].fmt)) - return true; + return i; } - return false; + return COMP_ID__NONE; } bool is_kernel_module(const char *pathname, int cpumode) @@ -239,80 +245,73 @@ bool is_kernel_module(const char *pathname, int cpumode) return m.kmod; } -bool decompress_to_file(const char *ext, const char *filename, int output_fd) -{ - unsigned i; - - for (i = 0; compressions[i].fmt; i++) { - if (!strcmp(ext, compressions[i].fmt)) - return !compressions[i].decompress(filename, - output_fd); - } - return false; -} - bool dso__needs_decompress(struct dso *dso) { return dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP || dso->symtab_type == DSO_BINARY_TYPE__GUEST_KMODULE_COMP; } -static int decompress_kmodule(struct dso *dso, const char *name, char *tmpbuf) +static int decompress_kmodule(struct dso *dso, const char *name, + char *pathname, size_t len) { + char tmpbuf[] = KMOD_DECOMP_NAME; int fd = -1; - struct kmod_path m; if (!dso__needs_decompress(dso)) return -1; - if (kmod_path__parse_ext(&m, dso->long_name)) + if (dso->comp == COMP_ID__NONE) return -1; - if (!m.comp) - goto out; + /* + * We have proper compression id for DSO and yet the file + * behind the 'name' can still be plain uncompressed object. + * + * The reason is behind the logic we open the DSO object files, + * when we try all possible 'debug' objects until we find the + * data. So even if the DSO is represented by 'krava.xz' module, + * we can end up here opening ~/.debug/....23432432/debug' file + * which is not compressed. + * + * To keep this transparent, we detect this and return the file + * descriptor to the uncompressed file. 
+ */ + if (!compressions[dso->comp].is_compressed(name)) + return open(name, O_RDONLY); fd = mkstemp(tmpbuf); if (fd < 0) { dso->load_errno = errno; - goto out; + return -1; } - if (!decompress_to_file(m.ext, name, fd)) { + if (compressions[dso->comp].decompress(name, fd)) { dso->load_errno = DSO_LOAD_ERRNO__DECOMPRESSION_FAILURE; close(fd); fd = -1; } -out: - free(m.ext); + if (!pathname || (fd < 0)) + unlink(tmpbuf); + + if (pathname && (fd >= 0)) + strncpy(pathname, tmpbuf, len); + return fd; } int dso__decompress_kmodule_fd(struct dso *dso, const char *name) { - char tmpbuf[] = KMOD_DECOMP_NAME; - int fd; - - fd = decompress_kmodule(dso, name, tmpbuf); - unlink(tmpbuf); - return fd; + return decompress_kmodule(dso, name, NULL, 0); } int dso__decompress_kmodule_path(struct dso *dso, const char *name, char *pathname, size_t len) { - char tmpbuf[] = KMOD_DECOMP_NAME; - int fd; + int fd = decompress_kmodule(dso, name, pathname, len); - fd = decompress_kmodule(dso, name, tmpbuf); - if (fd < 0) { - unlink(tmpbuf); - return -1; - } - - strncpy(pathname, tmpbuf, len); close(fd); - return 0; + return fd >= 0 ? 0 : -1; } /* @@ -332,7 +331,7 @@ int dso__decompress_kmodule_path(struct dso *dso, const char *name, * Returns 0 if there's no strdup error, -ENOMEM otherwise. */ int __kmod_path__parse(struct kmod_path *m, const char *path, - bool alloc_name, bool alloc_ext) + bool alloc_name) { const char *name = strrchr(path, '/'); const char *ext = strrchr(path, '.'); @@ -372,10 +371,9 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, return 0; } - if (is_supported_compression(ext + 1)) { - m->comp = true; + m->comp = is_supported_compression(ext + 1); + if (m->comp > COMP_ID__NONE) ext -= 3; - } /* Check .ko extension only if there's enough name left. 
*/ if (ext > name) @@ -393,14 +391,6 @@ int __kmod_path__parse(struct kmod_path *m, const char *path, strxfrchar(m->name, '-', '_'); } - if (alloc_ext && m->comp) { - m->ext = strdup(ext + 4); - if (!m->ext) { - free((void *) m->name); - return -ENOMEM; - } - } - return 0; } @@ -413,8 +403,10 @@ void dso__set_module_info(struct dso *dso, struct kmod_path *m, dso->symtab_type = DSO_BINARY_TYPE__GUEST_KMODULE; /* _KMODULE_COMP should be next to _KMODULE */ - if (m->kmod && m->comp) + if (m->kmod && m->comp) { dso->symtab_type++; + dso->comp = m->comp; + } dso__set_short_name(dso, strdup(m->name), true); } @@ -468,6 +460,7 @@ static int __open_dso(struct dso *dso, struct machine *machine) int fd = -EINVAL; char *root_dir = (char *)""; char *name = malloc(PATH_MAX); + bool decomp = false; if (!name) return -ENOMEM; @@ -491,12 +484,13 @@ static int __open_dso(struct dso *dso, struct machine *machine) goto out; } + decomp = true; strcpy(name, newpath); } fd = do_open(name); - if (dso__needs_decompress(dso)) + if (decomp) unlink(name); out: @@ -1218,6 +1212,7 @@ struct dso *dso__new(const char *name) dso->a2l_fails = 1; dso->kernel = DSO_TYPE_USER; dso->needs_swap = DSO_SWAP__UNSET; + dso->comp = COMP_ID__NONE; RB_CLEAR_NODE(&dso->rb_node); dso->root = NULL; INIT_LIST_HEAD(&dso->node); diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index ef69de2e69ea..c5380500bed4 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -175,6 +175,7 @@ struct dso { u16 short_name_len; void *dwfl; /* DWARF debug info */ struct auxtrace_cache *auxtrace_cache; + int comp; /* dso data file */ struct { @@ -250,9 +251,7 @@ int dso__kernel_module_get_build_id(struct dso *dso, const char *root_dir); char dso__symtab_origin(const struct dso *dso); int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type, char *root_dir, char *filename, size_t size); -bool is_supported_compression(const char *ext); bool is_kernel_module(const char *pathname, int cpumode); -bool decompress_to_file(const char *ext, const char *filename, int output_fd); bool dso__needs_decompress(struct dso *dso); int dso__decompress_kmodule_fd(struct dso *dso, const char *name); int dso__decompress_kmodule_path(struct dso *dso, const char *name, @@ -263,17 +262,15 @@ int dso__decompress_kmodule_path(struct dso *dso, const char *name, struct kmod_path { char *name; - char *ext; - bool comp; + int comp; bool kmod; }; int __kmod_path__parse(struct kmod_path *m, const char *path, - bool alloc_name, bool alloc_ext); + bool alloc_name); -#define kmod_path__parse(__m, __p) __kmod_path__parse(__m, __p, false, false) -#define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true , false) -#define kmod_path__parse_ext(__m, __p) __kmod_path__parse(__m, __p, false, true) +#define kmod_path__parse(__m, __p) __kmod_path__parse(__m, __p, false) +#define kmod_path__parse_name(__m, __p) __kmod_path__parse(__m, __p, true) void dso__set_module_info(struct dso *dso, struct kmod_path *m, struct machine *machine); diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 0c8ecf0c78a4..0cd42150f712 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -541,10 +541,17 @@ static int __event__synthesize_thread(union perf_event *comm_event, tgid, process, machine) < 0) return -1; + /* + * send mmap only for thread group leader + * see thread__init_map_groups + */ + if (pid == tgid && + perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, + process, machine, mmap_data, + 
proc_map_timeout)) + return -1; - return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, - process, machine, mmap_data, - proc_map_timeout); + return 0; } if (machine__is_default_guest(machine)) diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index e7a4b31a84fb..be440df29615 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -803,7 +803,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx, if (*output == -1) { *output = fd; - if (perf_mmap__mmap(&maps[idx], mp, *output) < 0) + if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0) return -1; } else { if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0) diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index ddf84b941abf..c980bbff6353 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -2683,7 +2683,7 @@ int perf_event__synthesize_sample(union perf_event *event, u64 type, struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name) { - return pevent_find_field(evsel->tp_format, name); + return tep_find_field(evsel->tp_format, name); } void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample, diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 973c03167947..163c960614d3 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -452,11 +452,18 @@ static inline int perf_evsel__group_idx(struct perf_evsel *evsel) return evsel->idx - evsel->leader->idx; } +/* Iterates group WITHOUT the leader. */ #define for_each_group_member(_evsel, _leader) \ for ((_evsel) = list_entry((_leader)->node.next, struct perf_evsel, node); \ (_evsel) && (_evsel)->leader == (_leader); \ (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node)) +/* Iterates group WITH the leader. 
*/ +#define for_each_group_evsel(_evsel, _leader) \ +for ((_evsel) = _leader; \ + (_evsel) && (_evsel)->leader == (_leader); \ + (_evsel) = list_entry((_evsel)->node.next, struct perf_evsel, node)) + static inline bool perf_evsel__has_branch_callstack(const struct perf_evsel *evsel) { return evsel->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 5af58aac91ad..3cadc252dd89 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -279,8 +279,6 @@ static int do_read_bitmap(struct feat_fd *ff, unsigned long **pset, u64 *psize) if (!set) return -ENOMEM; - bitmap_zero(set, size); - p = (u64 *) set; for (i = 0; (u64) i < BITS_TO_U64(size); i++) { @@ -1285,7 +1283,6 @@ static int memory_node__read(struct memory_node *n, unsigned long idx) return -ENOMEM; } - bitmap_zero(n->set, size); n->node = idx; n->size = size; @@ -3207,7 +3204,7 @@ static int read_attr(int fd, struct perf_header *ph, } static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, - struct pevent *pevent) + struct tep_handle *pevent) { struct event_format *event; char bf[128]; @@ -3221,7 +3218,7 @@ static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, return -1; } - event = pevent_find_event(pevent, evsel->attr.config); + event = tep_find_event(pevent, evsel->attr.config); if (event == NULL) { pr_debug("cannot find event format for %d\n", (int)evsel->attr.config); return -1; @@ -3239,7 +3236,7 @@ static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, } static int perf_evlist__prepare_tracepoint_events(struct perf_evlist *evlist, - struct pevent *pevent) + struct tep_handle *pevent) { struct perf_evsel *pos; diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c index 5e94857dfca2..19262f98cd4e 100644 --- a/tools/perf/util/llvm-utils.c +++ b/tools/perf/util/llvm-utils.c @@ -22,12 +22,14 @@ "$CLANG_OPTIONS $KERNEL_INC_OPTIONS $PERF_BPF_INC_OPTIONS " \ "-Wno-unused-value -Wno-pointer-sign " \ "-working-directory $WORKING_DIR " \ - "-c \"$CLANG_SOURCE\" -target bpf -O2 -o -" + "-c \"$CLANG_SOURCE\" -target bpf $CLANG_EMIT_LLVM -O2 -o - $LLVM_OPTIONS_PIPE" struct llvm_param llvm_param = { .clang_path = "clang", + .llc_path = "llc", .clang_bpf_cmd_template = CLANG_BPF_CMD_DEFAULT_TEMPLATE, .clang_opt = NULL, + .opts = NULL, .kbuild_dir = NULL, .kbuild_opts = NULL, .user_set_param = false, @@ -51,6 +53,8 @@ int perf_llvm_config(const char *var, const char *value) llvm_param.kbuild_opts = strdup(value); else if (!strcmp(var, "dump-obj")) llvm_param.dump_obj = !!perf_config_bool(var, value); + else if (!strcmp(var, "opts")) + llvm_param.opts = strdup(value); else { pr_debug("Invalid LLVM config option: %s\n", value); return -1; @@ -430,11 +434,13 @@ int llvm__compile_bpf(const char *path, void **p_obj_buf, unsigned int kernel_version; char linux_version_code_str[64]; const char *clang_opt = llvm_param.clang_opt; - char clang_path[PATH_MAX], abspath[PATH_MAX], nr_cpus_avail_str[64]; + char clang_path[PATH_MAX], llc_path[PATH_MAX], abspath[PATH_MAX], nr_cpus_avail_str[64]; char serr[STRERR_BUFSIZE]; char *kbuild_dir = NULL, *kbuild_include_opts = NULL, *perf_bpf_include_opts = NULL; const char *template = llvm_param.clang_bpf_cmd_template; + char *pipe_template = NULL; + const char *opts = llvm_param.opts; char *command_echo = NULL, *command_out; char *perf_include_dir = system_path(PERF_INCLUDE_DIR); @@ -484,6 +490,26 @@ int llvm__compile_bpf(const char *path, void 
**p_obj_buf, force_set_env("PERF_BPF_INC_OPTIONS", perf_bpf_include_opts); force_set_env("WORKING_DIR", kbuild_dir ? : "."); + if (opts) { + err = search_program(llvm_param.llc_path, "llc", llc_path); + if (err) { + pr_err("ERROR:\tunable to find llc.\n" + "Hint:\tTry to install latest clang/llvm to support BPF. Check your $PATH\n" + " \tand 'llc-path' option in [llvm] section of ~/.perfconfig.\n"); + version_notice(); + goto errout; + } + + if (asprintf(&pipe_template, "%s -emit-llvm | %s -march=bpf %s -filetype=obj -o -", + template, llc_path, opts) < 0) { + pr_err("ERROR:\tnot enough memory to setup command line\n"); + goto errout; + } + + template = pipe_template; + + } + /* * Since we may reset clang's working dir, path of source file * should be transferred into absolute path, except we want @@ -535,6 +561,7 @@ errout: free(obj_buf); free(perf_bpf_include_opts); free(perf_include_dir); + free(pipe_template); if (p_obj_buf) *p_obj_buf = NULL; if (p_obj_buf_sz) diff --git a/tools/perf/util/llvm-utils.h b/tools/perf/util/llvm-utils.h index d3ad8deb5db4..bf3f3f4c4fe2 100644 --- a/tools/perf/util/llvm-utils.h +++ b/tools/perf/util/llvm-utils.h @@ -11,6 +11,8 @@ struct llvm_param { /* Path of clang executable */ const char *clang_path; + /* Path of llc executable */ + const char *llc_path; /* * Template of clang bpf compiling. 5 env variables * can be used: @@ -23,6 +25,13 @@ struct llvm_param { const char *clang_bpf_cmd_template; /* Will be filled in $CLANG_OPTIONS */ const char *clang_opt; + /* + * If present it'll add -emit-llvm to $CLANG_OPTIONS to pipe + * the clang output to llc, useful for new llvm options not + * yet selectable via 'clang -mllvm option', such as -mattr=dwarfris + * in clang 6.0/llvm 7 + */ + const char *opts; /* Where to find kbuild system */ const char *kbuild_dir; /* diff --git a/tools/perf/util/lzma.c b/tools/perf/util/lzma.c index 07498eaddc08..b1dd29a9d915 100644 --- a/tools/perf/util/lzma.c +++ b/tools/perf/util/lzma.c @@ -3,9 +3,13 @@ #include <lzma.h> #include <stdio.h> #include <linux/compiler.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <fcntl.h> #include "compress.h" #include "util.h" #include "debug.h" +#include <unistd.h> #define BUFSIZE 8192 @@ -99,3 +103,19 @@ err_fclose: fclose(infile); return err; } + +bool lzma_is_compressed(const char *input) +{ + int fd = open(input, O_RDONLY); + const uint8_t magic[6] = { 0xFD, '7', 'z', 'X', 'Z', 0x00 }; + char buf[6] = { 0 }; + ssize_t rc; + + if (fd < 0) + return -1; + + rc = read(fd, buf, sizeof(buf)); + close(fd); + return rc == sizeof(buf) ? + memcmp(buf, magic, sizeof(buf)) == 0 : false; +} diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index b300a3973448..c4acd2001db0 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -1212,8 +1212,10 @@ static int map_groups__set_module_path(struct map_groups *mg, const char *path, * Full name could reveal us kmod compression, so * we need to update the symtab_type if needed. 
*/ - if (m->comp && is_kmod_dso(map->dso)) + if (m->comp && is_kmod_dso(map->dso)) { map->dso->symtab_type++; + map->dso->comp = m->comp; + } return 0; } diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index 1de7660d93e9..d856b85862e2 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -265,7 +265,7 @@ pid_t machine__get_current_tid(struct machine *machine, int cpu); int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid, pid_t tid); /* - * For use with libtraceevent's pevent_set_function_resolver() + * For use with libtraceevent's tep_set_function_resolver() */ char *machine__resolve_kernel_addr(void *vmachine, unsigned long long *addrp, char **modp); diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 89ac5b5dc218..36d0763311ef 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -381,20 +381,6 @@ struct map *map__clone(struct map *from) return map; } -int map__overlap(struct map *l, struct map *r) -{ - if (l->start > r->start) { - struct map *t = l; - l = r; - r = t; - } - - if (l->end > r->start) - return 1; - - return 0; -} - size_t map__fprintf(struct map *map, FILE *fp) { return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", @@ -675,20 +661,42 @@ static void __map_groups__insert(struct map_groups *mg, struct map *map) static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp) { struct rb_root *root; - struct rb_node *next; + struct rb_node *next, *first; int err = 0; down_write(&maps->lock); root = &maps->entries; - next = rb_first(root); + /* + * Find first map where end > map->start. + * Same as find_vma() in kernel. + */ + next = root->rb_node; + first = NULL; + while (next) { + struct map *pos = rb_entry(next, struct map, rb_node); + + if (pos->end > map->start) { + first = next; + if (pos->start <= map->start) + break; + next = next->rb_left; + } else + next = next->rb_right; + } + + next = first; while (next) { struct map *pos = rb_entry(next, struct map, rb_node); next = rb_next(&pos->rb_node); - if (!map__overlap(pos, map)) - continue; + /* + * Stop if current map starts after map->end. + * Maps are ordered by start: next will not overlap for sure. 
+ */ + if (pos->start >= map->end) + break; if (verbose >= 2) { diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index 4cb90f242bed..e0f327b51e66 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -166,7 +166,6 @@ static inline void __map__zput(struct map **map) #define map__zput(map) __map__zput(&map) -int map__overlap(struct map *l, struct map *r); size_t map__fprintf(struct map *map, FILE *fp); size_t map__fprintf_dsoname(struct map *map, FILE *fp); char *map__srcline(struct map *map, u64 addr, struct symbol *sym); diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c index fc832676a798..215f69f41672 100644 --- a/tools/perf/util/mmap.c +++ b/tools/perf/util/mmap.c @@ -164,7 +164,7 @@ void perf_mmap__munmap(struct perf_mmap *map) auxtrace_mmap__munmap(&map->auxtrace_mmap); } -int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd) +int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu) { /* * The last one will be done at perf_mmap__consume(), so that we @@ -191,6 +191,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd) return -1; } map->fd = fd; + map->cpu = cpu; if (auxtrace_mmap__mmap(&map->auxtrace_mmap, &mp->auxtrace_mp, map->base, fd)) diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h index d82294db1295..05a6d47c7956 100644 --- a/tools/perf/util/mmap.h +++ b/tools/perf/util/mmap.h @@ -18,6 +18,7 @@ struct perf_mmap { void *base; int mask; int fd; + int cpu; refcount_t refcnt; u64 prev; u64 start; @@ -60,7 +61,7 @@ struct mmap_params { struct auxtrace_mmap_params auxtrace_mp; }; -int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd); +int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int cpu); void perf_mmap__munmap(struct perf_mmap *map); void perf_mmap__get(struct perf_mmap *map); diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c index 5be021701f34..cf8bd123cf73 100644 --- a/tools/perf/util/namespaces.c +++ b/tools/perf/util/namespaces.c @@ -139,6 +139,9 @@ struct nsinfo *nsinfo__copy(struct nsinfo *nsi) { struct nsinfo *nnsi; + if (nsi == NULL) + return NULL; + nnsi = calloc(1, sizeof(*nnsi)); if (nnsi != NULL) { nnsi->pid = nsi->pid; diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 15eec49e71a1..f8cd3e7c9186 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -1991,8 +1991,11 @@ static int set_filter(struct perf_evsel *evsel, const void *arg) int nr_addr_filters = 0; struct perf_pmu *pmu = NULL; - if (evsel == NULL) - goto err; + if (evsel == NULL) { + fprintf(stderr, + "--filter option should follow a -e tracepoint or HW tracer option\n"); + return -1; + } if (evsel->attr.type == PERF_TYPE_TRACEPOINT) { if (perf_evsel__append_tp_filter(evsel, str) < 0) { @@ -2014,8 +2017,11 @@ static int set_filter(struct perf_evsel *evsel, const void *arg) perf_pmu__scan_file(pmu, "nr_addr_filters", "%d", &nr_addr_filters); - if (!nr_addr_filters) - goto err; + if (!nr_addr_filters) { + fprintf(stderr, + "This CPU does not support address filtering\n"); + return -1; + } if (perf_evsel__append_addr_filter(evsel, str) < 0) { fprintf(stderr, @@ -2024,12 +2030,6 @@ static int set_filter(struct perf_evsel *evsel, const void *arg) } return 0; - -err: - fprintf(stderr, - "--filter option should follow a -e tracepoint or HW tracer option\n"); - - return -1; } int parse_filter(const struct option *opt, const char *str, diff --git 
a/tools/perf/util/python.c b/tools/perf/util/python.c index 863b61478edd..ce501ba14b08 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -11,6 +11,7 @@ #include "cpumap.h" #include "print_binary.h" #include "thread_map.h" +#include "mmap.h" #if PY_MAJOR_VERSION < 3 #define _PyUnicode_FromString(arg) \ @@ -341,7 +342,7 @@ static bool is_tracepoint(struct pyrf_event *pevent) static PyObject* tracepoint_field(struct pyrf_event *pe, struct format_field *field) { - struct pevent *pevent = field->event->pevent; + struct tep_handle *pevent = field->event->pevent; void *data = pe->sample.raw_data; PyObject *ret = NULL; unsigned long long val; @@ -351,7 +352,7 @@ tracepoint_field(struct pyrf_event *pe, struct format_field *field) offset = field->offset; len = field->size; if (field->flags & FIELD_IS_DYNAMIC) { - val = pevent_read_number(pevent, data + offset, len); + val = tep_read_number(pevent, data + offset, len); offset = val; len = offset >> 16; offset &= 0xffff; @@ -364,8 +365,8 @@ tracepoint_field(struct pyrf_event *pe, struct format_field *field) field->flags &= ~FIELD_IS_STRING; } } else { - val = pevent_read_number(pevent, data + field->offset, - field->size); + val = tep_read_number(pevent, data + field->offset, + field->size); if (field->flags & FIELD_IS_POINTER) ret = PyLong_FromUnsignedLong((unsigned long) val); else if (field->flags & FIELD_IS_SIGNED) @@ -394,7 +395,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name) evsel->tp_format = tp_format; } - field = pevent_find_any_field(evsel->tp_format, str); + field = tep_find_any_field(evsel->tp_format, str); if (!field) return NULL; @@ -976,6 +977,20 @@ static PyObject *pyrf_evlist__add(struct pyrf_evlist *pevlist, return Py_BuildValue("i", evlist->nr_entries); } +static struct perf_mmap *get_md(struct perf_evlist *evlist, int cpu) +{ + int i; + + for (i = 0; i < evlist->nr_mmaps; i++) { + struct perf_mmap *md = &evlist->mmap[i]; + + if (md->cpu == cpu) + return md; + } + + return NULL; +} + static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, PyObject *args, PyObject *kwargs) { @@ -990,7 +1005,10 @@ static PyObject *pyrf_evlist__read_on_cpu(struct pyrf_evlist *pevlist, &cpu, &sample_id_all)) return NULL; - md = &evlist->mmap[cpu]; + md = get_md(evlist, cpu); + if (!md) + return NULL; + if (perf_mmap__read_init(md) < 0) goto end; diff --git a/tools/perf/util/s390-cpumsf-kernel.h b/tools/perf/util/s390-cpumsf-kernel.h new file mode 100644 index 000000000000..de8c7ad0eca8 --- /dev/null +++ b/tools/perf/util/s390-cpumsf-kernel.h @@ -0,0 +1,71 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Auxtrace support for s390 CPU measurement sampling facility + * + * Copyright IBM Corp. 2018 + * Author(s): Hendrik Brueckner <brueckner@linux.ibm.com> + * Thomas Richter <tmricht@linux.ibm.com> + */ +#ifndef S390_CPUMSF_KERNEL_H +#define S390_CPUMSF_KERNEL_H + +#define S390_CPUMSF_PAGESZ 4096 /* Size of sample block units */ +#define S390_CPUMSF_DIAG_DEF_FIRST 0x8001 /* Diagnostic entry lowest id */ + +struct hws_basic_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int R:4; /* 16-19 reserved */ + unsigned int U:4; /* 20-23 Number of unique instruct. 
*/ + unsigned int z:2; /* zeros */ + unsigned int T:1; /* 26 PSW DAT mode */ + unsigned int W:1; /* 27 PSW wait state */ + unsigned int P:1; /* 28 PSW Problem state */ + unsigned int AS:2; /* 29-30 PSW address-space control */ + unsigned int I:1; /* 31 entry valid or invalid */ + unsigned int CL:2; /* 32-33 Configuration Level */ + unsigned int:14; + unsigned int prim_asn:16; /* primary ASN */ + unsigned long long ia; /* Instruction Address */ + unsigned long long gpp; /* Guest Program Parameter */ + unsigned long long hpp; /* Host Program Parameter */ +}; + +struct hws_diag_entry { + unsigned int def:16; /* 0-15 Data Entry Format */ + unsigned int R:15; /* 16-19 and 20-30 reserved */ + unsigned int I:1; /* 31 entry valid or invalid */ + u8 data[]; /* Machine-dependent sample data */ +}; + +struct hws_combined_entry { + struct hws_basic_entry basic; /* Basic-sampling data entry */ + struct hws_diag_entry diag; /* Diagnostic-sampling data entry */ +}; + +struct hws_trailer_entry { + union { + struct { + unsigned int f:1; /* 0 - Block Full Indicator */ + unsigned int a:1; /* 1 - Alert request control */ + unsigned int t:1; /* 2 - Timestamp format */ + unsigned int:29; /* 3 - 31: Reserved */ + unsigned int bsdes:16; /* 32-47: size of basic SDE */ + unsigned int dsdes:16; /* 48-63: size of diagnostic SDE */ + }; + unsigned long long flags; /* 0 - 64: All indicators */ + }; + unsigned long long overflow; /* 64 - sample Overflow count */ + unsigned char timestamp[16]; /* 16 - 31 timestamp */ + unsigned long long reserved1; /* 32 -Reserved */ + unsigned long long reserved2; /* */ + union { /* 48 - reserved for programming use */ + struct { + unsigned long long clock_base:1; /* in progusage2 */ + unsigned long long progusage1:63; + unsigned long long progusage2; + }; + unsigned long long progusage[2]; + }; +}; + +#endif diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c new file mode 100644 index 000000000000..d2c78ffd9fee --- /dev/null +++ b/tools/perf/util/s390-cpumsf.c @@ -0,0 +1,945 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright IBM Corp. 2018 + * Auxtrace support for s390 CPU-Measurement Sampling Facility + * + * Author(s): Thomas Richter <tmricht@linux.ibm.com> + * + * Auxiliary traces are collected during 'perf record' using the rbd000 event. + * Several PERF_RECORD_XXX are generated during recording: + * + * PERF_RECORD_AUX: + * Records that new data landed in the AUX buffer part. + * PERF_RECORD_AUXTRACE: + * Defines auxtrace data. Followed by the actual data. The contents of + * the auxtrace data are dependent on the event and the CPU. + * This record is generated by the perf record command. For details + * see Documentation/perf.data-file-format.txt. + * PERF_RECORD_AUXTRACE_INFO: + * Defines a table of contents for PERF_RECORD_AUXTRACE records. This + * record is generated during the 'perf record' command. Each record contains up + * to 256 entries describing offset and size of the AUXTRACE data in the + * perf.data file. + * PERF_RECORD_AUXTRACE_ERROR: + * Indicates an error during AUXTRACE collection such as buffer overflow. + * PERF_RECORD_FINISHED_ROUND: + * Perf events are not necessarily in time stamp order, as they can be + * collected in parallel on different CPUs. If the events should be + * processed in time order they need to be sorted first. + * Perf report guarantees that there is no reordering over a + * PERF_RECORD_FINISHED_ROUND boundary event.
All perf records with a + * time stamp lower than this record are processed (and displayed) before + * the succeeding perf records are processed. + * + * These records are evaluated during the perf report command. + * + * 1. PERF_RECORD_AUXTRACE_INFO is used to set up the infrastructure for + * auxiliary trace data processing. See s390_cpumsf_process_auxtrace_info() + * below. + * Auxiliary trace data is collected per CPU. To merge the data into the report + * an auxtrace_queue is created for each CPU. It is assumed that the auxtrace + * data is in ascending order. + * + * Each queue has a doubly linked list of auxtrace_buffers. This list contains + * the offset and size of a CPU's auxtrace data. During auxtrace processing + * the data portion is mmap()'ed. + * + * To sort the queues in chronological order, all queue access is controlled + * by the auxtrace_heap. This is basically a stack, each stack element has two + * entries, the queue number and a time stamp. However, the stack is sorted by + * the time stamps. The highest time stamp is at the bottom, the lowest + * (nearest) time stamp is at the top. That sort order is maintained at all + * times! + * + * After the auxtrace infrastructure has been set up, the auxtrace queues are + * filled with data (offset/size pairs) and the auxtrace_heap is populated. + * + * 2. PERF_RECORD_XXX processing triggers access to the auxtrace_queues. + * Each record is handled by s390_cpumsf_process_event(). The time stamp of + * the perf record is compared with the time stamp located on the auxtrace_heap + * top element. If that time stamp is lower than the time stamp from the + * record sample, the auxtrace queues will be processed. As auxtrace queues + * control many auxtrace_buffers and each buffer can be quite large, the + * auxtrace buffer might be processed only partially. In this case the + * position in the auxtrace_buffer of that queue is remembered and the time + * stamp of the last processed entry of the auxtrace_buffer replaces the + * current auxtrace_heap top. + * + * 3. Auxtrace_queues might run out of data and are fed by the + * PERF_RECORD_AUXTRACE handling, see s390_cpumsf_process_auxtrace_event(). + * + * Event Generation + * Each sampling-data entry in the auxiliary trace data generates a perf sample. + * This sample is filled + * with data from the auxtrace such as PID/TID, instruction address, CPU state, + * etc. This sample is processed with perf_session__deliver_synth_event() to + * be included into the GUI. + * + * 4. The PERF_RECORD_FINISHED_ROUND event is used to process all the remaining + * auxiliary trace entries until the time stamp of this record is reached by the + * auxtrace_heap top. This is triggered by ordered_event->deliver(). + * + * + * Perf event processing. + * Event processing of PERF_RECORD_XXX entries relies on time stamp entries. + * This is the function call sequence: + * + * __cmd_report() + * | + * perf_session__process_events() + * | + * __perf_session__process_events() + * | + * perf_session__process_event() + * | This function splits the PERF_RECORD_XXX records. + * | - Those generated by the perf record command (type number equal to or higher + * | than PERF_RECORD_USER_TYPE_START) are handled by + * | perf_session__process_user_event() (see below) + * | - Those generated by the kernel are handled by + * | perf_evlist__parse_sample_timestamp() + * | + * perf_evlist__parse_sample_timestamp() + * | Extracts the time stamp from sample data.
+ * | + * perf_session__queue_event() + * | If timestamp is positive the sample is entered into an ordered_event + * | list, sort order is the timestamp. The event processing is deferred until + * | later (see perf_session__process_user_event()). + * | Other timestamps (0 or -1) are handled immediately by + * | perf_session__deliver_event(). These are events generated at start up + * | of command perf record. They create PERF_RECORD_COMM and PERF_RECORD_MMAP* + * | records. They are needed to create a list of running processes and its + * | memory mappings and layout. They are needed at the beginning to enable + * | command perf report to create process trees and memory mappings. + * | + * perf_session__deliver_event() + * | Delivers a PERF_RECORD_XXX entry for handling. + * | + * auxtrace__process_event() + * | The timestamp of the PERF_RECORD_XXX entry is taken to correlate with + * | time stamps from the auxiliary trace buffers. This enables + * | synchronization between auxiliary trace data and the events on the + * | perf.data file. + * | + * machine__deliver_event() + * | Handles the PERF_RECORD_XXX event. This depends on the record type. + * It might update the process tree, update a process memory map or enter + * a sample with IP and call back chain data into GUI data pool. + * + * + * Deferred processing determined by perf_session__process_user_event() is + * finally processed when a PERF_RECORD_FINISHED_ROUND is encountered. These + * are generated during command perf record. + * The timestamp of PERF_RECORD_FINISHED_ROUND event is taken to process all + * PERF_RECORD_XXX entries stored in the ordered_event list. This list was + * built up while reading the perf.data file. + * Each event is now processed by calling perf_session__deliver_event(). + * This enables time synchronization between the data in the perf.data file and + * the data in the auxiliary trace buffers. + */ + +#include <endian.h> +#include <errno.h> +#include <byteswap.h> +#include <inttypes.h> +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/bitops.h> +#include <linux/log2.h> + +#include "cpumap.h" +#include "color.h" +#include "evsel.h" +#include "evlist.h" +#include "machine.h" +#include "session.h" +#include "util.h" +#include "thread.h" +#include "debug.h" +#include "auxtrace.h" +#include "s390-cpumsf.h" +#include "s390-cpumsf-kernel.h" + +struct s390_cpumsf { + struct auxtrace auxtrace; + struct auxtrace_queues queues; + struct auxtrace_heap heap; + struct perf_session *session; + struct machine *machine; + u32 auxtrace_type; + u32 pmu_type; + u16 machine_type; + bool data_queued; +}; + +struct s390_cpumsf_queue { + struct s390_cpumsf *sf; + unsigned int queue_nr; + struct auxtrace_buffer *buffer; + int cpu; +}; + +/* Display s390 CPU measurement facility basic-sampling data entry */ +static bool s390_cpumsf_basic_show(const char *color, size_t pos, + struct hws_basic_entry *basic) +{ + if (basic->def != 1) { + pr_err("Invalid AUX trace basic entry [%#08zx]\n", pos); + return false; + } + color_fprintf(stdout, color, " [%#08zx] Basic Def:%04x Inst:%#04x" + " %c%c%c%c AS:%d ASN:%#04x IA:%#018llx\n" + "\t\tCL:%d HPP:%#018llx GPP:%#018llx\n", + pos, basic->def, basic->U, + basic->T ? 'T' : ' ', + basic->W ? 'W' : ' ', + basic->P ? 'P' : ' ', + basic->I ? 
'I' : ' ', + basic->AS, basic->prim_asn, basic->ia, basic->CL, + basic->hpp, basic->gpp); + return true; +} + +/* Display s390 CPU measurement facility diagnostic-sampling data entry */ +static bool s390_cpumsf_diag_show(const char *color, size_t pos, + struct hws_diag_entry *diag) +{ + if (diag->def < S390_CPUMSF_DIAG_DEF_FIRST) { + pr_err("Invalid AUX trace diagnostic entry [%#08zx]\n", pos); + return false; + } + color_fprintf(stdout, color, " [%#08zx] Diag Def:%04x %c\n", + pos, diag->def, diag->I ? 'I' : ' '); + return true; +} + +/* Return TOD timestamp contained in a trailer entry */ +static unsigned long long trailer_timestamp(struct hws_trailer_entry *te) +{ + /* te->t set: TOD in STCKE format, bytes 8-15 + * te->t not set: TOD in STCK format, bytes 0-7 + */ + unsigned long long ts; + + memcpy(&ts, &te->timestamp[te->t], sizeof(ts)); + return ts; +} + +/* Display s390 CPU measurement facility trailer entry */ +static bool s390_cpumsf_trailer_show(const char *color, size_t pos, + struct hws_trailer_entry *te) +{ + if (te->bsdes != sizeof(struct hws_basic_entry)) { + pr_err("Invalid AUX trace trailer entry [%#08zx]\n", pos); + return false; + } + color_fprintf(stdout, color, " [%#08zx] Trailer %c%c%c bsdes:%d" + " dsdes:%d Overflow:%lld Time:%#llx\n" + "\t\tC:%d TOD:%#lx 1:%#llx 2:%#llx\n", + pos, + te->f ? 'F' : ' ', + te->a ? 'A' : ' ', + te->t ? 'T' : ' ', + te->bsdes, te->dsdes, te->overflow, + trailer_timestamp(te), te->clock_base, te->progusage2, + te->progusage[0], te->progusage[1]); + return true; +} + +/* Test a sample data block. It must be 4KB or a multiple thereof in size and + * 4KB page aligned. Each sample data page has a trailer entry at the + * end which contains the sample entry data sizes. + * + * Return true if the sample data block passes the checks and sets the + * basic set entry size and diagnostic set entry size. + * + * Return false on failure. + * + * Note: Old hardware does not set the basic or diagnostic entry sizes + * in the trailer entry. Use the type number instead. + */ +static bool s390_cpumsf_validate(int machine_type, + unsigned char *buf, size_t len, + unsigned short *bsdes, + unsigned short *dsdes) +{ + struct hws_basic_entry *basic = (struct hws_basic_entry *)buf; + struct hws_trailer_entry *te; + + *dsdes = *bsdes = 0; + if (len & (S390_CPUMSF_PAGESZ - 1)) /* Illegal size */ + return false; + if (basic->def != 1) /* No basic set entry, must be first */ + return false; + /* Check for trailer entry at end of SDB */ + te = (struct hws_trailer_entry *)(buf + S390_CPUMSF_PAGESZ + - sizeof(*te)); + *bsdes = te->bsdes; + *dsdes = te->dsdes; + if (!te->bsdes && !te->dsdes) { + /* Very old hardware, use CPUID */ + switch (machine_type) { + case 2097: + case 2098: + *dsdes = 64; + *bsdes = 32; + break; + case 2817: + case 2818: + *dsdes = 74; + *bsdes = 32; + break; + case 2827: + case 2828: + *dsdes = 85; + *bsdes = 32; + break; + default: + /* Illegal trailer entry */ + return false; + } + } + return true; +} + +/* Return true if there is room for another entry */ +static bool s390_cpumsf_reached_trailer(size_t entry_sz, size_t pos) +{ + size_t payload = S390_CPUMSF_PAGESZ - sizeof(struct hws_trailer_entry); + + if (payload - (pos & (S390_CPUMSF_PAGESZ - 1)) < entry_sz) + return false; + return true; +} + +/* Dump an auxiliary buffer. These buffers are multiples of + * 4KB SDB pages.
+ */ +static void s390_cpumsf_dump(struct s390_cpumsf *sf, + unsigned char *buf, size_t len) +{ + const char *color = PERF_COLOR_BLUE; + struct hws_basic_entry *basic; + struct hws_diag_entry *diag; + unsigned short bsdes, dsdes; + size_t pos = 0; + + color_fprintf(stdout, color, + ". ... s390 AUX data: size %zu bytes\n", + len); + + if (!s390_cpumsf_validate(sf->machine_type, buf, len, &bsdes, + &dsdes)) { + pr_err("Invalid AUX trace data block size:%zu" + " (type:%d bsdes:%hd dsdes:%hd)\n", + len, sf->machine_type, bsdes, dsdes); + return; + } + + /* s390 kernel always returns 4KB blocks fully occupied, + * no partially filled SDBs. + */ + while (pos < len) { + /* Handle Basic entry */ + basic = (struct hws_basic_entry *)(buf + pos); + if (s390_cpumsf_basic_show(color, pos, basic)) + pos += bsdes; + else + return; + + /* Handle Diagnostic entry */ + diag = (struct hws_diag_entry *)(buf + pos); + if (s390_cpumsf_diag_show(color, pos, diag)) + pos += dsdes; + else + return; + + /* Check for trailer entry */ + if (!s390_cpumsf_reached_trailer(bsdes + dsdes, pos)) { + /* Show trailer entry */ + struct hws_trailer_entry te; + + pos = (pos + S390_CPUMSF_PAGESZ) + & ~(S390_CPUMSF_PAGESZ - 1); + pos -= sizeof(te); + memcpy(&te, buf + pos, sizeof(te)); + /* Set descriptor sizes in case of old hardware + * where these values are not set. + */ + te.bsdes = bsdes; + te.dsdes = dsdes; + if (s390_cpumsf_trailer_show(color, pos, &te)) + pos += sizeof(te); + else + return; + } + } +} + +static void s390_cpumsf_dump_event(struct s390_cpumsf *sf, unsigned char *buf, + size_t len) +{ + printf(".\n"); + s390_cpumsf_dump(sf, buf, len); +} + +#define S390_LPP_PID_MASK 0xffffffff + +static bool s390_cpumsf_make_event(size_t pos, + struct hws_basic_entry *basic, + struct s390_cpumsf_queue *sfq) +{ + struct perf_sample sample = { + .ip = basic->ia, + .pid = basic->hpp & S390_LPP_PID_MASK, + .tid = basic->hpp & S390_LPP_PID_MASK, + .cpumode = PERF_RECORD_MISC_CPUMODE_UNKNOWN, + .cpu = sfq->cpu, + .period = 1 + }; + union perf_event event; + + memset(&event, 0, sizeof(event)); + if (basic->CL == 1) /* Native LPAR mode */ + sample.cpumode = basic->P ? PERF_RECORD_MISC_USER + : PERF_RECORD_MISC_KERNEL; + else if (basic->CL == 2) /* Guest kernel/user space */ + sample.cpumode = basic->P ? PERF_RECORD_MISC_GUEST_USER + : PERF_RECORD_MISC_GUEST_KERNEL; + else if (basic->gpp || basic->prim_asn != 0xffff) + /* Use heuristics on old hardware */ + sample.cpumode = basic->P ? PERF_RECORD_MISC_GUEST_USER + : PERF_RECORD_MISC_GUEST_KERNEL; + else + sample.cpumode = basic->P ? 
PERF_RECORD_MISC_USER + : PERF_RECORD_MISC_KERNEL; + + event.sample.header.type = PERF_RECORD_SAMPLE; + event.sample.header.misc = sample.cpumode; + event.sample.header.size = sizeof(struct perf_event_header); + + pr_debug4("%s pos:%#zx ip:%#" PRIx64 " P:%d CL:%d pid:%d.%d cpumode:%d cpu:%d\n", + __func__, pos, sample.ip, basic->P, basic->CL, sample.pid, + sample.tid, sample.cpumode, sample.cpu); + if (perf_session__deliver_synth_event(sfq->sf->session, &event, + &sample)) { + pr_err("s390 Auxiliary Trace: failed to deliver event\n"); + return false; + } + return true; +} + +static unsigned long long get_trailer_time(const unsigned char *buf) +{ + struct hws_trailer_entry *te; + unsigned long long aux_time; + + te = (struct hws_trailer_entry *)(buf + S390_CPUMSF_PAGESZ + - sizeof(*te)); + + if (!te->clock_base) /* TOD_CLOCK_BASE value missing */ + return 0; + + /* Correct calculation to convert time stamp in trailer entry to + * nanoseconds (taken from arch/s390 function tod_to_ns()). + * TOD_CLOCK_BASE is stored in trailer entry member progusage2. + */ + aux_time = trailer_timestamp(te) - te->progusage2; + aux_time = (aux_time >> 9) * 125 + (((aux_time & 0x1ff) * 125) >> 9); + return aux_time; +} + +/* Process the data samples of a single queue. The first parameter is a + * pointer to the queue, the second parameter is the time stamp. This + * is the time stamp: + * - of the event that triggered this processing. + * - or the time stamp when the last processing of this queue stopped. + * In this case it stopped at a 4KB page boundary and recorded the + * position where to continue processing on the next invocation + * (see buffer->use_data and buffer->use_size). + * + * When this function returns, the second parameter is updated to + * reflect the time stamp of the last processed auxiliary data entry + * (taken from the trailer entry of that page). The caller uses this + * returned time stamp to record the last processed entry in this + * queue. + * + * The function returns: + * 0: Processing successful. The second parameter returns the + * time stamp from the trailer entry until which position + * processing took place. Subsequent calls resume from this + * position. + * <0: An error occurred during processing. The second parameter + * returns the maximum time stamp. + * >0: Done on this queue. The second parameter returns the + * maximum time stamp. + */ +static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts) +{ + struct s390_cpumsf *sf = sfq->sf; + unsigned char *buf = sfq->buffer->use_data; + size_t len = sfq->buffer->use_size; + struct hws_basic_entry *basic; + unsigned short bsdes, dsdes; + size_t pos = 0; + int err = 1; + u64 aux_ts; + + if (!s390_cpumsf_validate(sf->machine_type, buf, len, &bsdes, + &dsdes)) { + *ts = ~0ULL; + return -1; + } + + /* Get the trailer entry time stamp and check if entries in + * this auxiliary page are ready for processing. If the + * time stamp of the first entry is too high, the whole buffer + * can be skipped. In this case return the time stamp.
+ */ + aux_ts = get_trailer_time(buf); + if (!aux_ts) { + pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n", + sfq->buffer->data_offset); + aux_ts = ~0ULL; + goto out; + } + if (aux_ts > *ts) { + *ts = aux_ts; + return 0; + } + + while (pos < len) { + /* Handle Basic entry */ + basic = (struct hws_basic_entry *)(buf + pos); + if (s390_cpumsf_make_event(pos, basic, sfq)) + pos += bsdes; + else { + err = -EBADF; + goto out; + } + + pos += dsdes; /* Skip diagnositic entry */ + + /* Check for trailer entry */ + if (!s390_cpumsf_reached_trailer(bsdes + dsdes, pos)) { + pos = (pos + S390_CPUMSF_PAGESZ) + & ~(S390_CPUMSF_PAGESZ - 1); + /* Check existence of next page */ + if (pos >= len) + break; + aux_ts = get_trailer_time(buf + pos); + if (!aux_ts) { + aux_ts = ~0ULL; + goto out; + } + if (aux_ts > *ts) { + *ts = aux_ts; + sfq->buffer->use_data += pos; + sfq->buffer->use_size -= pos; + return 0; + } + } + } +out: + *ts = aux_ts; + sfq->buffer->use_size = 0; + sfq->buffer->use_data = NULL; + return err; /* Buffer completely scanned or error */ +} + +/* Run the s390 auxiliary trace decoder. + * Select the queue buffer to operate on, the caller already selected + * the proper queue, depending on second parameter 'ts'. + * This is the time stamp until which the auxiliary entries should + * be processed. This value is updated by called functions and + * returned to the caller. + * + * Resume processing in the current buffer. If there is no buffer + * get a new buffer from the queue and setup start position for + * processing. + * When a buffer is completely processed remove it from the queue + * before returning. + * + * This function returns + * 1: When the queue is empty. Second parameter will be set to + * maximum time stamp. + * 0: Normal processing done. + * <0: Error during queue buffer setup. This causes the caller + * to stop processing completely. + */ +static int s390_cpumsf_run_decoder(struct s390_cpumsf_queue *sfq, + u64 *ts) +{ + + struct auxtrace_buffer *buffer; + struct auxtrace_queue *queue; + int err; + + queue = &sfq->sf->queues.queue_array[sfq->queue_nr]; + + /* Get buffer and last position in buffer to resume + * decoding the auxiliary entries. One buffer might be large + * and decoding might stop in between. This depends on the time + * stamp of the trailer entry in each page of the auxiliary + * data and the time stamp of the event triggering the decoding. + */ + if (sfq->buffer == NULL) { + sfq->buffer = buffer = auxtrace_buffer__next(queue, + sfq->buffer); + if (!buffer) { + *ts = ~0ULL; + return 1; /* Processing done on this queue */ + } + /* Start with a new buffer on this queue */ + if (buffer->data) { + buffer->use_size = buffer->size; + buffer->use_data = buffer->data; + } + } else + buffer = sfq->buffer; + + if (!buffer->data) { + int fd = perf_data__fd(sfq->sf->session->data); + + buffer->data = auxtrace_buffer__get_data(buffer, fd); + if (!buffer->data) + return -ENOMEM; + buffer->use_size = buffer->size; + buffer->use_data = buffer->data; + } + pr_debug4("%s queue_nr:%d buffer:%" PRId64 " offset:%#" PRIx64 " size:%#zx rest:%#zx\n", + __func__, sfq->queue_nr, buffer->buffer_nr, buffer->offset, + buffer->size, buffer->use_size); + err = s390_cpumsf_samples(sfq, ts); + + /* If non-zero, there is either an error (err < 0) or the buffer is + * completely done (err > 0). The error is unrecoverable, usually + * some descriptors could not be read successfully, so continue with + * the next buffer. + * In both cases the parameter 'ts' has been updated. 
+ */ + if (err) { + sfq->buffer = NULL; + list_del(&buffer->list); + auxtrace_buffer__free(buffer); + if (err > 0) /* Buffer done, no error */ + err = 0; + } + return err; +} + +static struct s390_cpumsf_queue * +s390_cpumsf_alloc_queue(struct s390_cpumsf *sf, unsigned int queue_nr) +{ + struct s390_cpumsf_queue *sfq; + + sfq = zalloc(sizeof(struct s390_cpumsf_queue)); + if (sfq == NULL) + return NULL; + + sfq->sf = sf; + sfq->queue_nr = queue_nr; + sfq->cpu = -1; + return sfq; +} + +static int s390_cpumsf_setup_queue(struct s390_cpumsf *sf, + struct auxtrace_queue *queue, + unsigned int queue_nr, u64 ts) +{ + struct s390_cpumsf_queue *sfq = queue->priv; + + if (list_empty(&queue->head)) + return 0; + + if (sfq == NULL) { + sfq = s390_cpumsf_alloc_queue(sf, queue_nr); + if (!sfq) + return -ENOMEM; + queue->priv = sfq; + + if (queue->cpu != -1) + sfq->cpu = queue->cpu; + } + return auxtrace_heap__add(&sf->heap, queue_nr, ts); +} + +static int s390_cpumsf_setup_queues(struct s390_cpumsf *sf, u64 ts) +{ + unsigned int i; + int ret = 0; + + for (i = 0; i < sf->queues.nr_queues; i++) { + ret = s390_cpumsf_setup_queue(sf, &sf->queues.queue_array[i], + i, ts); + if (ret) + break; + } + return ret; +} + +static int s390_cpumsf_update_queues(struct s390_cpumsf *sf, u64 ts) +{ + if (!sf->queues.new_data) + return 0; + + sf->queues.new_data = false; + return s390_cpumsf_setup_queues(sf, ts); +} + +static int s390_cpumsf_process_queues(struct s390_cpumsf *sf, u64 timestamp) +{ + unsigned int queue_nr; + u64 ts; + int ret; + + while (1) { + struct auxtrace_queue *queue; + struct s390_cpumsf_queue *sfq; + + if (!sf->heap.heap_cnt) + return 0; + + if (sf->heap.heap_array[0].ordinal >= timestamp) + return 0; + + queue_nr = sf->heap.heap_array[0].queue_nr; + queue = &sf->queues.queue_array[queue_nr]; + sfq = queue->priv; + + auxtrace_heap__pop(&sf->heap); + if (sf->heap.heap_cnt) { + ts = sf->heap.heap_array[0].ordinal + 1; + if (ts > timestamp) + ts = timestamp; + } else { + ts = timestamp; + } + + ret = s390_cpumsf_run_decoder(sfq, &ts); + if (ret < 0) { + auxtrace_heap__add(&sf->heap, queue_nr, ts); + return ret; + } + if (!ret) { + ret = auxtrace_heap__add(&sf->heap, queue_nr, ts); + if (ret < 0) + return ret; + } + } + return 0; +} + +static int s390_cpumsf_synth_error(struct s390_cpumsf *sf, int code, int cpu, + pid_t pid, pid_t tid, u64 ip) +{ + char msg[MAX_AUXTRACE_ERROR_MSG]; + union perf_event event; + int err; + + strncpy(msg, "Lost Auxiliary Trace Buffer", sizeof(msg) - 1); + auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE, + code, cpu, pid, tid, ip, msg); + + err = perf_session__deliver_synth_event(sf->session, &event, NULL); + if (err) + pr_err("s390 Auxiliary Trace: failed to deliver error event," + "error %d\n", err); + return err; +} + +static int s390_cpumsf_lost(struct s390_cpumsf *sf, struct perf_sample *sample) +{ + return s390_cpumsf_synth_error(sf, 1, sample->cpu, + sample->pid, sample->tid, 0); +} + +static int +s390_cpumsf_process_event(struct perf_session *session __maybe_unused, + union perf_event *event, + struct perf_sample *sample, + struct perf_tool *tool) +{ + struct s390_cpumsf *sf = container_of(session->auxtrace, + struct s390_cpumsf, + auxtrace); + u64 timestamp = sample->time; + int err = 0; + + if (dump_trace) + return 0; + + if (!tool->ordered_events) { + pr_err("s390 Auxiliary Trace requires ordered events\n"); + return -EINVAL; + } + + if (event->header.type == PERF_RECORD_AUX && + event->aux.flags & PERF_AUX_FLAG_TRUNCATED) + return 
s390_cpumsf_lost(sf, sample); + + if (timestamp) { + err = s390_cpumsf_update_queues(sf, timestamp); + if (!err) + err = s390_cpumsf_process_queues(sf, timestamp); + } + return err; +} + +struct s390_cpumsf_synth { + struct perf_tool cpumsf_tool; + struct perf_session *session; +}; + +static int +s390_cpumsf_process_auxtrace_event(struct perf_session *session, + union perf_event *event __maybe_unused, + struct perf_tool *tool __maybe_unused) +{ + struct s390_cpumsf *sf = container_of(session->auxtrace, + struct s390_cpumsf, + auxtrace); + + int fd = perf_data__fd(session->data); + struct auxtrace_buffer *buffer; + off_t data_offset; + int err; + + if (sf->data_queued) + return 0; + + if (perf_data__is_pipe(session->data)) { + data_offset = 0; + } else { + data_offset = lseek(fd, 0, SEEK_CUR); + if (data_offset == -1) + return -errno; + } + + err = auxtrace_queues__add_event(&sf->queues, session, event, + data_offset, &buffer); + if (err) + return err; + + /* Dump here after copying piped trace out of the pipe */ + if (dump_trace) { + if (auxtrace_buffer__get_data(buffer, fd)) { + s390_cpumsf_dump_event(sf, buffer->data, + buffer->size); + auxtrace_buffer__put_data(buffer); + } + } + return 0; +} + +static void s390_cpumsf_free_events(struct perf_session *session __maybe_unused) +{ +} + +static int s390_cpumsf_flush(struct perf_session *session __maybe_unused, + struct perf_tool *tool __maybe_unused) +{ + return 0; +} + +static void s390_cpumsf_free_queues(struct perf_session *session) +{ + struct s390_cpumsf *sf = container_of(session->auxtrace, + struct s390_cpumsf, + auxtrace); + struct auxtrace_queues *queues = &sf->queues; + unsigned int i; + + for (i = 0; i < queues->nr_queues; i++) + zfree(&queues->queue_array[i].priv); + auxtrace_queues__free(queues); +} + +static void s390_cpumsf_free(struct perf_session *session) +{ + struct s390_cpumsf *sf = container_of(session->auxtrace, + struct s390_cpumsf, + auxtrace); + + auxtrace_heap__free(&sf->heap); + s390_cpumsf_free_queues(session); + session->auxtrace = NULL; + free(sf); +} + +static int s390_cpumsf_get_type(const char *cpuid) +{ + int ret, family = 0; + + ret = sscanf(cpuid, "%*[^,],%u", &family); + return (ret == 1) ? family : 0; +} + +/* Check itrace options set on perf report command. + * Return true, if none are set or all options specified can be + * handled on s390. + * Return false otherwise. 
+ */ +static bool check_auxtrace_itrace(struct itrace_synth_opts *itops) +{ + if (!itops || !itops->set) + return true; + pr_err("No --itrace options supported\n"); + return false; +} + +int s390_cpumsf_process_auxtrace_info(union perf_event *event, + struct perf_session *session) +{ + struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info; + struct s390_cpumsf *sf; + int err; + + if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event)) + return -EINVAL; + + sf = zalloc(sizeof(struct s390_cpumsf)); + if (sf == NULL) + return -ENOMEM; + + if (!check_auxtrace_itrace(session->itrace_synth_opts)) { + err = -EINVAL; + goto err_free; + } + + err = auxtrace_queues__init(&sf->queues); + if (err) + goto err_free; + + sf->session = session; + sf->machine = &session->machines.host; /* No kvm support */ + sf->auxtrace_type = auxtrace_info->type; + sf->pmu_type = PERF_TYPE_RAW; + sf->machine_type = s390_cpumsf_get_type(session->evlist->env->cpuid); + + sf->auxtrace.process_event = s390_cpumsf_process_event; + sf->auxtrace.process_auxtrace_event = s390_cpumsf_process_auxtrace_event; + sf->auxtrace.flush_events = s390_cpumsf_flush; + sf->auxtrace.free_events = s390_cpumsf_free_events; + sf->auxtrace.free = s390_cpumsf_free; + session->auxtrace = &sf->auxtrace; + + if (dump_trace) + return 0; + + err = auxtrace_queues__process_index(&sf->queues, session); + if (err) + goto err_free_queues; + + if (sf->queues.populated) + sf->data_queued = true; + + return 0; + +err_free_queues: + auxtrace_queues__free(&sf->queues); + session->auxtrace = NULL; +err_free: + free(sf); + return err; +} diff --git a/tools/perf/util/s390-cpumsf.h b/tools/perf/util/s390-cpumsf.h new file mode 100644 index 000000000000..fb64d100555c --- /dev/null +++ b/tools/perf/util/s390-cpumsf.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright IBM Corp. 
2018 + * Auxtrace support for s390 CPU-Measurement Sampling Facility + * + * Author(s): Thomas Richter <tmricht@linux.ibm.com> + */ + +#ifndef INCLUDE__PERF_S390_CPUMSF_H +#define INCLUDE__PERF_S390_CPUMSF_H + +union perf_event; +struct perf_session; +struct perf_pmu; + +struct auxtrace_record * +s390_cpumsf_recording_init(int *err, struct perf_pmu *s390_cpumsf_pmu); + +int s390_cpumsf_process_auxtrace_info(union perf_event *event, + struct perf_session *session); +#endif diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index 7b79c413486b..45484f0f7292 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -535,7 +535,7 @@ static int perl_stop_script(void) return 0; } -static int perl_generate_script(struct pevent *pevent, const char *outfile) +static int perl_generate_script(struct tep_handle *pevent, const char *outfile) { struct event_format *event = NULL; struct format_field *f; diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index bc32e57d17be..dfc6093f118c 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -871,8 +871,8 @@ static void python_process_tracepoint(struct perf_sample *sample, offset = field->offset; len = field->size; if (field->flags & FIELD_IS_DYNAMIC) { - val = pevent_read_number(scripting_context->pevent, - data + offset, len); + val = tep_read_number(scripting_context->pevent, + data + offset, len); offset = val; len = offset >> 16; offset &= 0xffff; @@ -1588,7 +1588,7 @@ static int python_stop_script(void) return 0; } -static int python_generate_script(struct pevent *pevent, const char *outfile) +static int python_generate_script(struct tep_handle *pevent, const char *outfile) { struct event_format *event = NULL; struct format_field *f; diff --git a/tools/perf/util/setup.py b/tools/perf/util/setup.py index 001be4f9d3b9..97efbcad076e 100644 --- a/tools/perf/util/setup.py +++ b/tools/perf/util/setup.py @@ -1,12 +1,20 @@ #!/usr/bin/python from os import getenv +from subprocess import Popen, PIPE +from re import sub + +def clang_has_option(option): + return [o for o in Popen(['clang', option], stderr=PIPE).stderr.readlines() if "unknown argument" in o] == [ ] cc = getenv("CC") if cc == "clang": from _sysconfigdata import build_time_vars - from re import sub build_time_vars["CFLAGS"] = sub("-specs=[^ ]+", "", build_time_vars["CFLAGS"]) + if not clang_has_option("-mcet"): + build_time_vars["CFLAGS"] = sub("-mcet", "", build_time_vars["CFLAGS"]) + if not clang_has_option("-fcf-protection"): + build_time_vars["CFLAGS"] = sub("-fcf-protection", "", build_time_vars["CFLAGS"]) from distutils.core import setup, Extension diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index fed2952ab45a..b284276ec963 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -601,7 +601,7 @@ static char *get_trace_output(struct hist_entry *he) { struct trace_seq seq; struct perf_evsel *evsel; - struct pevent_record rec = { + struct tep_record rec = { .data = he->raw_data, .size = he->raw_size, }; @@ -610,10 +610,10 @@ static char *get_trace_output(struct hist_entry *he) trace_seq_init(&seq); if (symbol_conf.raw_trace) { - pevent_print_fields(&seq, he->raw_data, he->raw_size, - evsel->tp_format); + tep_print_fields(&seq, he->raw_data, he->raw_size, + evsel->tp_format); } else 
{ - pevent_event_info(&seq, evsel->tp_format, &rec); + tep_event_info(&seq, evsel->tp_format, &rec); } /* * Trim the buffer, it starts at 4KB and we're not going to @@ -2047,7 +2047,7 @@ static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp, struct trace_seq seq; raw_field: trace_seq_init(&seq); - pevent_print_field(&seq, he->raw_data, hde->field); + tep_print_field(&seq, he->raw_data, hde->field); str = seq.buffer; } @@ -2074,7 +2074,7 @@ static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt, if (field->flags & FIELD_IS_DYNAMIC) { unsigned long long dyn; - pevent_read_number_field(field, a->raw_data, &dyn); + tep_read_number_field(field, a->raw_data, &dyn); offset = dyn & 0xffff; size = (dyn >> 16) & 0xffff; @@ -2311,7 +2311,7 @@ static int add_all_matching_fields(struct perf_evlist *evlist, if (evsel->attr.type != PERF_TYPE_TRACEPOINT) continue; - field = pevent_find_any_field(evsel->tp_format, field_name); + field = tep_find_any_field(evsel->tp_format, field_name); if (field == NULL) continue; @@ -2378,7 +2378,7 @@ static int add_dynamic_entry(struct perf_evlist *evlist, const char *tok, if (!strcmp(field_name, "*")) { ret = add_evsel_fields(evsel, raw_trace, level); } else { - field = pevent_find_any_field(evsel->tp_format, field_name); + field = tep_find_any_field(evsel->tp_format, field_name); if (field == NULL) { pr_debug("Cannot find event field for %s.%s\n", event_name, field_name); diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index 8bf302cafcec..a97cf8e6be86 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -276,7 +276,7 @@ extern struct sort_entry sort_thread; extern struct list_head hist_entry__sort_list; struct perf_evlist; -struct pevent; +struct tep_handle; int setup_sorting(struct perf_evlist *evlist); int setup_output_field(void); void reset_output_field(void); diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index e0a6e9a6a053..920b1d58a068 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -32,7 +32,7 @@ static int get_common_field(struct scripting_context *context, int *offset, int *size, const char *type) { - struct pevent *pevent = context->pevent; + struct tep_handle *pevent = context->pevent; struct event_format *event; struct format_field *field; @@ -41,14 +41,14 @@ static int get_common_field(struct scripting_context *context, return 0; event = pevent->events[0]; - field = pevent_find_common_field(event, type); + field = tep_find_common_field(event, type); if (!field) return 0; *offset = field->offset; *size = field->size; } - return pevent_read_number(pevent, context->event_data + *offset, *size); + return tep_read_number(pevent, context->event_data + *offset, *size); } int common_lock_depth(struct scripting_context *context) @@ -99,24 +99,24 @@ raw_field_value(struct event_format *event, const char *name, void *data) struct format_field *field; unsigned long long val; - field = pevent_find_any_field(event, name); + field = tep_find_any_field(event, name); if (!field) return 0ULL; - pevent_read_number_field(field, data, &val); + tep_read_number_field(field, data, &val); return val; } unsigned long long read_size(struct event_format *event, void *ptr, int size) { - return pevent_read_number(event->pevent, ptr, size); + return tep_read_number(event->pevent, ptr, size); } void event_format__fprintf(struct event_format *event, int cpu, void *data, int size, FILE *fp) { - struct pevent_record record; + struct tep_record record; 
struct trace_seq s; memset(&record, 0, sizeof(record)); @@ -125,7 +125,7 @@ void event_format__fprintf(struct event_format *event, record.data = data; trace_seq_init(&s); - pevent_event_info(&s, event, &record); + tep_event_info(&s, event, &record); trace_seq_do_fprintf(&s, fp); trace_seq_destroy(&s); } @@ -136,7 +136,7 @@ void event_format__print(struct event_format *event, return event_format__fprintf(event, cpu, data, size, stdout); } -void parse_ftrace_printk(struct pevent *pevent, +void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size __maybe_unused) { unsigned long long addr; @@ -157,11 +157,11 @@ void parse_ftrace_printk(struct pevent *pevent, /* fmt still has a space, skip it */ printk = strdup(fmt+1); line = strtok_r(NULL, "\n", &next); - pevent_register_print_string(pevent, printk, addr); + tep_register_print_string(pevent, printk, addr); } } -void parse_saved_cmdline(struct pevent *pevent, +void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int size __maybe_unused) { char *comm; @@ -172,24 +172,24 @@ void parse_saved_cmdline(struct pevent *pevent, line = strtok_r(file, "\n", &next); while (line) { sscanf(line, "%d %ms", &pid, &comm); - pevent_register_comm(pevent, comm, pid); + tep_register_comm(pevent, comm, pid); free(comm); line = strtok_r(NULL, "\n", &next); } } -int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size) +int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size) { - return pevent_parse_event(pevent, buf, size, "ftrace"); + return tep_parse_event(pevent, buf, size, "ftrace"); } -int parse_event_file(struct pevent *pevent, +int parse_event_file(struct tep_handle *pevent, char *buf, unsigned long size, char *sys) { - return pevent_parse_event(pevent, buf, size, sys); + return tep_parse_event(pevent, buf, size, sys); } -struct event_format *trace_find_next_event(struct pevent *pevent, +struct event_format *trace_find_next_event(struct tep_handle *pevent, struct event_format *event) { static int idx; diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index 40b425949aa3..3dfc1db6b25b 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -96,7 +96,7 @@ static void skip(int size) }; } -static unsigned int read4(struct pevent *pevent) +static unsigned int read4(struct tep_handle *pevent) { unsigned int data; @@ -105,7 +105,7 @@ static unsigned int read4(struct pevent *pevent) return __data2host4(pevent, data); } -static unsigned long long read8(struct pevent *pevent) +static unsigned long long read8(struct tep_handle *pevent) { unsigned long long data; @@ -158,7 +158,7 @@ out: return str; } -static int read_proc_kallsyms(struct pevent *pevent) +static int read_proc_kallsyms(struct tep_handle *pevent) { unsigned int size; @@ -181,7 +181,7 @@ static int read_proc_kallsyms(struct pevent *pevent) return 0; } -static int read_ftrace_printk(struct pevent *pevent) +static int read_ftrace_printk(struct tep_handle *pevent) { unsigned int size; char *buf; @@ -208,7 +208,7 @@ static int read_ftrace_printk(struct pevent *pevent) return 0; } -static int read_header_files(struct pevent *pevent) +static int read_header_files(struct tep_handle *pevent) { unsigned long long size; char *header_page; @@ -235,13 +235,13 @@ static int read_header_files(struct pevent *pevent) return -1; } - if (!pevent_parse_header_page(pevent, header_page, size, - pevent_get_long_size(pevent))) { + if (!tep_parse_header_page(pevent, header_page, 
size, + tep_get_long_size(pevent))) { /* * The commit field in the page is of type long, * use that instead, since it represents the kernel. */ - pevent_set_long_size(pevent, pevent->header_page_size_size); + tep_set_long_size(pevent, pevent->header_page_size_size); } free(header_page); @@ -259,7 +259,7 @@ static int read_header_files(struct pevent *pevent) return ret; } -static int read_ftrace_file(struct pevent *pevent, unsigned long long size) +static int read_ftrace_file(struct tep_handle *pevent, unsigned long long size) { int ret; char *buf; @@ -284,8 +284,8 @@ out: return ret; } -static int read_event_file(struct pevent *pevent, char *sys, - unsigned long long size) +static int read_event_file(struct tep_handle *pevent, char *sys, + unsigned long long size) { int ret; char *buf; @@ -310,7 +310,7 @@ out: return ret; } -static int read_ftrace_files(struct pevent *pevent) +static int read_ftrace_files(struct tep_handle *pevent) { unsigned long long size; int count; @@ -328,7 +328,7 @@ static int read_ftrace_files(struct pevent *pevent) return 0; } -static int read_event_files(struct pevent *pevent) +static int read_event_files(struct tep_handle *pevent) { unsigned long long size; char *sys; @@ -356,7 +356,7 @@ static int read_event_files(struct pevent *pevent) return 0; } -static int read_saved_cmdline(struct pevent *pevent) +static int read_saved_cmdline(struct tep_handle *pevent) { unsigned long long size; char *buf; @@ -399,7 +399,7 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe) int host_bigendian; int file_long_size; int file_page_size; - struct pevent *pevent = NULL; + struct tep_handle *pevent = NULL; int err; repipe = __repipe; @@ -439,9 +439,9 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe) pevent = tevent->pevent; - pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); - pevent_set_file_bigendian(pevent, file_bigendian); - pevent_set_host_bigendian(pevent, host_bigendian); + tep_set_flag(pevent, TEP_NSEC_OUTPUT); + tep_set_file_bigendian(pevent, file_bigendian); + tep_set_host_bigendian(pevent, host_bigendian); if (do_read(buf, 1) < 0) goto out; @@ -451,8 +451,8 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe) if (!file_page_size) goto out; - pevent_set_long_size(pevent, file_long_size); - pevent_set_page_size(pevent, file_page_size); + tep_set_long_size(pevent, file_long_size); + tep_set_page_size(pevent, file_page_size); err = read_header_files(pevent); if (err) @@ -479,9 +479,9 @@ ssize_t trace_report(int fd, struct trace_event *tevent, bool __repipe) repipe = false; if (show_funcs) { - pevent_print_funcs(pevent); + tep_print_funcs(pevent); } else if (show_printk) { - pevent_print_printk(pevent); + tep_print_printk(pevent); } pevent = NULL; diff --git a/tools/perf/util/trace-event-scripting.c b/tools/perf/util/trace-event-scripting.c index b1e5c3a2b8e3..b749f812ac70 100644 --- a/tools/perf/util/trace-event-scripting.c +++ b/tools/perf/util/trace-event-scripting.c @@ -66,7 +66,7 @@ static int python_start_script_unsupported(const char *script __maybe_unused, return -1; } -static int python_generate_script_unsupported(struct pevent *pevent +static int python_generate_script_unsupported(struct tep_handle *pevent __maybe_unused, const char *outfile __maybe_unused) @@ -130,7 +130,7 @@ static int perl_start_script_unsupported(const char *script __maybe_unused, return -1; } -static int perl_generate_script_unsupported(struct pevent *pevent +static int perl_generate_script_unsupported(struct tep_handle *pevent 
__maybe_unused, const char *outfile __maybe_unused) { diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c index 1aa368603268..58bb72f266f3 100644 --- a/tools/perf/util/trace-event.c +++ b/tools/perf/util/trace-event.c @@ -28,10 +28,10 @@ static bool tevent_initialized; int trace_event__init(struct trace_event *t) { - struct pevent *pevent = pevent_alloc(); + struct tep_handle *pevent = tep_alloc(); if (pevent) { - t->plugin_list = traceevent_load_plugins(pevent); + t->plugin_list = tep_load_plugins(pevent); t->pevent = pevent; } @@ -40,33 +40,33 @@ int trace_event__init(struct trace_event *t) static int trace_event__init2(void) { - int be = traceevent_host_bigendian(); - struct pevent *pevent; + int be = tep_host_bigendian(); + struct tep_handle *pevent; if (trace_event__init(&tevent)) return -1; pevent = tevent.pevent; - pevent_set_flag(pevent, PEVENT_NSEC_OUTPUT); - pevent_set_file_bigendian(pevent, be); - pevent_set_host_bigendian(pevent, be); + tep_set_flag(pevent, TEP_NSEC_OUTPUT); + tep_set_file_bigendian(pevent, be); + tep_set_host_bigendian(pevent, be); tevent_initialized = true; return 0; } int trace_event__register_resolver(struct machine *machine, - pevent_func_resolver_t *func) + tep_func_resolver_t *func) { if (!tevent_initialized && trace_event__init2()) return -1; - return pevent_set_function_resolver(tevent.pevent, func, machine); + return tep_set_function_resolver(tevent.pevent, func, machine); } void trace_event__cleanup(struct trace_event *t) { - traceevent_unload_plugins(t->plugin_list, t->pevent); - pevent_free(t->pevent); + tep_unload_plugins(t->plugin_list, t->pevent); + tep_free(t->pevent); } /* @@ -76,7 +76,7 @@ static struct event_format* tp_format(const char *sys, const char *name) { char *tp_dir = get_events_file(sys); - struct pevent *pevent = tevent.pevent; + struct tep_handle *pevent = tevent.pevent; struct event_format *event = NULL; char path[PATH_MAX]; size_t size; @@ -93,7 +93,7 @@ tp_format(const char *sys, const char *name) if (err) return ERR_PTR(err); - pevent_parse_format(pevent, &event, data, size, sys); + tep_parse_format(pevent, &event, data, size, sys); free(data); return event; @@ -116,5 +116,5 @@ struct event_format *trace_event__tp_format_id(int id) if (!tevent_initialized && trace_event__init2()) return ERR_PTR(-ENOMEM); - return pevent_find_event(tevent.pevent, id); + return tep_find_event(tevent.pevent, id); } diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index dcbdb53dc702..40204ec3a7a2 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -13,14 +13,14 @@ struct thread; struct plugin_list; struct trace_event { - struct pevent *pevent; + struct tep_handle *pevent; struct plugin_list *plugin_list; }; int trace_event__init(struct trace_event *t); void trace_event__cleanup(struct trace_event *t); int trace_event__register_resolver(struct machine *machine, - pevent_func_resolver_t *func); + tep_func_resolver_t *func); struct event_format* trace_event__tp_format(const char *sys, const char *name); @@ -34,20 +34,20 @@ void event_format__fprintf(struct event_format *event, void event_format__print(struct event_format *event, int cpu, void *data, int size); -int parse_ftrace_file(struct pevent *pevent, char *buf, unsigned long size); -int parse_event_file(struct pevent *pevent, +int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size); +int parse_event_file(struct tep_handle *pevent, char *buf, unsigned long size, char *sys); unsigned long long 
raw_field_value(struct event_format *event, const char *name, void *data); -void parse_proc_kallsyms(struct pevent *pevent, char *file, unsigned int size); -void parse_ftrace_printk(struct pevent *pevent, char *file, unsigned int size); -void parse_saved_cmdline(struct pevent *pevent, char *file, unsigned int size); +void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size); +void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size); +void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int size); ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe); -struct event_format *trace_find_next_event(struct pevent *pevent, +struct event_format *trace_find_next_event(struct tep_handle *pevent, struct event_format *event); unsigned long long read_size(struct event_format *event, void *ptr, int size); unsigned long long eval_flag(const char *flag); @@ -83,7 +83,7 @@ struct scripting_ops { void (*process_stat)(struct perf_stat_config *config, struct perf_evsel *evsel, u64 tstamp); void (*process_stat_interval)(u64 tstamp); - int (*generate_script) (struct pevent *pevent, const char *outfile); + int (*generate_script) (struct tep_handle *pevent, const char *outfile); }; extern unsigned int scripting_max_stack; @@ -94,7 +94,7 @@ void setup_perl_scripting(void); void setup_python_scripting(void); struct scripting_context { - struct pevent *pevent; + struct tep_handle *pevent; void *event_data; }; diff --git a/tools/perf/util/zlib.c b/tools/perf/util/zlib.c index a725b958cf31..902ce6384f57 100644 --- a/tools/perf/util/zlib.c +++ b/tools/perf/util/zlib.c @@ -5,6 +5,8 @@ #include <sys/stat.h> #include <sys/mman.h> #include <zlib.h> +#include <linux/compiler.h> +#include <unistd.h> #include "util/compress.h" #include "util/util.h" @@ -79,3 +81,19 @@ out_close: return ret == Z_STREAM_END ? 0 : -1; } + +bool gzip_is_compressed(const char *input) +{ + int fd = open(input, O_RDONLY); + const uint8_t magic[2] = { 0x1f, 0x8b }; + char buf[2] = { 0 }; + ssize_t rc; + + if (fd < 0) + return -1; + + rc = read(fd, buf, sizeof(buf)); + close(fd); + return rc == sizeof(buf) ? + memcmp(buf, magic, sizeof(buf)) == 0 : false; +}
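
A note on the nanosecond conversion in get_trailer_time() above: the s390 TOD clock counts in units of 1/4096 of a microsecond, so a TOD delta becomes nanoseconds by scaling with 125/512 (= 1000/4096), split into two steps so the multiplication cannot overflow 64 bits. A minimal standalone sketch of that arithmetic follows; the delta value is made up and the helper name is not part of the patch.

#include <stdio.h>
#include <stdint.h>

/* Scale a TOD clock delta (1 unit = 1/4096 microsecond) to nanoseconds,
 * i.e. multiply by 125/512, split into two steps exactly as done in
 * get_trailer_time() above so the intermediate value cannot overflow.
 */
static uint64_t tod_delta_to_ns(uint64_t delta)
{
	return (delta >> 9) * 125 + (((delta & 0x1ff) * 125) >> 9);
}

int main(void)
{
	uint64_t delta = 4096;	/* hypothetical delta: exactly one microsecond */

	printf("%llu ns\n", (unsigned long long)tod_delta_to_ns(delta)); /* prints 1000 ns */
	return 0;
}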
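
The queue ordering described in the s390-cpumsf.c comment above (the queue with the lowest time stamp is always processed next, decoded up to the time stamp of the triggering perf record, then re-entered with the time stamp where decoding stopped) can be illustrated without the perf auxtrace_heap API. In the sketch below the queue count, the time stamps and the decode step are all made up.

#include <stdint.h>
#include <stdio.h>

#define NR_QUEUES 3

/* One entry per queue: the time stamp up to which that queue has
 * already been decoded. The smallest entry is always handled next,
 * mirroring the auxtrace_heap ordering described above.
 */
static uint64_t queue_ts[NR_QUEUES] = { 100, 40, 70 };

/* Pretend to decode one queue towards 'limit' and return the time
 * stamp where decoding stopped (hypothetical fixed step of 50).
 */
static uint64_t decode_queue(int q, uint64_t limit)
{
	uint64_t stop = queue_ts[q] + 50;

	return stop < limit ? stop : limit;
}

int main(void)
{
	uint64_t limit = 200;	/* time stamp of the triggering perf record */

	for (;;) {
		int q, min = 0;

		for (q = 1; q < NR_QUEUES; q++)
			if (queue_ts[q] < queue_ts[min])
				min = q;
		if (queue_ts[min] >= limit)
			break;	/* all queues decoded up to the record's time */
		queue_ts[min] = decode_queue(min, limit);
		printf("queue %d decoded up to %llu\n", min,
		       (unsigned long long)queue_ts[min]);
	}
	return 0;
}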
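
The new gzip_is_compressed() in zlib.c decides by looking for the two-byte gzip magic number 0x1f 0x8b at the start of the file. A self-contained sketch of the same check, taking the file name from the command line; unlike the patch it simply reports false when the file cannot be opened.

#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Read the first two bytes of a file and compare them against the
 * gzip magic number 0x1f 0x8b, the same test gzip_is_compressed() does.
 */
static bool file_has_gzip_magic(const char *path)
{
	const uint8_t magic[2] = { 0x1f, 0x8b };
	char buf[2] = { 0 };
	ssize_t rc;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return false;
	rc = read(fd, buf, sizeof(buf));
	close(fd);
	return rc == (ssize_t)sizeof(buf) && memcmp(buf, magic, sizeof(buf)) == 0;
}

int main(int argc, char **argv)
{
	if (argc > 1)
		printf("%s: %s\n", argv[1],
		       file_has_gzip_magic(argv[1]) ? "gzip compressed" : "not gzip compressed");
	return 0;
}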