Diffstat (limited to 'tools/perf/util')
67 files changed, 1286 insertions, 292 deletions
diff --git a/tools/perf/util/Build b/tools/perf/util/Build index ecd9f9ceda77..af72be7f5b3b 100644 --- a/tools/perf/util/Build +++ b/tools/perf/util/Build @@ -10,6 +10,7 @@ libperf-y += evlist.o libperf-y += evsel.o libperf-y += evsel_fprintf.o libperf-y += find_bit.o +libperf-y += get_current_dir_name.o libperf-y += kallsyms.o libperf-y += levenshtein.o libperf-y += llvm-utils.o @@ -76,6 +77,7 @@ libperf-y += stat-shadow.o libperf-y += stat-display.o libperf-y += record.o libperf-y += srcline.o +libperf-y += srccode.o libperf-y += data.o libperf-y += tsc.o libperf-y += cloexec.o diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c index 6936daf89ddd..ac9805e0bc76 100644 --- a/tools/perf/util/annotate.c +++ b/tools/perf/util/annotate.c @@ -134,6 +134,7 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i return 0; } +#include "arch/arc/annotate/instructions.c" #include "arch/arm/annotate/instructions.c" #include "arch/arm64/annotate/instructions.c" #include "arch/x86/annotate/instructions.c" @@ -143,6 +144,10 @@ static int arch__associate_ins_ops(struct arch* arch, const char *name, struct i static struct arch architectures[] = { { + .name = "arc", + .init = arc__annotate_init, + }, + { .name = "arm", .init = arm__annotate_init, }, @@ -1000,6 +1005,7 @@ static unsigned annotation__count_insn(struct annotation *notes, u64 start, u64 static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 end, struct cyc_hist *ch) { unsigned n_insn; + unsigned int cover_insn = 0; u64 offset; n_insn = annotation__count_insn(notes, start, end); @@ -1013,21 +1019,34 @@ static void annotation__count_and_fill(struct annotation *notes, u64 start, u64 for (offset = start; offset <= end; offset++) { struct annotation_line *al = notes->offsets[offset]; - if (al) + if (al && al->ipc == 0.0) { al->ipc = ipc; + cover_insn++; + } + } + + if (cover_insn) { + notes->hit_cycles += ch->cycles; + notes->hit_insn += n_insn * ch->num; + notes->cover_insn += cover_insn; } } } void annotation__compute_ipc(struct annotation *notes, size_t size) { - u64 offset; + s64 offset; if (!notes->src || !notes->src->cycles_hist) return; + notes->total_insn = annotation__count_insn(notes, 0, size - 1); + notes->hit_cycles = 0; + notes->hit_insn = 0; + notes->cover_insn = 0; + pthread_mutex_lock(¬es->lock); - for (offset = 0; offset < size; ++offset) { + for (offset = size - 1; offset >= 0; --offset) { struct cyc_hist *ch; ch = ¬es->src->cycles_hist[offset]; @@ -1758,7 +1777,7 @@ static int symbol__disassemble(struct symbol *sym, struct annotate_args *args) while (!feof(file)) { /* * The source code line number (lineno) needs to be kept in - * accross calls to symbol__parse_objdump_line(), so that it + * across calls to symbol__parse_objdump_line(), so that it * can associate it with the instructions till the next one. * See disasm_line__new() and struct disasm_line::line_nr. 
*/ @@ -2563,6 +2582,22 @@ call_like: disasm_line__scnprintf(dl, bf, size, !notes->options->use_offset); } +static void ipc_coverage_string(char *bf, int size, struct annotation *notes) +{ + double ipc = 0.0, coverage = 0.0; + + if (notes->hit_cycles) + ipc = notes->hit_insn / ((double)notes->hit_cycles); + + if (notes->total_insn) { + coverage = notes->cover_insn * 100.0 / + ((double)notes->total_insn); + } + + scnprintf(bf, size, "(Average IPC: %.2f, IPC Coverage: %.1f%%)", + ipc, coverage); +} + static void __annotation_line__write(struct annotation_line *al, struct annotation *notes, bool first_line, bool current_entry, bool change_color, int width, void *obj, unsigned int percent_type, @@ -2658,6 +2693,11 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati ANNOTATION__MINMAX_CYCLES_WIDTH - 1, "Cycle(min/max)"); } + + if (show_title && !*al->line) { + ipc_coverage_string(bf, sizeof(bf), notes); + obj__printf(obj, "%*s", ANNOTATION__AVG_IPC_WIDTH, bf); + } } obj__printf(obj, " "); @@ -2763,6 +2803,7 @@ int symbol__annotate2(struct symbol *sym, struct map *map, struct perf_evsel *ev notes->nr_events = nr_pcnt; annotation__update_column_widths(notes); + sym->annotate2 = true; return 0; diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 5399ba2321bb..fb6463730ba4 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -64,6 +64,7 @@ bool ins__is_fused(struct arch *arch, const char *ins1, const char *ins2); #define ANNOTATION__IPC_WIDTH 6 #define ANNOTATION__CYCLES_WIDTH 6 #define ANNOTATION__MINMAX_CYCLES_WIDTH 19 +#define ANNOTATION__AVG_IPC_WIDTH 36 struct annotation_options { bool hide_src_code, @@ -262,6 +263,10 @@ struct annotation { pthread_mutex_t lock; u64 max_coverage; u64 start; + u64 hit_cycles; + u64 hit_insn; + unsigned int total_insn; + unsigned int cover_insn; struct annotation_options *options; struct annotation_line **offsets; int nr_events; diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c index 72d5ba2479bf..f69961c4a4f3 100644 --- a/tools/perf/util/auxtrace.c +++ b/tools/perf/util/auxtrace.c @@ -1983,17 +1983,14 @@ static int find_dso_sym(struct dso *dso, const char *sym_name, u64 *start, static int addr_filter__entire_dso(struct addr_filter *filt, struct dso *dso) { - struct symbol *first_sym = dso__first_symbol(dso); - struct symbol *last_sym = dso__last_symbol(dso); - - if (!first_sym || !last_sym) { - pr_err("Failed to determine filter for %s\nNo symbols found.\n", + if (dso__data_file_size(dso, NULL)) { + pr_err("Failed to determine filter for %s\nCannot determine file size.\n", filt->filename); return -EINVAL; } - filt->addr = first_sym->start; - filt->size = last_sym->end - first_sym->start; + filt->addr = 0; + filt->size = dso->data.file_size; return 0; } diff --git a/tools/perf/util/bpf-loader.c b/tools/perf/util/bpf-loader.c index f9ae1a993806..2f3eb6d293ee 100644 --- a/tools/perf/util/bpf-loader.c +++ b/tools/perf/util/bpf-loader.c @@ -99,7 +99,7 @@ struct bpf_object *bpf__prepare_load(const char *filename, bool source) if (err) return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE); } else - pr_debug("bpf: successfull builtin compilation\n"); + pr_debug("bpf: successful builtin compilation\n"); obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename); if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj) @@ -1603,7 +1603,7 @@ struct perf_evsel *bpf__setup_output_event(struct perf_evlist *evlist, const cha op = bpf_map__add_newop(map, NULL); if (IS_ERR(op)) - return 
ERR_PTR(PTR_ERR(op)); + return ERR_CAST(op); op->op_type = BPF_MAP_OP_SET_EVSEL; op->v.evsel = evsel; } diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c index 5ac157056cdf..1ea8f898f1a1 100644 --- a/tools/perf/util/config.c +++ b/tools/perf/util/config.c @@ -14,6 +14,7 @@ #include "util.h" #include "cache.h" #include <subcmd/exec-cmd.h> +#include "util/event.h" /* proc_map_timeout */ #include "util/hist.h" /* perf_hist_config */ #include "util/llvm-utils.h" /* perf_llvm_config */ #include "config.h" @@ -419,6 +420,9 @@ static int perf_buildid_config(const char *var, const char *value) static int perf_default_core_config(const char *var __maybe_unused, const char *value __maybe_unused) { + if (!strcmp(var, "core.proc-map-timeout")) + proc_map_timeout = strtoul(value, NULL, 10); + /* Add other config variables here. */ return 0; } @@ -811,14 +815,14 @@ int config_error_nonbool(const char *var) void set_buildid_dir(const char *dir) { if (dir) - scnprintf(buildid_dir, MAXPATHLEN-1, "%s", dir); + scnprintf(buildid_dir, MAXPATHLEN, "%s", dir); /* default to $HOME/.debug */ if (buildid_dir[0] == '\0') { char *home = getenv("HOME"); if (home) { - snprintf(buildid_dir, MAXPATHLEN-1, "%s/%s", + snprintf(buildid_dir, MAXPATHLEN, "%s/%s", home, DEBUG_CACHE_DIR); } else { strncpy(buildid_dir, DEBUG_CACHE_DIR, MAXPATHLEN-1); diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c index 938def6d0bb9..8c155575c6c5 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c @@ -36,7 +36,6 @@ struct cs_etm_decoder { void *data; void (*packet_printer)(const char *msg); - bool trace_on; dcd_tree_handle_t dcd_tree; cs_etm_mem_cb_type mem_access; ocsd_datapath_resp_t prev_return; @@ -116,6 +115,19 @@ int cs_etm_decoder__get_packet(struct cs_etm_decoder *decoder, return 1; } +static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params, + ocsd_etmv3_cfg *config) +{ + config->reg_idr = params->etmv3.reg_idr; + config->reg_ctrl = params->etmv3.reg_ctrl; + config->reg_ccer = params->etmv3.reg_ccer; + config->reg_trc_id = params->etmv3.reg_trc_id; + config->arch_ver = ARCH_V7; + config->core_prof = profile_CortexA; + + return 0; +} + static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params, ocsd_etmv4_cfg *config) { @@ -237,10 +249,19 @@ cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params, struct cs_etm_decoder *decoder) { const char *decoder_name; + ocsd_etmv3_cfg config_etmv3; ocsd_etmv4_cfg trace_config_etmv4; void *trace_config; switch (t_params->protocol) { + case CS_ETM_PROTO_ETMV3: + case CS_ETM_PROTO_PTM: + cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3); + decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ? 
+ OCSD_BUILTIN_DCD_ETMV3 : + OCSD_BUILTIN_DCD_PTM; + trace_config = &config_etmv3; + break; case CS_ETM_PROTO_ETMV4i: cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4); decoder_name = OCSD_BUILTIN_DCD_ETMV4I; @@ -263,11 +284,12 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder) decoder->tail = 0; decoder->packet_count = 0; for (i = 0; i < MAX_BUFFER; i++) { + decoder->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN; decoder->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR; decoder->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR; + decoder->packet_buffer[i].instr_count = 0; decoder->packet_buffer[i].last_instr_taken_branch = false; - decoder->packet_buffer[i].exc = false; - decoder->packet_buffer[i].exc_ret = false; + decoder->packet_buffer[i].last_instr_size = 0; decoder->packet_buffer[i].cpu = INT_MIN; } } @@ -294,11 +316,13 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder, decoder->packet_count++; decoder->packet_buffer[et].sample_type = sample_type; - decoder->packet_buffer[et].exc = false; - decoder->packet_buffer[et].exc_ret = false; + decoder->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN; decoder->packet_buffer[et].cpu = *((int *)inode->priv); decoder->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR; decoder->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR; + decoder->packet_buffer[et].instr_count = 0; + decoder->packet_buffer[et].last_instr_taken_branch = false; + decoder->packet_buffer[et].last_instr_size = 0; if (decoder->packet_count == MAX_BUFFER - 1) return OCSD_RESP_WAIT; @@ -321,8 +345,28 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder, packet = &decoder->packet_buffer[decoder->tail]; + switch (elem->isa) { + case ocsd_isa_aarch64: + packet->isa = CS_ETM_ISA_A64; + break; + case ocsd_isa_arm: + packet->isa = CS_ETM_ISA_A32; + break; + case ocsd_isa_thumb2: + packet->isa = CS_ETM_ISA_T32; + break; + case ocsd_isa_tee: + case ocsd_isa_jazelle: + case ocsd_isa_custom: + case ocsd_isa_unknown: + default: + packet->isa = CS_ETM_ISA_UNKNOWN; + } + packet->start_addr = elem->st_addr; packet->end_addr = elem->en_addr; + packet->instr_count = elem->num_instr_range; + switch (elem->last_i_type) { case OCSD_INSTR_BR: case OCSD_INSTR_BR_INDIRECT: @@ -336,15 +380,33 @@ cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder, break; } + packet->last_instr_size = elem->last_instr_sz; + return ret; } static ocsd_datapath_resp_t -cs_etm_decoder__buffer_trace_on(struct cs_etm_decoder *decoder, - const uint8_t trace_chan_id) +cs_etm_decoder__buffer_discontinuity(struct cs_etm_decoder *decoder, + const uint8_t trace_chan_id) { return cs_etm_decoder__buffer_packet(decoder, trace_chan_id, - CS_ETM_TRACE_ON); + CS_ETM_DISCONTINUITY); +} + +static ocsd_datapath_resp_t +cs_etm_decoder__buffer_exception(struct cs_etm_decoder *decoder, + const uint8_t trace_chan_id) +{ + return cs_etm_decoder__buffer_packet(decoder, trace_chan_id, + CS_ETM_EXCEPTION); +} + +static ocsd_datapath_resp_t +cs_etm_decoder__buffer_exception_ret(struct cs_etm_decoder *decoder, + const uint8_t trace_chan_id) +{ + return cs_etm_decoder__buffer_packet(decoder, trace_chan_id, + CS_ETM_EXCEPTION_RET); } static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer( @@ -359,26 +421,25 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer( switch (elem->elem_type) { case OCSD_GEN_TRC_ELEM_UNKNOWN: break; + case OCSD_GEN_TRC_ELEM_EO_TRACE: case OCSD_GEN_TRC_ELEM_NO_SYNC: - decoder->trace_on = false; - break; case OCSD_GEN_TRC_ELEM_TRACE_ON: - resp = 
cs_etm_decoder__buffer_trace_on(decoder, - trace_chan_id); - decoder->trace_on = true; + resp = cs_etm_decoder__buffer_discontinuity(decoder, + trace_chan_id); break; case OCSD_GEN_TRC_ELEM_INSTR_RANGE: resp = cs_etm_decoder__buffer_range(decoder, elem, trace_chan_id); break; case OCSD_GEN_TRC_ELEM_EXCEPTION: - decoder->packet_buffer[decoder->tail].exc = true; + resp = cs_etm_decoder__buffer_exception(decoder, + trace_chan_id); break; case OCSD_GEN_TRC_ELEM_EXCEPTION_RET: - decoder->packet_buffer[decoder->tail].exc_ret = true; + resp = cs_etm_decoder__buffer_exception_ret(decoder, + trace_chan_id); break; case OCSD_GEN_TRC_ELEM_PE_CONTEXT: - case OCSD_GEN_TRC_ELEM_EO_TRACE: case OCSD_GEN_TRC_ELEM_ADDR_NACC: case OCSD_GEN_TRC_ELEM_TIMESTAMP: case OCSD_GEN_TRC_ELEM_CYCLE_COUNT: @@ -398,11 +459,20 @@ static int cs_etm_decoder__create_etm_packet_decoder( struct cs_etm_decoder *decoder) { const char *decoder_name; + ocsd_etmv3_cfg config_etmv3; ocsd_etmv4_cfg trace_config_etmv4; void *trace_config; u8 csid; switch (t_params->protocol) { + case CS_ETM_PROTO_ETMV3: + case CS_ETM_PROTO_PTM: + cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3); + decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ? + OCSD_BUILTIN_DCD_ETMV3 : + OCSD_BUILTIN_DCD_PTM; + trace_config = &config_etmv3; + break; case CS_ETM_PROTO_ETMV4i: cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4); decoder_name = OCSD_BUILTIN_DCD_ETMV4I; diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h index 612b5755f742..a6407d41598f 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h @@ -23,18 +23,28 @@ struct cs_etm_buffer { }; enum cs_etm_sample_type { - CS_ETM_EMPTY = 0, - CS_ETM_RANGE = 1 << 0, - CS_ETM_TRACE_ON = 1 << 1, + CS_ETM_EMPTY, + CS_ETM_RANGE, + CS_ETM_DISCONTINUITY, + CS_ETM_EXCEPTION, + CS_ETM_EXCEPTION_RET, +}; + +enum cs_etm_isa { + CS_ETM_ISA_UNKNOWN, + CS_ETM_ISA_A64, + CS_ETM_ISA_A32, + CS_ETM_ISA_T32, }; struct cs_etm_packet { enum cs_etm_sample_type sample_type; + enum cs_etm_isa isa; u64 start_addr; u64 end_addr; + u32 instr_count; u8 last_instr_taken_branch; - u8 exc; - u8 exc_ret; + u8 last_instr_size; int cpu; }; @@ -43,6 +53,13 @@ struct cs_etm_queue; typedef u32 (*cs_etm_mem_cb_type)(struct cs_etm_queue *, u64, size_t, u8 *); +struct cs_etmv3_trace_params { + u32 reg_ctrl; + u32 reg_trc_id; + u32 reg_ccer; + u32 reg_idr; +}; + struct cs_etmv4_trace_params { u32 reg_idr0; u32 reg_idr1; @@ -55,6 +72,7 @@ struct cs_etmv4_trace_params { struct cs_etm_trace_params { int protocol; union { + struct cs_etmv3_trace_params etmv3; struct cs_etmv4_trace_params etmv4; }; }; @@ -78,6 +96,7 @@ enum { CS_ETM_PROTO_ETMV3 = 1, CS_ETM_PROTO_ETMV4i, CS_ETM_PROTO_ETMV4d, + CS_ETM_PROTO_PTM, }; enum { diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index 73430b73570d..27a374ddf661 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -31,14 +31,6 @@ #define MAX_TIMESTAMP (~0ULL) -/* - * A64 instructions are always 4 bytes - * - * Only A64 is supported, so can use this constant for converting between - * addresses and instruction counts, calculting offsets etc - */ -#define A64_INSTR_SIZE 4 - struct cs_etm_auxtrace { struct auxtrace auxtrace; struct auxtrace_queues queues; @@ -91,6 +83,19 @@ static int cs_etm__update_queues(struct cs_etm_auxtrace *etm); static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm, pid_t tid, u64 time_); +/* PTMs 
ETMIDR [11:8] set to b0011 */ +#define ETMIDR_PTM_VERSION 0x00000300 + +static u32 cs_etm__get_v7_protocol_version(u32 etmidr) +{ + etmidr &= ETMIDR_PTM_VERSION; + + if (etmidr == ETMIDR_PTM_VERSION) + return CS_ETM_PROTO_PTM; + + return CS_ETM_PROTO_ETMV3; +} + static void cs_etm__packet_dump(const char *pkt_string) { const char *color = PERF_COLOR_BLUE; @@ -122,15 +127,31 @@ static void cs_etm__dump_event(struct cs_etm_auxtrace *etm, /* Use metadata to fill in trace parameters for trace decoder */ t_params = zalloc(sizeof(*t_params) * etm->num_cpu); for (i = 0; i < etm->num_cpu; i++) { - t_params[i].protocol = CS_ETM_PROTO_ETMV4i; - t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0]; - t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1]; - t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2]; - t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8]; - t_params[i].etmv4.reg_configr = + if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) { + u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR]; + + t_params[i].protocol = + cs_etm__get_v7_protocol_version(etmidr); + t_params[i].etmv3.reg_ctrl = + etm->metadata[i][CS_ETM_ETMCR]; + t_params[i].etmv3.reg_trc_id = + etm->metadata[i][CS_ETM_ETMTRACEIDR]; + } else if (etm->metadata[i][CS_ETM_MAGIC] == + __perf_cs_etmv4_magic) { + t_params[i].protocol = CS_ETM_PROTO_ETMV4i; + t_params[i].etmv4.reg_idr0 = + etm->metadata[i][CS_ETMV4_TRCIDR0]; + t_params[i].etmv4.reg_idr1 = + etm->metadata[i][CS_ETMV4_TRCIDR1]; + t_params[i].etmv4.reg_idr2 = + etm->metadata[i][CS_ETMV4_TRCIDR2]; + t_params[i].etmv4.reg_idr8 = + etm->metadata[i][CS_ETMV4_TRCIDR8]; + t_params[i].etmv4.reg_configr = etm->metadata[i][CS_ETMV4_TRCCONFIGR]; - t_params[i].etmv4.reg_traceidr = + t_params[i].etmv4.reg_traceidr = etm->metadata[i][CS_ETMV4_TRCTRACEIDR]; + } } /* Set decoder parameters to simply print the trace packets */ @@ -360,15 +381,31 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm, goto out_free; for (i = 0; i < etm->num_cpu; i++) { - t_params[i].protocol = CS_ETM_PROTO_ETMV4i; - t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0]; - t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1]; - t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2]; - t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8]; - t_params[i].etmv4.reg_configr = + if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) { + u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR]; + + t_params[i].protocol = + cs_etm__get_v7_protocol_version(etmidr); + t_params[i].etmv3.reg_ctrl = + etm->metadata[i][CS_ETM_ETMCR]; + t_params[i].etmv3.reg_trc_id = + etm->metadata[i][CS_ETM_ETMTRACEIDR]; + } else if (etm->metadata[i][CS_ETM_MAGIC] == + __perf_cs_etmv4_magic) { + t_params[i].protocol = CS_ETM_PROTO_ETMV4i; + t_params[i].etmv4.reg_idr0 = + etm->metadata[i][CS_ETMV4_TRCIDR0]; + t_params[i].etmv4.reg_idr1 = + etm->metadata[i][CS_ETMV4_TRCIDR1]; + t_params[i].etmv4.reg_idr2 = + etm->metadata[i][CS_ETMV4_TRCIDR2]; + t_params[i].etmv4.reg_idr8 = + etm->metadata[i][CS_ETMV4_TRCIDR8]; + t_params[i].etmv4.reg_configr = etm->metadata[i][CS_ETMV4_TRCCONFIGR]; - t_params[i].etmv4.reg_traceidr = + t_params[i].etmv4.reg_traceidr = etm->metadata[i][CS_ETMV4_TRCTRACEIDR]; + } } /* Set decoder parameters to simply print the trace packets */ @@ -510,53 +547,54 @@ static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq) etmq->last_branch_rb->nr = 0; } -static inline u64 
cs_etm__last_executed_instr(struct cs_etm_packet *packet) -{ - /* Returns 0 for the CS_ETM_TRACE_ON packet */ - if (packet->sample_type == CS_ETM_TRACE_ON) - return 0; +static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq, + u64 addr) { + u8 instrBytes[2]; + cs_etm__mem_access(etmq, addr, ARRAY_SIZE(instrBytes), instrBytes); /* - * The packet records the execution range with an exclusive end address - * - * A64 instructions are constant size, so the last executed - * instruction is A64_INSTR_SIZE before the end address - * Will need to do instruction level decode for T32 instructions as - * they can be variable size (not yet supported). + * T32 instruction size is indicated by bits[15:11] of the first + * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111 + * denote a 32-bit instruction. */ - return packet->end_addr - A64_INSTR_SIZE; + return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2; } static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet) { - /* Returns 0 for the CS_ETM_TRACE_ON packet */ - if (packet->sample_type == CS_ETM_TRACE_ON) + /* Returns 0 for the CS_ETM_DISCONTINUITY packet */ + if (packet->sample_type == CS_ETM_DISCONTINUITY) return 0; return packet->start_addr; } -static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet) +static inline +u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet) { - /* - * Only A64 instructions are currently supported, so can get - * instruction count by dividing. - * Will need to do instruction level decode for T32 instructions as - * they can be variable size (not yet supported). - */ - return (packet->end_addr - packet->start_addr) / A64_INSTR_SIZE; + /* Returns 0 for the CS_ETM_DISCONTINUITY packet */ + if (packet->sample_type == CS_ETM_DISCONTINUITY) + return 0; + + return packet->end_addr - packet->last_instr_size; } -static inline u64 cs_etm__instr_addr(const struct cs_etm_packet *packet, +static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq, + const struct cs_etm_packet *packet, u64 offset) { - /* - * Only A64 instructions are currently supported, so can get - * instruction address by muliplying. - * Will need to do instruction level decode for T32 instructions as - * they can be variable size (not yet supported). 
- */ - return packet->start_addr + offset * A64_INSTR_SIZE; + if (packet->isa == CS_ETM_ISA_T32) { + u64 addr = packet->start_addr; + + while (offset > 0) { + addr += cs_etm__t32_instr_size(etmq, addr); + offset--; + } + return addr; + } + + /* Assume a 4 byte instruction size (A32/A64) */ + return packet->start_addr + offset * 4; } static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq) @@ -888,9 +926,8 @@ static int cs_etm__sample(struct cs_etm_queue *etmq) struct cs_etm_auxtrace *etm = etmq->etm; struct cs_etm_packet *tmp; int ret; - u64 instrs_executed; + u64 instrs_executed = etmq->packet->instr_count; - instrs_executed = cs_etm__instr_count(etmq->packet); etmq->period_instructions += instrs_executed; /* @@ -920,7 +957,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq) * executed, but PC has not advanced to next instruction) */ u64 offset = (instrs_executed - instrs_over - 1); - u64 addr = cs_etm__instr_addr(etmq->packet, offset); + u64 addr = cs_etm__instr_addr(etmq, etmq->packet, offset); ret = cs_etm__synth_instruction_sample( etmq, addr, etm->instructions_sample_period); @@ -935,7 +972,7 @@ static int cs_etm__sample(struct cs_etm_queue *etmq) bool generate_sample = false; /* Generate sample for tracing on packet */ - if (etmq->prev_packet->sample_type == CS_ETM_TRACE_ON) + if (etmq->prev_packet->sample_type == CS_ETM_DISCONTINUITY) generate_sample = true; /* Generate sample for branch taken packet */ @@ -963,6 +1000,25 @@ static int cs_etm__sample(struct cs_etm_queue *etmq) return 0; } +static int cs_etm__exception(struct cs_etm_queue *etmq) +{ + /* + * When the exception packet is inserted, whether the last instruction + * in previous range packet is taken branch or not, we need to force + * to set 'prev_packet->last_instr_taken_branch' to true. This ensures + * to generate branch sample for the instruction range before the + * exception is trapped to kernel or before the exception returning. + * + * The exception packet includes the dummy address values, so don't + * swap PACKET with PREV_PACKET. This keeps PREV_PACKET to be useful + * for generating instruction and branch samples. + */ + if (etmq->prev_packet->sample_type == CS_ETM_RANGE) + etmq->prev_packet->last_instr_taken_branch = true; + + return 0; +} + static int cs_etm__flush(struct cs_etm_queue *etmq) { int err = 0; @@ -1005,7 +1061,7 @@ static int cs_etm__flush(struct cs_etm_queue *etmq) } swap_packet: - if (etmq->etm->synth_opts.last_branch) { + if (etm->sample_branches || etm->synth_opts.last_branch) { /* * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for * the next incoming packet. @@ -1018,6 +1074,39 @@ swap_packet: return err; } +static int cs_etm__end_block(struct cs_etm_queue *etmq) +{ + int err; + + /* + * It has no new packet coming and 'etmq->packet' contains the stale + * packet which was set at the previous time with packets swapping; + * so skip to generate branch sample to avoid stale packet. + * + * For this case only flush branch stack and generate a last branch + * event for the branches left in the circular buffer at the end of + * the trace. + */ + if (etmq->etm->synth_opts.last_branch && + etmq->prev_packet->sample_type == CS_ETM_RANGE) { + /* + * Use the address of the end of the last reported execution + * range. 
+ */ + u64 addr = cs_etm__last_executed_instr(etmq->prev_packet); + + err = cs_etm__synth_instruction_sample( + etmq, addr, + etmq->period_instructions); + if (err) + return err; + + etmq->period_instructions = 0; + } + + return 0; +} + static int cs_etm__run_decoder(struct cs_etm_queue *etmq) { struct cs_etm_auxtrace *etm = etmq->etm; @@ -1078,7 +1167,16 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq) */ cs_etm__sample(etmq); break; - case CS_ETM_TRACE_ON: + case CS_ETM_EXCEPTION: + case CS_ETM_EXCEPTION_RET: + /* + * If the exception packet is coming, + * make sure the previous instruction + * range packet to be handled properly. + */ + cs_etm__exception(etmq); + break; + case CS_ETM_DISCONTINUITY: /* * Discontinuity in trace, flush * previous branch stack @@ -1100,7 +1198,7 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq) if (err == 0) /* Flush any remaining branch stack entries */ - err = cs_etm__flush(etmq); + err = cs_etm__end_block(etmq); } return err; diff --git a/tools/perf/util/dso.c b/tools/perf/util/dso.c index bbed90e5d9bb..62c8cf622607 100644 --- a/tools/perf/util/dso.c +++ b/tools/perf/util/dso.c @@ -295,7 +295,7 @@ static int decompress_kmodule(struct dso *dso, const char *name, unlink(tmpbuf); if (pathname && (fd >= 0)) - strncpy(pathname, tmpbuf, len); + strlcpy(pathname, tmpbuf, len); return fd; } @@ -894,7 +894,7 @@ static ssize_t cached_read(struct dso *dso, struct machine *machine, return r; } -static int data_file_size(struct dso *dso, struct machine *machine) +int dso__data_file_size(struct dso *dso, struct machine *machine) { int ret = 0; struct stat st; @@ -943,7 +943,7 @@ out: */ off_t dso__data_size(struct dso *dso, struct machine *machine) { - if (data_file_size(dso, machine)) + if (dso__data_file_size(dso, machine)) return -1; /* For now just estimate dso data size is close to file size */ @@ -953,7 +953,7 @@ off_t dso__data_size(struct dso *dso, struct machine *machine) static ssize_t data_read_offset(struct dso *dso, struct machine *machine, u64 offset, u8 *data, ssize_t size) { - if (data_file_size(dso, machine)) + if (dso__data_file_size(dso, machine)) return -1; /* Check the offset sanity. 
*/ diff --git a/tools/perf/util/dso.h b/tools/perf/util/dso.h index c5380500bed4..8c8a7abe809d 100644 --- a/tools/perf/util/dso.h +++ b/tools/perf/util/dso.h @@ -322,6 +322,7 @@ int dso__data_get_fd(struct dso *dso, struct machine *machine); void dso__data_put_fd(struct dso *dso); void dso__data_close(struct dso *dso); +int dso__data_file_size(struct dso *dso, struct machine *machine); off_t dso__data_size(struct dso *dso, struct machine *machine); ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine, u64 offset, u8 *data, ssize_t size); diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c index 59f38c7693f8..4c23779e271a 100644 --- a/tools/perf/util/env.c +++ b/tools/perf/util/env.c @@ -166,7 +166,7 @@ const char *perf_env__arch(struct perf_env *env) struct utsname uts; char *arch_name; - if (!env) { /* Assume local operation */ + if (!env || !env->arch) { /* Assume local operation */ if (uname(&uts) < 0) return NULL; arch_name = uts.machine; diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index e9c108a6b1c3..937a5a4f71cc 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -25,6 +25,8 @@ #include "asm/bug.h" #include "stat.h" +#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500 + static const char *perf_event__names[] = { [0] = "TOTAL", [PERF_RECORD_MMAP] = "MMAP", @@ -72,6 +74,8 @@ static const char *perf_ns__names[] = { [CGROUP_NS_INDEX] = "cgroup", }; +unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT; + const char *perf_event__name(unsigned int id) { if (id >= ARRAY_SIZE(perf_event__names)) @@ -323,8 +327,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, - bool mmap_data, - unsigned int proc_map_timeout) + bool mmap_data) { char filename[PATH_MAX]; FILE *fp; @@ -521,8 +524,7 @@ static int __event__synthesize_thread(union perf_event *comm_event, perf_event__handler_t process, struct perf_tool *tool, struct machine *machine, - bool mmap_data, - unsigned int proc_map_timeout) + bool mmap_data) { char filename[PATH_MAX]; DIR *tasks; @@ -548,8 +550,7 @@ static int __event__synthesize_thread(union perf_event *comm_event, */ if (pid == tgid && perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, - process, machine, mmap_data, - proc_map_timeout)) + process, machine, mmap_data)) return -1; return 0; @@ -598,7 +599,7 @@ static int __event__synthesize_thread(union perf_event *comm_event, if (_pid == pid) { /* process the parent's maps too */ rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid, - process, machine, mmap_data, proc_map_timeout); + process, machine, mmap_data); if (rc) break; } @@ -612,8 +613,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, struct machine *machine, - bool mmap_data, - unsigned int proc_map_timeout) + bool mmap_data) { union perf_event *comm_event, *mmap_event, *fork_event; union perf_event *namespaces_event; @@ -643,7 +643,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, fork_event, namespaces_event, thread_map__pid(threads, thread), 0, process, tool, machine, - mmap_data, proc_map_timeout)) { + mmap_data)) { err = -1; break; } @@ -669,7 +669,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool, fork_event, namespaces_event, comm_event->comm.pid, 0, process, tool, machine, - mmap_data, proc_map_timeout)) { + mmap_data)) { err = -1; break; } @@ -690,7 +690,6 @@ static int 
__perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool mmap_data, - unsigned int proc_map_timeout, struct dirent **dirent, int start, int num) @@ -734,8 +733,7 @@ static int __perf_event__synthesize_threads(struct perf_tool *tool, */ __event__synthesize_thread(comm_event, mmap_event, fork_event, namespaces_event, pid, 1, process, - tool, machine, mmap_data, - proc_map_timeout); + tool, machine, mmap_data); } err = 0; @@ -755,7 +753,6 @@ struct synthesize_threads_arg { perf_event__handler_t process; struct machine *machine; bool mmap_data; - unsigned int proc_map_timeout; struct dirent **dirent; int num; int start; @@ -767,7 +764,7 @@ static void *synthesize_threads_worker(void *arg) __perf_event__synthesize_threads(args->tool, args->process, args->machine, args->mmap_data, - args->proc_map_timeout, args->dirent, + args->dirent, args->start, args->num); return NULL; } @@ -776,7 +773,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool mmap_data, - unsigned int proc_map_timeout, unsigned int nr_threads_synthesize) { struct synthesize_threads_arg *args = NULL; @@ -806,7 +802,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool, if (thread_nr <= 1) { err = __perf_event__synthesize_threads(tool, process, machine, mmap_data, - proc_map_timeout, dirent, base, n); goto free_dirent; } @@ -828,7 +823,6 @@ int perf_event__synthesize_threads(struct perf_tool *tool, args[i].process = process; args[i].machine = machine; args[i].mmap_data = mmap_data; - args[i].proc_map_timeout = proc_map_timeout; args[i].dirent = dirent; } for (i = 0; i < m; i++) { @@ -1577,6 +1571,24 @@ struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, return al->map; } +/* + * For branch stacks or branch samples, the sample cpumode might not be correct + * because it applies only to the sample 'ip' and not necessary to 'addr' or + * branch stack addresses. If possible, use a fallback to deal with those cases. 
+ */ +struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr, + struct addr_location *al) +{ + struct map *map = thread__find_map(thread, cpumode, addr, al); + struct machine *machine = thread->mg->machine; + u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr); + + if (map || addr_cpumode == cpumode) + return map; + + return thread__find_map(thread, addr_cpumode, addr, al); +} + struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode, u64 addr, struct addr_location *al) { @@ -1586,6 +1598,15 @@ struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode, return al->sym; } +struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode, + u64 addr, struct addr_location *al) +{ + al->sym = NULL; + if (thread__find_map_fb(thread, cpumode, addr, al)) + al->sym = map__find_symbol(al->map, al->addr); + return al->sym; +} + /* * Callers need to drop the reference to al->thread, obtained in * machine__findnew_thread() @@ -1679,7 +1700,7 @@ bool sample_addr_correlates_sym(struct perf_event_attr *attr) void thread__resolve(struct thread *thread, struct addr_location *al, struct perf_sample *sample) { - thread__find_map(thread, sample->cpumode, sample->addr, al); + thread__find_map_fb(thread, sample->cpumode, sample->addr, al); al->cpu = sample->cpu; al->sym = NULL; diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h index bfa60bcafbde..eb95f3384958 100644 --- a/tools/perf/util/event.h +++ b/tools/perf/util/event.h @@ -669,8 +669,7 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool, int perf_event__synthesize_thread_map(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, - struct machine *machine, bool mmap_data, - unsigned int proc_map_timeout); + struct machine *machine, bool mmap_data); int perf_event__synthesize_thread_map2(struct perf_tool *tool, struct thread_map *threads, perf_event__handler_t process, @@ -682,7 +681,6 @@ int perf_event__synthesize_cpu_map(struct perf_tool *tool, int perf_event__synthesize_threads(struct perf_tool *tool, perf_event__handler_t process, struct machine *machine, bool mmap_data, - unsigned int proc_map_timeout, unsigned int nr_threads_synthesize); int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, perf_event__handler_t process, @@ -797,8 +795,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool, pid_t pid, pid_t tgid, perf_event__handler_t process, struct machine *machine, - bool mmap_data, - unsigned int proc_map_timeout); + bool mmap_data); int perf_event__synthesize_extra_kmaps(struct perf_tool *tool, perf_event__handler_t process, @@ -829,5 +826,6 @@ int perf_event_paranoid(void); extern int sysctl_perf_event_max_stack; extern int sysctl_perf_event_max_contexts_per_stack; +extern unsigned int proc_map_timeout; #endif /* __PERF_RECORD_H */ diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index e88e6f9b1463..8c902276d4b4 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -34,6 +34,10 @@ #include <linux/log2.h> #include <linux/err.h> +#ifdef LACKS_SIGQUEUE_PROTOTYPE +int sigqueue(pid_t pid, int sig, const union sigval value); +#endif + #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) #define SID(e, x, y) xyarray__entry(e->sample_id, x, y) @@ -1018,7 +1022,7 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str, */ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, unsigned int auxtrace_pages, - bool auxtrace_overwrite) + bool 
auxtrace_overwrite, int nr_cblocks) { struct perf_evsel *evsel; const struct cpu_map *cpus = evlist->cpus; @@ -1028,7 +1032,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, * Its value is decided by evsel's write_backward. * So &mp should not be passed through const pointer. */ - struct mmap_params mp; + struct mmap_params mp = { .nr_cblocks = nr_cblocks }; if (!evlist->mmap) evlist->mmap = perf_evlist__alloc_mmap(evlist, false); @@ -1060,7 +1064,7 @@ int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages) { - return perf_evlist__mmap_ex(evlist, pages, 0, false); + return perf_evlist__mmap_ex(evlist, pages, 0, false, 0); } int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) @@ -1176,7 +1180,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **e return err; } -int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) +int perf_evlist__set_tp_filter(struct perf_evlist *evlist, const char *filter) { struct perf_evsel *evsel; int err = 0; @@ -1193,7 +1197,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter) return err; } -int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids) +int perf_evlist__set_tp_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids) { char *filter; int ret = -1; @@ -1214,15 +1218,15 @@ int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t } } - ret = perf_evlist__set_filter(evlist, filter); + ret = perf_evlist__set_tp_filter(evlist, filter); out_free: free(filter); return ret; } -int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid) +int perf_evlist__set_tp_filter_pid(struct perf_evlist *evlist, pid_t pid) { - return perf_evlist__set_filter_pids(evlist, 1, &pid); + return perf_evlist__set_tp_filter_pids(evlist, 1, &pid); } bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) @@ -1810,3 +1814,30 @@ void perf_evlist__force_leader(struct perf_evlist *evlist) leader->forced_leader = true; } } + +struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evsel_list, + struct perf_evsel *evsel) +{ + struct perf_evsel *c2, *leader; + bool is_open = true; + + leader = evsel->leader; + pr_debug("Weak group for %s/%d failed\n", + leader->name, leader->nr_members); + + /* + * for_each_group_member doesn't work here because it doesn't + * include the first entry. 
+ */ + evlist__for_each_entry(evsel_list, c2) { + if (c2 == evsel) + is_open = false; + if (c2->leader == leader) { + if (is_open) + perf_evsel__close(c2); + c2->leader = c2; + c2->nr_members = 0; + } + } + return leader; +} diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h index dc66436add98..868294491194 100644 --- a/tools/perf/util/evlist.h +++ b/tools/perf/util/evlist.h @@ -98,9 +98,9 @@ void __perf_evlist__reset_sample_bit(struct perf_evlist *evlist, #define perf_evlist__reset_sample_bit(evlist, bit) \ __perf_evlist__reset_sample_bit(evlist, PERF_SAMPLE_##bit) -int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter); -int perf_evlist__set_filter_pid(struct perf_evlist *evlist, pid_t pid); -int perf_evlist__set_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids); +int perf_evlist__set_tp_filter(struct perf_evlist *evlist, const char *filter); +int perf_evlist__set_tp_filter_pid(struct perf_evlist *evlist, pid_t pid); +int perf_evlist__set_tp_filter_pids(struct perf_evlist *evlist, size_t npids, pid_t *pids); struct perf_evsel * perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id); @@ -162,7 +162,7 @@ unsigned long perf_event_mlock_kb_in_pages(void); int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages, unsigned int auxtrace_pages, - bool auxtrace_overwrite); + bool auxtrace_overwrite, int nr_cblocks); int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages); void perf_evlist__munmap(struct perf_evlist *evlist); @@ -312,4 +312,7 @@ bool perf_evlist__exclude_kernel(struct perf_evlist *evlist); void perf_evlist__force_leader(struct perf_evlist *evlist); +struct perf_evsel *perf_evlist__reset_weak_group(struct perf_evlist *evlist, + struct perf_evsel *evsel); + #endif /* __PERF_EVLIST_H */ diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c index 6d187059a373..dbc0466db368 100644 --- a/tools/perf/util/evsel.c +++ b/tools/perf/util/evsel.c @@ -956,7 +956,6 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts, attr->sample_freq = 0; attr->sample_period = 0; attr->write_backward = 0; - attr->sample_id_all = 0; } if (opts->no_samples) @@ -1093,7 +1092,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts, attr->exclude_user = 1; } - if (evsel->own_cpus) + if (evsel->own_cpus || evsel->unit) evsel->attr.read_format |= PERF_FORMAT_ID; /* diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h index 3147ca76c6fc..82a289ce8b0c 100644 --- a/tools/perf/util/evsel.h +++ b/tools/perf/util/evsel.h @@ -106,7 +106,7 @@ struct perf_evsel { char *name; double scale; const char *unit; - struct tep_event_format *tp_format; + struct tep_event *tp_format; off_t id_offset; struct perf_stat_evsel *stats; void *priv; @@ -216,7 +216,7 @@ static inline struct perf_evsel *perf_evsel__newtp(const char *sys, const char * struct perf_evsel *perf_evsel__new_cycles(bool precise); -struct tep_event_format *event_format__new(const char *sys, const char *name); +struct tep_event *event_format__new(const char *sys, const char *name); void perf_evsel__init(struct perf_evsel *evsel, struct perf_event_attr *attr, int idx); diff --git a/tools/perf/util/evsel_fprintf.c b/tools/perf/util/evsel_fprintf.c index 0d0a4c6f368b..95ea147f9e18 100644 --- a/tools/perf/util/evsel_fprintf.c +++ b/tools/perf/util/evsel_fprintf.c @@ -173,6 +173,7 @@ int sample__fprintf_callchain(struct perf_sample *sample, int left_alignment, if (!print_oneline) printed += fprintf(fp, "\n"); + 
/* Add srccode here too? */ if (symbol_conf.bt_stop_list && node->sym && strlist__has_entry(symbol_conf.bt_stop_list, diff --git a/tools/perf/util/get_current_dir_name.c b/tools/perf/util/get_current_dir_name.c new file mode 100644 index 000000000000..267aa609a582 --- /dev/null +++ b/tools/perf/util/get_current_dir_name.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 +// Copyright (C) 2018, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com> +// +#ifndef HAVE_GET_CURRENT_DIR_NAME +#include "util.h" +#include <unistd.h> +#include <stdlib.h> +#include <stdlib.h> + +/* Android's 'bionic' library, for one, doesn't have this */ + +char *get_current_dir_name(void) +{ + char pwd[PATH_MAX]; + + return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd); +} +#endif // HAVE_GET_CURRENT_DIR_NAME diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 4fd45be95a43..dec6d218c31c 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c @@ -988,6 +988,45 @@ static int write_group_desc(struct feat_fd *ff, } /* + * Return the CPU id as a raw string. + * + * Each architecture should provide a more precise id string that + * can be use to match the architecture's "mapfile". + */ +char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused) +{ + return NULL; +} + +/* Return zero when the cpuid from the mapfile.csv matches the + * cpuid string generated on this platform. + * Otherwise return non-zero. + */ +int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) +{ + regex_t re; + regmatch_t pmatch[1]; + int match; + + if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) { + /* Warn unable to generate match particular string. */ + pr_info("Invalid regular expression %s\n", mapcpuid); + return 1; + } + + match = !regexec(&re, cpuid, 1, pmatch, 0); + regfree(&re); + if (match) { + size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so); + + /* Verify the entire string matched. 
*/ + if (match_len == strlen(cpuid)) + return 0; + } + return 1; +} + +/* * default get_cpuid(): nothing gets recorded * actual implementation must be in arch/$(SRCARCH)/util/header.c */ @@ -2659,6 +2698,7 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) struct perf_header *header = &session->header; int fd = perf_data__fd(session->data); struct stat st; + time_t stctime; int ret, bit; hd.fp = fp; @@ -2668,7 +2708,8 @@ int perf_header__fprintf_info(struct perf_session *session, FILE *fp, bool full) if (ret == -1) return -1; - fprintf(fp, "# captured on : %s", ctime(&st.st_ctime)); + stctime = st.st_ctime; + fprintf(fp, "# captured on : %s", ctime(&stctime)); fprintf(fp, "# header version : %u\n", header->version); fprintf(fp, "# data offset : %" PRIu64 "\n", header->data_offset); @@ -2759,7 +2800,7 @@ static int perf_header__adds_write(struct perf_header *header, lseek(fd, sec_start, SEEK_SET); /* * may write more than needed due to dropped feature, but - * this is okay, reader will skip the mising entries + * this is okay, reader will skip the missing entries */ err = do_write(&ff, feat_sec, sec_size); if (err < 0) @@ -3229,7 +3270,7 @@ static int read_attr(int fd, struct perf_header *ph, static int perf_evsel__prepare_tracepoint_event(struct perf_evsel *evsel, struct tep_handle *pevent) { - struct tep_event_format *event; + struct tep_event *event; char bf[128]; /* already prepared */ @@ -3544,7 +3585,7 @@ perf_event__synthesize_event_update_unit(struct perf_tool *tool, if (ev == NULL) return -ENOMEM; - strncpy(ev->data, evsel->unit, size); + strlcpy(ev->data, evsel->unit, size + 1); err = process(tool, (union perf_event *)ev, NULL, NULL); free(ev); return err; @@ -3583,7 +3624,7 @@ perf_event__synthesize_event_update_name(struct perf_tool *tool, if (ev == NULL) return -ENOMEM; - strncpy(ev->data, evsel->name, len); + strlcpy(ev->data, evsel->name, len + 1); err = process(tool, (union perf_event*) ev, NULL, NULL); free(ev); return err; diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 828cb9794c76..8aad8330e392 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -1160,7 +1160,7 @@ void hist_entry__delete(struct hist_entry *he) /* * If this is not the last column, then we need to pad it according to the - * pre-calculated max lenght for this column, otherwise don't bother adding + * pre-calculated max length for this column, otherwise don't bother adding * spaces because that would break viewing this with, for instance, 'less', * that would show tons of trailing spaces when a long C++ demangled method * names is sampled. 
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 3badd7f1e1b8..664b5eda8d51 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -62,6 +62,7 @@ enum hist_column { HISTC_TRACE, HISTC_SYM_SIZE, HISTC_DSO_SIZE, + HISTC_SYMBOL_IPC, HISTC_NR_COLS, /* Last entry */ }; diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c index 58f6a9ceb590..4503f3ca45ab 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-decoder.c @@ -1474,6 +1474,8 @@ static void intel_pt_calc_mtc_timestamp(struct intel_pt_decoder *decoder) decoder->have_calc_cyc_to_tsc = false; intel_pt_calc_cyc_to_tsc(decoder, true); } + + intel_pt_log_to("Setting timestamp", decoder->timestamp); } static void intel_pt_calc_cbr(struct intel_pt_decoder *decoder) @@ -1514,6 +1516,8 @@ static void intel_pt_calc_cyc_timestamp(struct intel_pt_decoder *decoder) decoder->timestamp = timestamp; decoder->timestamp_insn_cnt = 0; + + intel_pt_log_to("Setting timestamp", decoder->timestamp); } /* Walk PSB+ packets when already in sync. */ diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.c b/tools/perf/util/intel-pt-decoder/intel-pt-log.c index e02bc7b166a0..5e64da270f97 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-log.c +++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.c @@ -31,6 +31,11 @@ static FILE *f; static char log_name[MAX_LOG_NAME]; bool intel_pt_enable_logging; +void *intel_pt_log_fp(void) +{ + return f; +} + void intel_pt_log_enable(void) { intel_pt_enable_logging = true; diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-log.h b/tools/perf/util/intel-pt-decoder/intel-pt-log.h index 45b64f93f358..cc084937f701 100644 --- a/tools/perf/util/intel-pt-decoder/intel-pt-log.h +++ b/tools/perf/util/intel-pt-decoder/intel-pt-log.h @@ -22,6 +22,7 @@ struct intel_pt_pkt; +void *intel_pt_log_fp(void); void intel_pt_log_enable(void); void intel_pt_log_disable(void); void intel_pt_log_set_name(const char *name); diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c index 86cc9a64e982..149ff361ca78 100644 --- a/tools/perf/util/intel-pt.c +++ b/tools/perf/util/intel-pt.c @@ -206,6 +206,16 @@ static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf, intel_pt_dump(pt, buf, len); } +static void intel_pt_log_event(union perf_event *event) +{ + FILE *f = intel_pt_log_fp(); + + if (!intel_pt_enable_logging || !f) + return; + + perf_event__fprintf(event, f); +} + static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a, struct auxtrace_buffer *b) { @@ -2010,9 +2020,9 @@ static int intel_pt_process_event(struct perf_session *session, event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) err = intel_pt_context_switch(pt, event, sample); - intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n", - perf_event__name(event->header.type), event->header.type, - sample->cpu, sample->time, timestamp); + intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ", + event->header.type, sample->cpu, sample->time, timestamp); + intel_pt_log_event(event); return err; } diff --git a/tools/perf/util/jitdump.c b/tools/perf/util/jitdump.c index a1863000e972..bf249552a9b0 100644 --- a/tools/perf/util/jitdump.c +++ b/tools/perf/util/jitdump.c @@ -38,7 +38,7 @@ struct jit_buf_desc { uint64_t sample_type; size_t bufsize; FILE *in; - bool needs_bswap; /* handles cross-endianess */ + bool needs_bswap; /* handles cross-endianness */ bool 
use_arch_timestamp; void *debug_data; void *unwinding_data; diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 8f36ce813bc5..6fcb3bce0442 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -137,7 +137,7 @@ struct machine *machine__new_kallsyms(void) struct machine *machine = machine__new_host(); /* * FIXME: - * 1) We should switch to machine__load_kallsyms(), i.e. not explicitely + * 1) We should switch to machine__load_kallsyms(), i.e. not explicitly * ask for not using the kcore parsing code, once this one is fixed * to create a map per module. */ @@ -2493,15 +2493,13 @@ int machines__for_each_thread(struct machines *machines, int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, struct target *target, struct thread_map *threads, perf_event__handler_t process, bool data_mmap, - unsigned int proc_map_timeout, unsigned int nr_threads_synthesize) { if (target__has_task(target)) - return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout); + return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap); else if (target__has_cpu(target)) return perf_event__synthesize_threads(tool, process, machine, data_mmap, - proc_map_timeout, nr_threads_synthesize); /* command specified */ return 0; @@ -2592,6 +2590,33 @@ int machine__get_kernel_start(struct machine *machine) return err; } +u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr) +{ + u8 addr_cpumode = cpumode; + bool kernel_ip; + + if (!machine->single_address_space) + goto out; + + kernel_ip = machine__kernel_ip(machine, addr); + switch (cpumode) { + case PERF_RECORD_MISC_KERNEL: + case PERF_RECORD_MISC_USER: + addr_cpumode = kernel_ip ? PERF_RECORD_MISC_KERNEL : + PERF_RECORD_MISC_USER; + break; + case PERF_RECORD_MISC_GUEST_KERNEL: + case PERF_RECORD_MISC_GUEST_USER: + addr_cpumode = kernel_ip ? 
PERF_RECORD_MISC_GUEST_KERNEL : + PERF_RECORD_MISC_GUEST_USER; + break; + default: + break; + } +out: + return addr_cpumode; +} + struct dso *machine__findnew_dso(struct machine *machine, const char *filename) { return dsos__findnew(&machine->dsos, filename); diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index d856b85862e2..a5d1da60f751 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -42,6 +42,7 @@ struct machine { u16 id_hdr_size; bool comm_exec; bool kptr_restrict_warned; + bool single_address_space; char *root_dir; char *mmap_name; struct threads threads[THREADS__TABLE_SIZE]; @@ -99,6 +100,8 @@ static inline bool machine__kernel_ip(struct machine *machine, u64 ip) return ip >= kernel_start; } +u8 machine__addr_cpumode(struct machine *machine, u8 cpumode, u64 addr); + struct thread *machine__find_thread(struct machine *machine, pid_t pid, pid_t tid); struct comm *machine__thread_exec_comm(struct machine *machine, @@ -247,17 +250,14 @@ int machines__for_each_thread(struct machines *machines, int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool, struct target *target, struct thread_map *threads, perf_event__handler_t process, bool data_mmap, - unsigned int proc_map_timeout, unsigned int nr_threads_synthesize); static inline int machine__synthesize_threads(struct machine *machine, struct target *target, struct thread_map *threads, bool data_mmap, - unsigned int proc_map_timeout, unsigned int nr_threads_synthesize) { return __machine__synthesize_threads(machine, NULL, target, threads, perf_event__process, data_mmap, - proc_map_timeout, nr_threads_synthesize); } diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index 354e54550d2b..6751301a755c 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -19,8 +19,10 @@ #include "srcline.h" #include "namespaces.h" #include "unwind.h" +#include "srccode.h" static void __maps__insert(struct maps *maps, struct map *map); +static void __maps__insert_name(struct maps *maps, struct map *map); static inline int is_anon_memory(const char *filename, u32 flags) { @@ -420,6 +422,54 @@ int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, return ret; } +int map__fprintf_srccode(struct map *map, u64 addr, + FILE *fp, + struct srccode_state *state) +{ + char *srcfile; + int ret = 0; + unsigned line; + int len; + char *srccode; + + if (!map || !map->dso) + return 0; + srcfile = get_srcline_split(map->dso, + map__rip_2objdump(map, addr), + &line); + if (!srcfile) + return 0; + + /* Avoid redundant printing */ + if (state && + state->srcfile && + !strcmp(state->srcfile, srcfile) && + state->line == line) { + free(srcfile); + return 0; + } + + srccode = find_sourceline(srcfile, line, &len); + if (!srccode) + goto out_free_line; + + ret = fprintf(fp, "|%-8d %.*s", line, len, srccode); + state->srcfile = srcfile; + state->line = line; + return ret; + +out_free_line: + free(srcfile); + return ret; +} + + +void srccode_state_free(struct srccode_state *state) +{ + zfree(&state->srcfile); + state->line = 0; +} + /** * map__rip_2objdump - convert symbol start address to objdump address. 
* @map: memory map @@ -496,6 +546,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip) static void maps__init(struct maps *maps) { maps->entries = RB_ROOT; + maps->names = RB_ROOT; init_rwsem(&maps->lock); } @@ -664,6 +715,7 @@ size_t map_groups__fprintf(struct map_groups *mg, FILE *fp) static void __map_groups__insert(struct map_groups *mg, struct map *map) { __maps__insert(&mg->maps, map); + __maps__insert_name(&mg->maps, map); map->groups = mg; } @@ -824,10 +876,34 @@ static void __maps__insert(struct maps *maps, struct map *map) map__get(map); } +static void __maps__insert_name(struct maps *maps, struct map *map) +{ + struct rb_node **p = &maps->names.rb_node; + struct rb_node *parent = NULL; + struct map *m; + int rc; + + while (*p != NULL) { + parent = *p; + m = rb_entry(parent, struct map, rb_node_name); + rc = strcmp(m->dso->short_name, map->dso->short_name); + if (rc < 0) + p = &(*p)->rb_left; + else if (rc > 0) + p = &(*p)->rb_right; + else + return; + } + rb_link_node(&map->rb_node_name, parent, p); + rb_insert_color(&map->rb_node_name, &maps->names); + map__get(map); +} + void maps__insert(struct maps *maps, struct map *map) { down_write(&maps->lock); __maps__insert(maps, map); + __maps__insert_name(maps, map); up_write(&maps->lock); } @@ -846,19 +922,18 @@ void maps__remove(struct maps *maps, struct map *map) struct map *maps__find(struct maps *maps, u64 ip) { - struct rb_node **p, *parent = NULL; + struct rb_node *p; struct map *m; down_read(&maps->lock); - p = &maps->entries.rb_node; - while (*p != NULL) { - parent = *p; - m = rb_entry(parent, struct map, rb_node); + p = maps->entries.rb_node; + while (p != NULL) { + m = rb_entry(p, struct map, rb_node); if (ip < m->start) - p = &(*p)->rb_left; + p = p->rb_left; else if (ip >= m->end) - p = &(*p)->rb_right; + p = p->rb_right; else goto out; } diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h index e0f327b51e66..09282aa45c80 100644 --- a/tools/perf/util/map.h +++ b/tools/perf/util/map.h @@ -25,6 +25,7 @@ struct map { struct rb_node rb_node; struct list_head node; }; + struct rb_node rb_node_name; u64 start; u64 end; bool erange_warned; @@ -57,6 +58,7 @@ struct kmap { struct maps { struct rb_root entries; + struct rb_root names; struct rw_semaphore lock; }; @@ -172,6 +174,22 @@ char *map__srcline(struct map *map, u64 addr, struct symbol *sym); int map__fprintf_srcline(struct map *map, u64 addr, const char *prefix, FILE *fp); +struct srccode_state { + char *srcfile; + unsigned line; +}; + +static inline void srccode_state_init(struct srccode_state *state) +{ + state->srcfile = NULL; + state->line = 0; +} + +void srccode_state_free(struct srccode_state *state); + +int map__fprintf_srccode(struct map *map, u64 addr, + FILE *fp, struct srccode_state *state); + int map__load(struct map *map); struct symbol *map__find_symbol(struct map *map, u64 addr); struct symbol *map__find_symbol_by_name(struct map *map, const char *name); diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c index cdb95b3a1213..8fc39311a30d 100644 --- a/tools/perf/util/mmap.c +++ b/tools/perf/util/mmap.c @@ -153,8 +153,158 @@ void __weak auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp __mayb { } +#ifdef HAVE_AIO_SUPPORT +static int perf_mmap__aio_mmap(struct perf_mmap *map, struct mmap_params *mp) +{ + int delta_max, i, prio; + + map->aio.nr_cblocks = mp->nr_cblocks; + if (map->aio.nr_cblocks) { + map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *)); + if (!map->aio.aiocb) { + pr_debug2("failed to allocate aiocb for 
data buffer, error %m\n"); + return -1; + } + map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb)); + if (!map->aio.cblocks) { + pr_debug2("failed to allocate cblocks for data buffer, error %m\n"); + return -1; + } + map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *)); + if (!map->aio.data) { + pr_debug2("failed to allocate data buffer, error %m\n"); + return -1; + } + delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX); + for (i = 0; i < map->aio.nr_cblocks; ++i) { + map->aio.data[i] = malloc(perf_mmap__mmap_len(map)); + if (!map->aio.data[i]) { + pr_debug2("failed to allocate data buffer area, error %m"); + return -1; + } + /* + * Use cblock.aio_fildes value different from -1 + * to denote started aio write operation on the + * cblock so it requires explicit record__aio_sync() + * call before the cblock may be reused again. + */ + map->aio.cblocks[i].aio_fildes = -1; + /* + * Allocate cblocks with priority delta to have + * faster aio write system calls because queued requests + * are kept in separate per-prio queues and adding + * a new request will iterate through a shorter per-prio + * list. Blocks with numbers higher than + * _SC_AIO_PRIO_DELTA_MAX go with priority 0. + */ + prio = delta_max - i; + map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0; + } + } + + return 0; +} + +static void perf_mmap__aio_munmap(struct perf_mmap *map) +{ + int i; + + for (i = 0; i < map->aio.nr_cblocks; ++i) + zfree(&map->aio.data[i]); + if (map->aio.data) + zfree(&map->aio.data); + zfree(&map->aio.cblocks); + zfree(&map->aio.aiocb); +} + +int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx, + int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off), + off_t *off) +{ + u64 head = perf_mmap__read_head(md); + unsigned char *data = md->base + page_size; + unsigned long size, size0 = 0; + void *buf; + int rc = 0; + + rc = perf_mmap__read_init(md); + if (rc < 0) + return (rc == -EAGAIN) ? 0 : -1; + + /* + * md->base data is copied into md->data[idx] buffer to + * release space in the kernel buffer as fast as possible, + * through perf_mmap__consume() below. + * + * That lets the kernel proceed with storing more + * profiling data into the kernel buffer earlier than other + * per-cpu kernel buffers are handled. + * + * Copying can be done in two steps in case the chunk of + * profiling data crosses the upper bound of the kernel buffer. + * In this case we first move part of data from md->start + * till the upper bound and then the remainder from the + * beginning of the kernel buffer till the end of + * the data chunk. + */ + + size = md->end - md->start; + + if ((md->start & md->mask) + size != (md->end & md->mask)) { + buf = &data[md->start & md->mask]; + size = md->mask + 1 - (md->start & md->mask); + md->start += size; + memcpy(md->aio.data[idx], buf, size); + size0 = size; + } + + buf = &data[md->start & md->mask]; + size = md->end - md->start; + md->start += size; + memcpy(md->aio.data[idx] + size0, buf, size); + + /* + * Increment md->refcount to guard md->data[idx] buffer + * from premature deallocation because md object can be + * released earlier than aio write request started + * on mmap->data[idx] is complete. + * + * perf_mmap__put() is done at record__aio_complete() + * after started request completion.
+ */ + perf_mmap__get(md); + + md->prev = head; + perf_mmap__consume(md); + + rc = push(to, &md->aio.cblocks[idx], md->aio.data[idx], size0 + size, *off); + if (!rc) { + *off += size0 + size; + } else { + /* + * Decrement md->refcount back if aio write + * operation failed to start. + */ + perf_mmap__put(md); + } + + return rc; +} +#else +static int perf_mmap__aio_mmap(struct perf_mmap *map __maybe_unused, + struct mmap_params *mp __maybe_unused) +{ + return 0; +} + +static void perf_mmap__aio_munmap(struct perf_mmap *map __maybe_unused) +{ +} +#endif + void perf_mmap__munmap(struct perf_mmap *map) { + perf_mmap__aio_munmap(map); if (map->base != NULL) { munmap(map->base, perf_mmap__mmap_len(map)); map->base = NULL; @@ -197,7 +347,7 @@ int perf_mmap__mmap(struct perf_mmap *map, struct mmap_params *mp, int fd, int c &mp->auxtrace_mp, map->base, fd)) return -1; - return 0; + return perf_mmap__aio_mmap(map, mp); } static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end) diff --git a/tools/perf/util/mmap.h b/tools/perf/util/mmap.h index cc5e2d6d17a9..aeb6942fdb00 100644 --- a/tools/perf/util/mmap.h +++ b/tools/perf/util/mmap.h @@ -6,9 +6,13 @@ #include <linux/types.h> #include <linux/ring_buffer.h> #include <stdbool.h> +#ifdef HAVE_AIO_SUPPORT +#include <aio.h> +#endif #include "auxtrace.h" #include "event.h" +struct aiocb; /** * struct perf_mmap - perf's ring buffer mmap details * @@ -26,6 +30,14 @@ struct perf_mmap { bool overwrite; struct auxtrace_mmap auxtrace_mmap; char event_copy[PERF_SAMPLE_MAX_SIZE] __aligned(8); +#ifdef HAVE_AIO_SUPPORT + struct { + void **data; + struct aiocb *cblocks; + struct aiocb **aiocb; + int nr_cblocks; + } aio; +#endif }; /* @@ -57,7 +69,7 @@ enum bkw_mmap_state { }; struct mmap_params { - int prot, mask; + int prot, mask, nr_cblocks; struct auxtrace_mmap_params auxtrace_mp; }; @@ -85,6 +97,18 @@ union perf_event *perf_mmap__read_event(struct perf_mmap *map); int perf_mmap__push(struct perf_mmap *md, void *to, int push(struct perf_mmap *map, void *to, void *buf, size_t size)); +#ifdef HAVE_AIO_SUPPORT +int perf_mmap__aio_push(struct perf_mmap *md, void *to, int idx, + int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off), + off_t *off); +#else +static inline int perf_mmap__aio_push(struct perf_mmap *md __maybe_unused, void *to __maybe_unused, int idx __maybe_unused, + int push(void *to, struct aiocb *cblock, void *buf, size_t size, off_t off) __maybe_unused, + off_t *off __maybe_unused) +{ + return 0; +} +#endif size_t perf_mmap__mmap_len(struct perf_mmap *map); diff --git a/tools/perf/util/namespaces.c b/tools/perf/util/namespaces.c index cf8bd123cf73..aed170bd4384 100644 --- a/tools/perf/util/namespaces.c +++ b/tools/perf/util/namespaces.c @@ -18,6 +18,7 @@ #include <stdio.h> #include <string.h> #include <unistd.h> +#include <asm/bug.h> struct namespaces *namespaces__new(struct namespaces_event *event) { @@ -186,6 +187,7 @@ void nsinfo__mountns_enter(struct nsinfo *nsi, char curpath[PATH_MAX]; int oldns = -1; int newns = -1; + char *oldcwd = NULL; if (nc == NULL) return; @@ -199,9 +201,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi, if (snprintf(curpath, PATH_MAX, "/proc/self/ns/mnt") >= PATH_MAX) return; + oldcwd = get_current_dir_name(); + if (!oldcwd) + return; + oldns = open(curpath, O_RDONLY); if (oldns < 0) - return; + goto errout; newns = open(nsi->mntns_path, O_RDONLY); if (newns < 0) @@ -210,11 +216,13 @@ void nsinfo__mountns_enter(struct nsinfo *nsi, if (setns(newns, CLONE_NEWNS) < 0) goto errout; + 
nc->oldcwd = oldcwd; nc->oldns = oldns; nc->newns = newns; return; errout: + free(oldcwd); if (oldns > -1) close(oldns); if (newns > -1) @@ -223,11 +231,16 @@ errout: void nsinfo__mountns_exit(struct nscookie *nc) { - if (nc == NULL || nc->oldns == -1 || nc->newns == -1) + if (nc == NULL || nc->oldns == -1 || nc->newns == -1 || !nc->oldcwd) return; setns(nc->oldns, CLONE_NEWNS); + if (nc->oldcwd) { + WARN_ON_ONCE(chdir(nc->oldcwd)); + zfree(&nc->oldcwd); + } + if (nc->oldns > -1) { close(nc->oldns); nc->oldns = -1; diff --git a/tools/perf/util/namespaces.h b/tools/perf/util/namespaces.h index cae1a9a39722..d5f46c09ea31 100644 --- a/tools/perf/util/namespaces.h +++ b/tools/perf/util/namespaces.h @@ -38,6 +38,7 @@ struct nsinfo { struct nscookie { int oldns; int newns; + char *oldcwd; }; int nsinfo__init(struct nsinfo *nsi); diff --git a/tools/perf/util/ordered-events.c b/tools/perf/util/ordered-events.c index 1904e7f6ec84..897589507d97 100644 --- a/tools/perf/util/ordered-events.c +++ b/tools/perf/util/ordered-events.c @@ -219,13 +219,12 @@ int ordered_events__queue(struct ordered_events *oe, union perf_event *event, return 0; } -static int __ordered_events__flush(struct ordered_events *oe) +static int do_flush(struct ordered_events *oe, bool show_progress) { struct list_head *head = &oe->events; struct ordered_event *tmp, *iter; u64 limit = oe->next_flush; u64 last_ts = oe->last ? oe->last->timestamp : 0ULL; - bool show_progress = limit == ULLONG_MAX; struct ui_progress prog; int ret; @@ -263,7 +262,8 @@ static int __ordered_events__flush(struct ordered_events *oe) return 0; } -int ordered_events__flush(struct ordered_events *oe, enum oe_flush how) +static int __ordered_events__flush(struct ordered_events *oe, enum oe_flush how, + u64 timestamp) { static const char * const str[] = { "NONE", @@ -272,12 +272,16 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how) "HALF ", }; int err; + bool show_progress = false; if (oe->nr_events == 0) return 0; switch (how) { case OE_FLUSH__FINAL: + show_progress = true; + __fallthrough; + case OE_FLUSH__TOP: oe->next_flush = ULLONG_MAX; break; @@ -298,6 +302,11 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how) break; } + case OE_FLUSH__TIME: + oe->next_flush = timestamp; + show_progress = false; + break; + case OE_FLUSH__ROUND: case OE_FLUSH__NONE: default: @@ -308,7 +317,7 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how) str[how], oe->nr_events); pr_oe_time(oe->max_timestamp, "max_timestamp\n"); - err = __ordered_events__flush(oe); + err = do_flush(oe, show_progress); if (!err) { if (how == OE_FLUSH__ROUND) @@ -324,7 +333,29 @@ int ordered_events__flush(struct ordered_events *oe, enum oe_flush how) return err; } -void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver) +int ordered_events__flush(struct ordered_events *oe, enum oe_flush how) +{ + return __ordered_events__flush(oe, how, 0); +} + +int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp) +{ + return __ordered_events__flush(oe, OE_FLUSH__TIME, timestamp); +} + +u64 ordered_events__first_time(struct ordered_events *oe) +{ + struct ordered_event *event; + + if (list_empty(&oe->events)) + return 0; + + event = list_first_entry(&oe->events, struct ordered_event, list); + return event->timestamp; +} + +void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver, + void *data) { INIT_LIST_HEAD(&oe->events); INIT_LIST_HEAD(&oe->cache); @@ -332,6 +363,7 @@ 
void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t d oe->max_alloc_size = (u64) -1; oe->cur_alloc_size = 0; oe->deliver = deliver; + oe->data = data; } static void @@ -375,5 +407,5 @@ void ordered_events__reinit(struct ordered_events *oe) ordered_events__free(oe); memset(oe, '\0', sizeof(*oe)); - ordered_events__init(oe, old_deliver); + ordered_events__init(oe, old_deliver, oe->data); } diff --git a/tools/perf/util/ordered-events.h b/tools/perf/util/ordered-events.h index 1338d5c345dc..0920fb0ec6cc 100644 --- a/tools/perf/util/ordered-events.h +++ b/tools/perf/util/ordered-events.h @@ -18,6 +18,8 @@ enum oe_flush { OE_FLUSH__FINAL, OE_FLUSH__ROUND, OE_FLUSH__HALF, + OE_FLUSH__TOP, + OE_FLUSH__TIME, }; struct ordered_events; @@ -47,15 +49,19 @@ struct ordered_events { enum oe_flush last_flush_type; u32 nr_unordered_events; bool copy_on_queue; + void *data; }; int ordered_events__queue(struct ordered_events *oe, union perf_event *event, u64 timestamp, u64 file_offset); void ordered_events__delete(struct ordered_events *oe, struct ordered_event *event); int ordered_events__flush(struct ordered_events *oe, enum oe_flush how); -void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver); +int ordered_events__flush_time(struct ordered_events *oe, u64 timestamp); +void ordered_events__init(struct ordered_events *oe, ordered_events__deliver_t deliver, + void *data); void ordered_events__free(struct ordered_events *oe); void ordered_events__reinit(struct ordered_events *oe); +u64 ordered_events__first_time(struct ordered_events *oe); static inline void ordered_events__set_alloc_size(struct ordered_events *oe, u64 size) diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index 59be3466d64d..920e1e6551dd 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c @@ -2462,7 +2462,7 @@ restart: if (!name_only && strlen(syms->alias)) snprintf(name, MAX_NAME_LEN, "%s OR %s", syms->symbol, syms->alias); else - strncpy(name, syms->symbol, MAX_NAME_LEN); + strlcpy(name, syms->symbol, MAX_NAME_LEN); evt_list[evt_i] = strdup(name); if (evt_list[evt_i] == NULL) diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 7799788f662f..11a234740632 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -145,7 +145,7 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char * int fd, ret = -1; char path[PATH_MAX]; - snprintf(path, PATH_MAX, "%s/%s.scale", dir, name); + scnprintf(path, PATH_MAX, "%s/%s.scale", dir, name); fd = open(path, O_RDONLY); if (fd == -1) @@ -175,7 +175,7 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n ssize_t sret; int fd; - snprintf(path, PATH_MAX, "%s/%s.unit", dir, name); + scnprintf(path, PATH_MAX, "%s/%s.unit", dir, name); fd = open(path, O_RDONLY); if (fd == -1) @@ -205,7 +205,7 @@ perf_pmu__parse_per_pkg(struct perf_pmu_alias *alias, char *dir, char *name) char path[PATH_MAX]; int fd; - snprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name); + scnprintf(path, PATH_MAX, "%s/%s.per-pkg", dir, name); fd = open(path, O_RDONLY); if (fd == -1) @@ -223,7 +223,7 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias, char path[PATH_MAX]; int fd; - snprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name); + scnprintf(path, PATH_MAX, "%s/%s.snapshot", dir, name); fd = open(path, O_RDONLY); if (fd == -1) @@ -655,45 +655,6 @@ static int is_arm_pmu_core(const char *name) return 0; } -/* - * Return the CPU id as 
a raw string. - * - * Each architecture should provide a more precise id string that - * can be use to match the architecture's "mapfile". - */ -char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused) -{ - return NULL; -} - -/* Return zero when the cpuid from the mapfile.csv matches the - * cpuid string generated on this platform. - * Otherwise return non-zero. - */ -int strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) -{ - regex_t re; - regmatch_t pmatch[1]; - int match; - - if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) { - /* Warn unable to generate match particular string. */ - pr_info("Invalid regular expression %s\n", mapcpuid); - return 1; - } - - match = !regexec(&re, cpuid, 1, pmatch, 0); - regfree(&re); - if (match) { - size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so); - - /* Verify the entire string matched. */ - if (match_len == strlen(cpuid)) - return 0; - } - return 1; -} - static char *perf_pmu__getcpuid(struct perf_pmu *pmu) { char *cpuid; @@ -773,7 +734,7 @@ static void pmu_add_cpu_aliases(struct list_head *head, struct perf_pmu *pmu) if (!is_arm_pmu_core(name)) { pname = pe->pmu ? pe->pmu : "cpu"; - if (strncmp(pname, name, strlen(pname))) + if (strcmp(pname, name)) continue; } diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index e86f8be89157..18a59fba97ff 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -692,7 +692,7 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs, return ret; for (i = 0; i < ntevs && ret >= 0; i++) { - /* point.address is the addres of point.symbol + point.offset */ + /* point.address is the address of point.symbol + point.offset */ tevs[i].point.address -= stext; tevs[i].point.module = strdup(exec); if (!tevs[i].point.module) { @@ -3062,7 +3062,7 @@ static int try_to_find_absolute_address(struct perf_probe_event *pev, /* * Give it a '0x' leading symbol name. * In __add_probe_trace_events, a NULL symbol is interpreted as - * invalud. + * invalid. 
*/ if (asprintf(&tp->symbol, "0x%lx", tp->address) < 0) goto errout; diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c index aac7817d9e14..0b1195cad0e5 100644 --- a/tools/perf/util/probe-file.c +++ b/tools/perf/util/probe-file.c @@ -424,7 +424,7 @@ static int probe_cache__open(struct probe_cache *pcache, const char *target, if (target && build_id_cache__cached(target)) { /* This is a cached buildid */ - strncpy(sbuildid, target, SBUILD_ID_SIZE); + strlcpy(sbuildid, target, SBUILD_ID_SIZE); dir_name = build_id_cache__linkname(sbuildid, NULL, 0); goto found; } diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c index 50150dfc0cdf..47628e85c5eb 100644 --- a/tools/perf/util/python.c +++ b/tools/perf/util/python.c @@ -386,7 +386,7 @@ get_tracepoint_field(struct pyrf_event *pevent, PyObject *attr_name) struct tep_format_field *field; if (!evsel->tp_format) { - struct tep_event_format *tp_format; + struct tep_event *tp_format; tp_format = trace_event__tp_format_id(evsel->attr.config); if (!tp_format) @@ -1240,7 +1240,7 @@ static struct { static PyObject *pyrf__tracepoint(struct pyrf_evsel *pevsel, PyObject *args, PyObject *kwargs) { - struct tep_event_format *tp_format; + struct tep_event *tp_format; static char *kwlist[] = { "sys", "name", NULL }; char *sys = NULL; char *name = NULL; diff --git a/tools/perf/util/s390-cpumsf.c b/tools/perf/util/s390-cpumsf.c index a2eeebbfb25f..68b2570304ec 100644 --- a/tools/perf/util/s390-cpumsf.c +++ b/tools/perf/util/s390-cpumsf.c @@ -506,7 +506,7 @@ static int s390_cpumsf_samples(struct s390_cpumsf_queue *sfq, u64 *ts) aux_ts = get_trailer_time(buf); if (!aux_ts) { pr_err("[%#08" PRIx64 "] Invalid AUX trailer entry TOD clock base\n", - sfq->buffer->data_offset); + (s64)sfq->buffer->data_offset); aux_ts = ~0ULL; goto out; } diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c index 89cb887648f9..b93f36b887b5 100644 --- a/tools/perf/util/scripting-engines/trace-event-perl.c +++ b/tools/perf/util/scripting-engines/trace-event-perl.c @@ -189,7 +189,7 @@ static void define_flag_field(const char *ev_name, LEAVE; } -static void define_event_symbols(struct tep_event_format *event, +static void define_event_symbols(struct tep_event *event, const char *ev_name, struct tep_print_arg *args) { @@ -338,7 +338,7 @@ static void perl_process_tracepoint(struct perf_sample *sample, struct addr_location *al) { struct thread *thread = al->thread; - struct tep_event_format *event = evsel->tp_format; + struct tep_event *event = evsel->tp_format; struct tep_format_field *field; static char handler[256]; unsigned long long val; @@ -537,7 +537,7 @@ static int perl_stop_script(void) static int perl_generate_script(struct tep_handle *pevent, const char *outfile) { - struct tep_event_format *event = NULL; + struct tep_event *event = NULL; struct tep_format_field *f; char fname[PATH_MAX]; int not_first, count; diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c index 69aa93d4ee99..87ef16a1b17e 100644 --- a/tools/perf/util/scripting-engines/trace-event-python.c +++ b/tools/perf/util/scripting-engines/trace-event-python.c @@ -264,7 +264,7 @@ static void define_field(enum tep_print_arg_type field_type, Py_DECREF(t); } -static void define_event_symbols(struct tep_event_format *event, +static void define_event_symbols(struct tep_event *event, const char *ev_name, struct tep_print_arg *args) { @@ -332,7 +332,7 @@ static 
void define_event_symbols(struct tep_event_format *event, define_event_symbols(event, ev_name, args->next); } -static PyObject *get_field_numeric_entry(struct tep_event_format *event, +static PyObject *get_field_numeric_entry(struct tep_event *event, struct tep_format_field *field, void *data) { bool is_array = field->flags & TEP_FIELD_IS_ARRAY; @@ -494,14 +494,14 @@ static PyObject *python_process_brstack(struct perf_sample *sample, pydict_set_item_string_decref(pyelem, "cycles", PyLong_FromUnsignedLongLong(br->entries[i].flags.cycles)); - thread__find_map(thread, sample->cpumode, - br->entries[i].from, &al); + thread__find_map_fb(thread, sample->cpumode, + br->entries[i].from, &al); dsoname = get_dsoname(al.map); pydict_set_item_string_decref(pyelem, "from_dsoname", _PyUnicode_FromString(dsoname)); - thread__find_map(thread, sample->cpumode, - br->entries[i].to, &al); + thread__find_map_fb(thread, sample->cpumode, + br->entries[i].to, &al); dsoname = get_dsoname(al.map); pydict_set_item_string_decref(pyelem, "to_dsoname", _PyUnicode_FromString(dsoname)); @@ -576,14 +576,14 @@ static PyObject *python_process_brstacksym(struct perf_sample *sample, if (!pyelem) Py_FatalError("couldn't create Python dictionary"); - thread__find_symbol(thread, sample->cpumode, - br->entries[i].from, &al); + thread__find_symbol_fb(thread, sample->cpumode, + br->entries[i].from, &al); get_symoff(al.sym, &al, true, bf, sizeof(bf)); pydict_set_item_string_decref(pyelem, "from", _PyUnicode_FromString(bf)); - thread__find_symbol(thread, sample->cpumode, - br->entries[i].to, &al); + thread__find_symbol_fb(thread, sample->cpumode, + br->entries[i].to, &al); get_symoff(al.sym, &al, true, bf, sizeof(bf)); pydict_set_item_string_decref(pyelem, "to", _PyUnicode_FromString(bf)); @@ -790,7 +790,7 @@ static void python_process_tracepoint(struct perf_sample *sample, struct perf_evsel *evsel, struct addr_location *al) { - struct tep_event_format *event = evsel->tp_format; + struct tep_event *event = evsel->tp_format; PyObject *handler, *context, *t, *obj = NULL, *callchain; PyObject *dict = NULL, *all_entries_dict = NULL; static char handler_name[256]; @@ -1590,7 +1590,7 @@ static int python_stop_script(void) static int python_generate_script(struct tep_handle *pevent, const char *outfile) { - struct tep_event_format *event = NULL; + struct tep_event *event = NULL; struct tep_format_field *f; char fname[PATH_MAX]; int not_first, count; diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c index 7d2c8ce6cfad..78a067777144 100644 --- a/tools/perf/util/session.c +++ b/tools/perf/util/session.c @@ -24,6 +24,7 @@ #include "thread.h" #include "thread-stack.h" #include "stat.h" +#include "arch/common.h" static int perf_session__deliver_event(struct perf_session *session, union perf_event *event, @@ -125,7 +126,8 @@ struct perf_session *perf_session__new(struct perf_data *data, session->tool = tool; INIT_LIST_HEAD(&session->auxtrace_index); machines__init(&session->machines); - ordered_events__init(&session->ordered_events, ordered_events__deliver_event); + ordered_events__init(&session->ordered_events, + ordered_events__deliver_event, NULL); if (data) { if (perf_data__open(data)) @@ -150,6 +152,9 @@ struct perf_session *perf_session__new(struct perf_data *data, session->machines.host.env = &perf_env; } + session->machines.host.single_address_space = + perf_env__single_address_space(session->machines.host.env); + if (!data || perf_data__is_write(data)) { /* * In O_RDONLY mode this will be performed when reading the diff 
--git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index f96c005b3c41..6c1a83768eb0 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -13,6 +13,7 @@ #include "strlist.h" #include <traceevent/event-parse.h> #include "mem-events.h" +#include "annotate.h" #include <linux/kernel.h> regex_t parent_regex; @@ -36,7 +37,7 @@ enum sort_mode sort__mode = SORT_MODE__NORMAL; * -t, --field-separator * * option, that uses a special separator character and don't pad with spaces, - * replacing all occurances of this separator in symbol names (and other + * replacing all occurrences of this separator in symbol names (and other * output) with a '.' character, that thus it's the only non valid separator. */ static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...) @@ -422,6 +423,64 @@ struct sort_entry sort_srcline_to = { .se_width_idx = HISTC_SRCLINE_TO, }; +static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf, + size_t size, unsigned int width) +{ + + struct symbol *sym = he->ms.sym; + struct map *map = he->ms.map; + struct perf_evsel *evsel = hists_to_evsel(he->hists); + struct annotation *notes; + double ipc = 0.0, coverage = 0.0; + char tmp[64]; + + if (!sym) + return repsep_snprintf(bf, size, "%-*s", width, "-"); + + if (!sym->annotate2 && symbol__annotate2(sym, map, evsel, + &annotation__default_options, NULL) < 0) { + return 0; + } + + notes = symbol__annotation(sym); + + if (notes->hit_cycles) + ipc = notes->hit_insn / ((double)notes->hit_cycles); + + if (notes->total_insn) { + coverage = notes->cover_insn * 100.0 / + ((double)notes->total_insn); + } + + snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage); + return repsep_snprintf(bf, size, "%-*s", width, tmp); +} + +struct sort_entry sort_sym_ipc = { + .se_header = "IPC [IPC Coverage]", + .se_cmp = sort__sym_cmp, + .se_snprintf = hist_entry__sym_ipc_snprintf, + .se_width_idx = HISTC_SYMBOL_IPC, +}; + +static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he + __maybe_unused, + char *bf, size_t size, + unsigned int width) +{ + char tmp[64]; + + snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-"); + return repsep_snprintf(bf, size, "%-*s", width, tmp); +} + +struct sort_entry sort_sym_ipc_null = { + .se_header = "IPC [IPC Coverage]", + .se_cmp = sort__sym_cmp, + .se_snprintf = hist_entry__sym_ipc_null_snprintf, + .se_width_idx = HISTC_SYMBOL_IPC, +}; + /* --sort srcfile */ static char no_srcfile[1]; @@ -1574,6 +1633,7 @@ static struct sort_dimension common_sort_dimensions[] = { DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size), DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size), DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id), + DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null), }; #undef DIM @@ -1591,6 +1651,7 @@ static struct sort_dimension bstack_sort_dimensions[] = { DIM(SORT_CYCLES, "cycles", sort_cycles), DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from), DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to), + DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc), }; #undef DIM diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h index a97cf8e6be86..130fe37fe2df 100644 --- a/tools/perf/util/sort.h +++ b/tools/perf/util/sort.h @@ -229,6 +229,7 @@ enum sort_type { SORT_SYM_SIZE, SORT_DSO_SIZE, SORT_CGROUP_ID, + SORT_SYM_IPC_NULL, /* branch stack specific sort keys */ __SORT_BRANCH_STACK, @@ -242,6 +243,7 @@ enum sort_type { SORT_CYCLES, SORT_SRCLINE_FROM, SORT_SRCLINE_TO, + SORT_SYM_IPC, /* memory mode specific sort keys */ __SORT_MEMORY_MODE, diff --git 
a/tools/perf/util/srccode.c b/tools/perf/util/srccode.c new file mode 100644 index 000000000000..fcc8630f6dff --- /dev/null +++ b/tools/perf/util/srccode.c @@ -0,0 +1,186 @@ +/* + * Manage printing of source lines + * Copyright (c) 2017, Intel Corporation. + * Author: Andi Kleen + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + */ +#include "linux/list.h" +#include <stdlib.h> +#include <sys/mman.h> +#include <sys/stat.h> +#include <fcntl.h> +#include <unistd.h> +#include <assert.h> +#include <string.h> +#include "srccode.h" +#include "debug.h" +#include "util.h" + +#define MAXSRCCACHE (32*1024*1024) +#define MAXSRCFILES 64 +#define SRC_HTAB_SZ 64 + +struct srcfile { + struct hlist_node hash_nd; + struct list_head nd; + char *fn; + char **lines; + char *map; + unsigned numlines; + size_t maplen; +}; + +static struct hlist_head srcfile_htab[SRC_HTAB_SZ]; +static LIST_HEAD(srcfile_list); +static long map_total_sz; +static int num_srcfiles; + +static unsigned shash(unsigned char *s) +{ + unsigned h = 0; + while (*s) + h = 65599 * h + *s++; + return h ^ (h >> 16); +} + +static int countlines(char *map, int maplen) +{ + int numl; + char *end = map + maplen; + char *p = map; + + if (maplen == 0) + return 0; + numl = 0; + while (p < end && (p = memchr(p, '\n', end - p)) != NULL) { + numl++; + p++; + } + if (p < end) + numl++; + return numl; +} + +static void fill_lines(char **lines, int maxline, char *map, int maplen) +{ + int l; + char *end = map + maplen; + char *p = map; + + if (maplen == 0 || maxline == 0) + return; + l = 0; + lines[l++] = map; + while (p < end && (p = memchr(p, '\n', end - p)) != NULL) { + if (l >= maxline) + return; + lines[l++] = ++p; + } + if (p < end) + lines[l] = p; +} + +static void free_srcfile(struct srcfile *sf) +{ + list_del(&sf->nd); + hlist_del(&sf->hash_nd); + map_total_sz -= sf->maplen; + munmap(sf->map, sf->maplen); + free(sf->lines); + free(sf->fn); + free(sf); + num_srcfiles--; +} + +static struct srcfile *find_srcfile(char *fn) +{ + struct stat st; + struct srcfile *h; + int fd; + unsigned long sz; + unsigned hval = shash((unsigned char *)fn) % SRC_HTAB_SZ; + + hlist_for_each_entry (h, &srcfile_htab[hval], hash_nd) { + if (!strcmp(fn, h->fn)) { + /* Move to front */ + list_del(&h->nd); + list_add(&h->nd, &srcfile_list); + return h; + } + } + + /* Only prune if there is more than one entry */ + while ((num_srcfiles > MAXSRCFILES || map_total_sz > MAXSRCCACHE) && + srcfile_list.next != &srcfile_list) { + assert(!list_empty(&srcfile_list)); + h = list_entry(srcfile_list.prev, struct srcfile, nd); + free_srcfile(h); + } + + fd = open(fn, O_RDONLY); + if (fd < 0 || fstat(fd, &st) < 0) { + pr_debug("cannot open source file %s\n", fn); + return NULL; + } + + h = malloc(sizeof(struct srcfile)); + if (!h) + return NULL; + + h->fn = strdup(fn); + if (!h->fn) + goto out_h; + + h->maplen = st.st_size; + sz = (h->maplen + page_size - 1) & ~(page_size - 1); + h->map = mmap(NULL, sz, PROT_READ, MAP_SHARED, fd, 0); + close(fd); + if (h->map == (char *)-1) { + pr_debug("cannot mmap source file %s\n", fn); + goto out_fn; + } + h->numlines = countlines(h->map, h->maplen); + 
h->lines = calloc(h->numlines, sizeof(char *)); + if (!h->lines) + goto out_map; + fill_lines(h->lines, h->numlines, h->map, h->maplen); + list_add(&h->nd, &srcfile_list); + hlist_add_head(&h->hash_nd, &srcfile_htab[hval]); + map_total_sz += h->maplen; + num_srcfiles++; + return h; + +out_map: + munmap(h->map, sz); +out_fn: + free(h->fn); +out_h: + free(h); + return NULL; +} + +/* Result is not 0 terminated */ +char *find_sourceline(char *fn, unsigned line, int *lenp) +{ + char *l, *p; + struct srcfile *sf = find_srcfile(fn); + if (!sf) + return NULL; + line--; + if (line >= sf->numlines) + return NULL; + l = sf->lines[line]; + if (!l) + return NULL; + p = memchr(l, '\n', sf->map + sf->maplen - l); + *lenp = p - l; + return l; +} diff --git a/tools/perf/util/srccode.h b/tools/perf/util/srccode.h new file mode 100644 index 000000000000..e500a746d5f1 --- /dev/null +++ b/tools/perf/util/srccode.h @@ -0,0 +1,7 @@ +#ifndef SRCCODE_H +#define SRCCODE_H 1 + +/* Result is not 0 terminated */ +char *find_sourceline(char *fn, unsigned line, int *lenp); + +#endif diff --git a/tools/perf/util/srcline.c b/tools/perf/util/srcline.c index e767c4a9d4d2..dc86597d0cc4 100644 --- a/tools/perf/util/srcline.c +++ b/tools/perf/util/srcline.c @@ -548,6 +548,34 @@ out: return srcline; } +/* Returns filename and fills in line number in line */ +char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line) +{ + char *file = NULL; + const char *dso_name; + + if (!dso->has_srcline) + goto out; + + dso_name = dso__name(dso); + if (dso_name == NULL) + goto out; + + if (!addr2line(dso_name, addr, &file, line, dso, true, NULL, NULL)) + goto out; + + dso->a2l_fails = 0; + return file; + +out: + if (dso->a2l_fails && ++dso->a2l_fails > A2L_FAIL_LIMIT) { + dso->has_srcline = 0; + dso__free_a2l(dso); + } + + return NULL; +} + void free_srcline(char *srcline) { if (srcline && strcmp(srcline, SRCLINE_UNKNOWN) != 0) diff --git a/tools/perf/util/srcline.h b/tools/perf/util/srcline.h index b2bb5502fd62..5762212dc342 100644 --- a/tools/perf/util/srcline.h +++ b/tools/perf/util/srcline.h @@ -16,6 +16,7 @@ char *__get_srcline(struct dso *dso, u64 addr, struct symbol *sym, bool show_sym, bool show_addr, bool unwind_inlines, u64 ip); void free_srcline(char *srcline); +char *get_srcline_split(struct dso *dso, u64 addr, unsigned *line); /* insert the srcline into the DSO, which will take ownership */ void srcline__tree_insert(struct rb_root *tree, u64 addr, char *srcline); diff --git a/tools/perf/util/stat-display.c b/tools/perf/util/stat-display.c index e7b4c44ebb62..665ee374fc01 100644 --- a/tools/perf/util/stat-display.c +++ b/tools/perf/util/stat-display.c @@ -59,6 +59,15 @@ static void print_noise(struct perf_stat_config *config, print_noise_pct(config, stddev_stats(&ps->res_stats[0]), avg); } +static void print_cgroup(struct perf_stat_config *config, struct perf_evsel *evsel) +{ + if (nr_cgroups) { + const char *cgrp_name = evsel->cgrp ? evsel->cgrp->name : ""; + fprintf(config->output, "%s%s", config->csv_sep, cgrp_name); + } +} + + static void aggr_printout(struct perf_stat_config *config, struct perf_evsel *evsel, int id, int nr) { @@ -336,8 +345,7 @@ static void abs_printout(struct perf_stat_config *config, fprintf(output, "%-*s", config->csv_output ? 
0 : 25, perf_evsel__name(evsel)); - if (evsel->cgrp) - fprintf(output, "%s%s", config->csv_sep, evsel->cgrp->name); + print_cgroup(config, evsel); } static bool is_mixed_hw_group(struct perf_evsel *counter) @@ -431,9 +439,7 @@ static void printout(struct perf_stat_config *config, int id, int nr, config->csv_output ? 0 : -25, perf_evsel__name(counter)); - if (counter->cgrp) - fprintf(config->output, "%s%s", - config->csv_sep, counter->cgrp->name); + print_cgroup(config, counter); if (!config->csv_output) pm(config, &os, NULL, NULL, "", 0); diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c index 8ad32763cfff..3c22c58b3e90 100644 --- a/tools/perf/util/stat-shadow.c +++ b/tools/perf/util/stat-shadow.c @@ -209,12 +209,12 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count, int cpu, struct runtime_stat *st) { int ctx = evsel_context(counter); + u64 count_ns = count; count *= counter->scale; - if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK) || - perf_evsel__match(counter, SOFTWARE, SW_CPU_CLOCK)) - update_runtime_stat(st, STAT_NSECS, 0, cpu, count); + if (perf_evsel__is_clock(counter)) + update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns); else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count); else if (perf_stat_evsel__is(counter, CYCLES_IN_TX)) diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c index 1cbada2dc6be..f735ee038713 100644 --- a/tools/perf/util/svghelper.c +++ b/tools/perf/util/svghelper.c @@ -334,7 +334,7 @@ static char *cpu_model(void) if (file) { while (fgets(buf, 255, file)) { if (strstr(buf, "model name")) { - strncpy(cpu_m, &buf[13], 255); + strlcpy(cpu_m, &buf[13], 255); break; } } diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index d188b7588152..01f2c7385e38 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1537,17 +1537,6 @@ int dso__load(struct dso *dso, struct map *map) dso->adjust_symbols = 0; if (perfmap) { - struct stat st; - - if (lstat(map_path, &st) < 0) - goto out; - - if (!symbol_conf.force && st.st_uid && (st.st_uid != geteuid())) { - pr_warning("File %s not owned by current user or root, " - "ignoring it (use -f to override).\n", map_path); - goto out; - } - ret = dso__load_perf_map(map_path, dso); dso->symtab_type = ret > 0 ? 
DSO_BINARY_TYPE__JAVA_JIT : DSO_BINARY_TYPE__NOT_FOUND; @@ -1680,11 +1669,22 @@ struct map *map_groups__find_by_name(struct map_groups *mg, const char *name) { struct maps *maps = &mg->maps; struct map *map; + struct rb_node *node; down_read(&maps->lock); - for (map = maps__first(maps); map; map = map__next(map)) { - if (map->dso && strcmp(map->dso->short_name, name) == 0) + for (node = maps->names.rb_node; node; ) { + int rc; + + map = rb_entry(node, struct map, rb_node_name); + + rc = strcmp(map->dso->short_name, name); + if (rc < 0) + node = node->rb_left; + else if (rc > 0) + node = node->rb_right; + else + goto out_unlock; } diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h index d026d215bdc6..14d9d438e7e2 100644 --- a/tools/perf/util/symbol.h +++ b/tools/perf/util/symbol.h @@ -63,6 +63,7 @@ struct symbol { u8 ignore:1; u8 inlined:1; u8 arch_sym; + bool annotate2; char name[0]; }; diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c index 3d9ed7d0e281..c83372329f89 100644 --- a/tools/perf/util/thread.c +++ b/tools/perf/util/thread.c @@ -64,6 +64,7 @@ struct thread *thread__new(pid_t pid, pid_t tid) RB_CLEAR_NODE(&thread->rb_node); /* Thread holds first ref to nsdata. */ thread->nsinfo = nsinfo__new(pid); + srccode_state_init(&thread->srccode_state); } return thread; @@ -103,6 +104,7 @@ void thread__delete(struct thread *thread) unwind__finish_access(thread); nsinfo__zput(thread->nsinfo); + srccode_state_free(&thread->srccode_state); exit_rwsem(&thread->namespaces_lock); exit_rwsem(&thread->comm_lock); diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h index 30e2b4c165fe..712dd48cc0ca 100644 --- a/tools/perf/util/thread.h +++ b/tools/perf/util/thread.h @@ -8,6 +8,7 @@ #include <unistd.h> #include <sys/types.h> #include "symbol.h" +#include "map.h" #include <strlist.h> #include <intlist.h> #include "rwsem.h" @@ -38,6 +39,7 @@ struct thread { void *priv; struct thread_stack *ts; struct nsinfo *nsinfo; + struct srccode_state srccode_state; #ifdef HAVE_LIBUNWIND_SUPPORT void *addr_space; struct unwind_libunwind_ops *unwind_libunwind_ops; @@ -96,9 +98,13 @@ struct thread *thread__main_thread(struct machine *machine, struct thread *threa struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr, struct addr_location *al); +struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr, + struct addr_location *al); struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode, u64 addr, struct addr_location *al); +struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode, + u64 addr, struct addr_location *al); void thread__find_cpumode_addr_location(struct thread *thread, u64 addr, struct addr_location *al); diff --git a/tools/perf/util/top.c b/tools/perf/util/top.c index 8e517def925b..4c8da8c4435f 100644 --- a/tools/perf/util/top.c +++ b/tools/perf/util/top.c @@ -46,8 +46,9 @@ size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) samples_per_sec; ret = SNPRINTF(bf, size, " PerfTop:%8.0f irqs/sec kernel:%4.1f%%" - " exact: %4.1f%% [", samples_per_sec, - ksamples_percent, esamples_percent); + " exact: %4.1f%% lost: %" PRIu64 "/%" PRIu64 " drop: %" PRIu64 "/%" PRIu64 " [", + samples_per_sec, ksamples_percent, esamples_percent, + top->lost, top->lost_total, top->drop, top->drop_total); } else { float us_samples_per_sec = top->us_samples / top->delay_secs; float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs; @@ -106,6 +107,7 @@ size_t 
perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size) top->evlist->cpus->nr > 1 ? "s" : ""); } + perf_top__reset_sample_counters(top); return ret; } @@ -113,5 +115,5 @@ void perf_top__reset_sample_counters(struct perf_top *top) { top->samples = top->us_samples = top->kernel_samples = top->exact_samples = top->guest_kernel_samples = - top->guest_us_samples = 0; + top->guest_us_samples = top->lost = top->drop = 0; } diff --git a/tools/perf/util/top.h b/tools/perf/util/top.h index 9add1f72ce95..19f95eaf75c8 100644 --- a/tools/perf/util/top.h +++ b/tools/perf/util/top.h @@ -22,7 +22,7 @@ struct perf_top { * Symbols will be added here in perf_event__process_sample and will * get out after decayed. */ - u64 samples; + u64 samples, lost, lost_total, drop, drop_total; u64 kernel_samples, us_samples; u64 exact_samples; u64 guest_us_samples, guest_kernel_samples; @@ -40,6 +40,14 @@ struct perf_top { const char *sym_filter; float min_percent; unsigned int nr_threads_synthesize; + + struct { + struct ordered_events *in; + struct ordered_events data[2]; + bool rotate; + pthread_mutex_t mutex; + pthread_cond_t cond; + } qe; }; #define CONSOLE_CLEAR "[H[2J" diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c index 32e558a65af3..ad74be1f0e42 100644 --- a/tools/perf/util/trace-event-parse.c +++ b/tools/perf/util/trace-event-parse.c @@ -33,7 +33,7 @@ static int get_common_field(struct scripting_context *context, int *offset, int *size, const char *type) { struct tep_handle *pevent = context->pevent; - struct tep_event_format *event; + struct tep_event *event; struct tep_format_field *field; if (!*size) { @@ -95,7 +95,7 @@ int common_pc(struct scripting_context *context) } unsigned long long -raw_field_value(struct tep_event_format *event, const char *name, void *data) +raw_field_value(struct tep_event *event, const char *name, void *data) { struct tep_format_field *field; unsigned long long val; @@ -109,12 +109,12 @@ raw_field_value(struct tep_event_format *event, const char *name, void *data) return val; } -unsigned long long read_size(struct tep_event_format *event, void *ptr, int size) +unsigned long long read_size(struct tep_event *event, void *ptr, int size) { return tep_read_number(event->pevent, ptr, size); } -void event_format__fprintf(struct tep_event_format *event, +void event_format__fprintf(struct tep_event *event, int cpu, void *data, int size, FILE *fp) { struct tep_record record; @@ -131,7 +131,7 @@ void event_format__fprintf(struct tep_event_format *event, trace_seq_destroy(&s); } -void event_format__print(struct tep_event_format *event, +void event_format__print(struct tep_event *event, int cpu, void *data, int size) { return event_format__fprintf(event, cpu, data, size, stdout); @@ -190,12 +190,12 @@ int parse_event_file(struct tep_handle *pevent, return tep_parse_event(pevent, buf, size, sys); } -struct tep_event_format *trace_find_next_event(struct tep_handle *pevent, - struct tep_event_format *event) +struct tep_event *trace_find_next_event(struct tep_handle *pevent, + struct tep_event *event) { static int idx; int events_count; - struct tep_event_format *all_events; + struct tep_event *all_events; all_events = tep_get_first_event(pevent); events_count = tep_get_events_count(pevent); diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c index 76f12c705ef9..efe2f58cff4e 100644 --- a/tools/perf/util/trace-event-read.c +++ b/tools/perf/util/trace-event-read.c @@ -102,7 +102,7 @@ static unsigned int 
read4(struct tep_handle *pevent) if (do_read(&data, 4) < 0) return 0; - return __tep_data2host4(pevent, data); + return tep_read_number(pevent, &data, 4); } static unsigned long long read8(struct tep_handle *pevent) @@ -111,7 +111,7 @@ static unsigned long long read8(struct tep_handle *pevent) if (do_read(&data, 8) < 0) return 0; - return __tep_data2host8(pevent, data); + return tep_read_number(pevent, &data, 8); } static char *read_string(void) diff --git a/tools/perf/util/trace-event.c b/tools/perf/util/trace-event.c index 95664b2f771e..cbe0dd758e3a 100644 --- a/tools/perf/util/trace-event.c +++ b/tools/perf/util/trace-event.c @@ -72,12 +72,12 @@ void trace_event__cleanup(struct trace_event *t) /* * Returns pointer with encoded error via <linux/err.h> interface. */ -static struct tep_event_format* +static struct tep_event* tp_format(const char *sys, const char *name) { char *tp_dir = get_events_file(sys); struct tep_handle *pevent = tevent.pevent; - struct tep_event_format *event = NULL; + struct tep_event *event = NULL; char path[PATH_MAX]; size_t size; char *data; @@ -102,7 +102,7 @@ tp_format(const char *sys, const char *name) /* * Returns pointer with encoded error via <linux/err.h> interface. */ -struct tep_event_format* +struct tep_event* trace_event__tp_format(const char *sys, const char *name) { if (!tevent_initialized && trace_event__init2()) @@ -111,7 +111,7 @@ trace_event__tp_format(const char *sys, const char *name) return tp_format(sys, name); } -struct tep_event_format *trace_event__tp_format_id(int id) +struct tep_event *trace_event__tp_format_id(int id) { if (!tevent_initialized && trace_event__init2()) return ERR_PTR(-ENOMEM); diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index f024d73bfc40..d9b0a942090a 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h @@ -22,17 +22,17 @@ int trace_event__init(struct trace_event *t); void trace_event__cleanup(struct trace_event *t); int trace_event__register_resolver(struct machine *machine, tep_func_resolver_t *func); -struct tep_event_format* +struct tep_event* trace_event__tp_format(const char *sys, const char *name); -struct tep_event_format *trace_event__tp_format_id(int id); +struct tep_event *trace_event__tp_format_id(int id); int bigendian(void); -void event_format__fprintf(struct tep_event_format *event, +void event_format__fprintf(struct tep_event *event, int cpu, void *data, int size, FILE *fp); -void event_format__print(struct tep_event_format *event, +void event_format__print(struct tep_event *event, int cpu, void *data, int size); int parse_ftrace_file(struct tep_handle *pevent, char *buf, unsigned long size); @@ -40,7 +40,7 @@ int parse_event_file(struct tep_handle *pevent, char *buf, unsigned long size, char *sys); unsigned long long -raw_field_value(struct tep_event_format *event, const char *name, void *data); +raw_field_value(struct tep_event *event, const char *name, void *data); void parse_proc_kallsyms(struct tep_handle *pevent, char *file, unsigned int size); void parse_ftrace_printk(struct tep_handle *pevent, char *file, unsigned int size); @@ -48,9 +48,9 @@ void parse_saved_cmdline(struct tep_handle *pevent, char *file, unsigned int siz ssize_t trace_report(int fd, struct trace_event *tevent, bool repipe); -struct tep_event_format *trace_find_next_event(struct tep_handle *pevent, - struct tep_event_format *event); -unsigned long long read_size(struct tep_event_format *event, void *ptr, int size); +struct tep_event *trace_find_next_event(struct tep_handle 
*pevent, + struct tep_event *event); +unsigned long long read_size(struct tep_event *event, void *ptr, int size); unsigned long long eval_flag(const char *flag); int read_tracing_data(int fd, struct list_head *pattrs); diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h index 14508ee7707a..ece040b799f6 100644 --- a/tools/perf/util/util.h +++ b/tools/perf/util/util.h @@ -59,6 +59,10 @@ int fetch_kernel_version(unsigned int *puint, const char *perf_tip(const char *dirpath); +#ifndef HAVE_GET_CURRENT_DIR_NAME +char *get_current_dir_name(void); +#endif + #ifndef HAVE_SCHED_GETCPU_SUPPORT int sched_getcpu(void); #endif
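The HAVE_GET_CURRENT_DIR_NAME guard added to util.h above exists because some C libraries (musl, for example) do not provide the GNU get_current_dir_name() that nsinfo__mountns_enter() now uses to remember the working directory before setns(). A minimal fallback sketch, assuming PATH_MAX is large enough and that the caller frees the result as with the GNU function; this is an illustration, not necessarily the exact helper shipped in tools/perf:

/*
 * Sketch of a get_current_dir_name() fallback: getcwd() into a stack
 * buffer, then strdup() so the caller can free() the result.
 */
#include <limits.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#ifndef HAVE_GET_CURRENT_DIR_NAME
char *get_current_dir_name(void)
{
	char pwd[PATH_MAX];

	return getcwd(pwd, sizeof(pwd)) == NULL ? NULL : strdup(pwd);
}
#endif

nsinfo__mountns_exit() then chdir()s back to the saved path and frees it, which is why struct nscookie gained the oldcwd field.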
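The maps->names rbtree introduced in map.c replaces the linear walk that map_groups__find_by_name() used to do with a lookup ordered by dso->short_name. The sketch below shows the same ordered-by-name lookup with a plain sorted array and bsearch() instead of the kernel rbtree API; struct fake_map and its entries are invented for illustration and are not perf data structures:

/*
 * Ordered-by-name lookup sketch: a sorted array plus bsearch() standing
 * in for the names rbtree. struct fake_map and its contents are made up.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_map {
	const char *short_name;
	unsigned long start;
};

static int cmp_name(const void *key, const void *entry)
{
	return strcmp(key, ((const struct fake_map *)entry)->short_name);
}

int main(void)
{
	/* Kept sorted by short_name, as __maps__insert_name() keeps the tree. */
	struct fake_map maps[] = {
		{ "[kernel.kallsyms]", 0xc0000000UL },
		{ "libc-2.28.so",      0xb7000000UL },
		{ "perf",              0x00400000UL },
	};
	struct fake_map *m = bsearch("libc-2.28.so", maps,
				     sizeof(maps) / sizeof(maps[0]),
				     sizeof(maps[0]), cmp_name);

	if (m)
		printf("%s starts at %#lx\n", m->short_name, m->start);
	return 0;
}

Keeping the entries ordered at insert time (__maps__insert_name() above) is what turns every by-name lookup into an O(log n) search instead of a scan over all maps.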
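The two-step copy in perf_mmap__aio_push() deals with a chunk of profiling data that wraps past the top of the power-of-two kernel ring buffer: the part up to the upper bound is copied first, then the remainder from the start of the buffer. A self-contained sketch of that wrap-around handling on a toy 8-byte ring; ring_copy(), RING_SIZE and the sample data are illustrative assumptions, not perf APIs:

/*
 * Sketch of the wrap-around copy: when [start, end) crosses the top of a
 * power-of-two ring, copy the tail first, then the rest from offset 0.
 * RING_SIZE, RING_MASK and ring_copy() are illustrative, not perf names.
 */
#include <assert.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 8			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

static size_t ring_copy(const unsigned char *ring, unsigned long start,
			unsigned long end, unsigned char *out)
{
	unsigned long size = end - start, size0 = 0;

	if ((start & RING_MASK) + size != (end & RING_MASK)) {
		/* Chunk crosses the upper bound: copy up to the top first. */
		size0 = RING_SIZE - (start & RING_MASK);
		memcpy(out, &ring[start & RING_MASK], size0);
		start += size0;
		size -= size0;
	}
	memcpy(out + size0, &ring[start & RING_MASK], size);
	return size0 + size;
}

int main(void)
{
	unsigned char ring[RING_SIZE] = "abcdefgh";
	unsigned char out[RING_SIZE] = { 0 };
	size_t n = ring_copy(ring, 6, 10, out);	/* positions 6..9 wrap */

	assert(memcmp(out, "ghab", 4) == 0);
	printf("copied %zu bytes: %.4s\n", n, (const char *)out);
	return 0;
}

For a chunk spanning positions 6..9 the first memcpy() takes "gh" up to the top of the ring and the second takes "ab" from its start, matching how size0 and size add up to the full chunk handed to the queued aio write.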