author     Ingo Molnar <mingo@kernel.org>   2018-02-17 13:39:47 +0300
committer  Ingo Molnar <mingo@kernel.org>   2018-02-17 13:39:47 +0300
commit     11737ca9e3b9d84448fa405a80980aa9957bcee8 (patch)
tree       33b79a0c5a5c96344fe6f72e3a37104385a90ea7 /tools/perf/util
parent     7057bb975dab827997e0ca9dd92cafef0856b0cc (diff)
parent     21316ac6803d4a1aadd74b896db8d60a92cd1140 (diff)
download   linux-11737ca9e3b9d84448fa405a80980aa9957bcee8.tar.xz
Merge tag 'perf-core-for-mingo-4.17-20180216' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux into perf/core
Pull perf/core improvements and fixes from Arnaldo Carvalho de Melo:
- Fix wrong jump arrow in systems with branch records with cycles,
i.e. Intel's >= Skylake (Jin Yao)
- Fix 'perf record --per-thread' problem introduced when
implementing 'perf stat --per-thread' (Jin Yao)
- Use arch__compare_symbol_names() to fix 'perf test vmlinux',
which was comparing symbol names with plain strcmp() while the dso
routines doing symbol lookups used the arch-overridable comparator,
making this test fail on architectures that override that function
with something other than strcmp() (Jiri Olsa); the weak-override
pattern is sketched after the sign-offs below
- Add 'perf script --show-round-event' to display
PERF_RECORD_FINISHED_ROUND entries (Jiri Olsa)
- Fix dwarf unwind for stripped binaries in 'perf test' (Jiri Olsa)
- Use ordered_events for 'perf report --tasks', otherwise we may get
artifacts when PERF_RECORD_FORK gets processed before PERF_RECORD_COMM
(when they were recorded on different CPUs) (Jiri Olsa)
- Add support to display group output for non-grouped events, i.e.
now when one uses 'perf report --group' on a perf.data file
recorded without explicitly grouping events with {}, one gets the
same output that grouping (e.g. "perf record -e '{cycles,instructions}'")
would produce, i.e. all those non-grouped events shown in
multiple columns at the same time (Jiri Olsa)
- Skip non-address kallsyms entries, e.g. '(null)' for !root (Jiri Olsa)
- Kernel maps fixes wrt perf.data(report) versus live system (top)
(Jiri Olsa)
- Fix memory corruption when using 'perf record -j call -g -a <application>'
followed by 'perf report --branch-history' (Jiri Olsa)
- ARM CoreSight fixes (Mathieu Poirier)
- Add inject capability for CoreSight Traces (Robert Walker)
- Update documentation for use of 'perf' + ARM CoreSight (Robert Walker)
- Man pages fixes (Sangwon Hong, Jaecheol Shin)
- Fix some 'perf test' cases on s/390 and x86_64 (some backtraces
changed with a glibc update) (Thomas Richter)
- Add detailed CPUID info in the 'perf.data' headers for s/390 to
then use it in 'perf annotate' (Thomas Richter)
- Add '--interval-count N' to 'perf stat', to use with -I, i.e.
'perf stat -I 1000 --interval-count 2' will show stats every
1000ms, two times (yuzhoujian)
- Add 'perf stat --timeout Nms', that will run for that many
milliseconds and then stop, printing the counters (yuzhoujian)
- Fix description for 'perf report --mem-mode' (Andi Kleen)
- Use a wildcard to remove the vfs_getname probe in the
'perf test' shell based test cases (Arnaldo Carvalho de Melo)
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
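
The arch__compare_symbol_names() fix and the s/390 CPUID work above both lean on perf's weak-symbol convention: a generic helper is defined __weak and an architecture can supply a strong definition that wins at link time. Below is a minimal standalone sketch of that pattern; the powerpc dot-stripping behaviour shown in the comment is recalled from other perf code, not part of this pull.

#include <stdio.h>
#include <string.h>

/*
 * Generic fallback, as in tools/perf/util/symbol.c: the weak attribute
 * lets a strong definition in an arch object file, linked into the same
 * binary, take precedence without any #ifdef in the generic code.
 */
int __attribute__((weak)) arch__compare_symbol_names(const char *namea,
						      const char *nameb)
{
	return strcmp(namea, nameb);
}

/*
 * A separate arch file (e.g. powerpc, which may see a leading '.' on
 * function symbols) would provide a strong definition along these lines,
 * shown as a comment so this sketch stays a single file:
 *
 *	int arch__compare_symbol_names(const char *a, const char *b)
 *	{
 *		return strcmp(a + (*a == '.'), b + (*b == '.'));
 *	}
 */

int main(void)
{
	printf("%d\n", arch__compare_symbol_names("vfs_getname", "vfs_getname"));
	return 0;
}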
Diffstat (limited to 'tools/perf/util')
-rw-r--r--  tools/perf/util/build-id.c                       |  10
-rw-r--r--  tools/perf/util/cs-etm-decoder/cs-etm-decoder.c  |  74
-rw-r--r--  tools/perf/util/cs-etm-decoder/cs-etm-decoder.h  |   2
-rw-r--r--  tools/perf/util/cs-etm.c                         | 478
-rw-r--r--  tools/perf/util/event.c                          |  16
-rw-r--r--  tools/perf/util/evlist.c                         |  21
-rw-r--r--  tools/perf/util/header.h                         |   1
-rw-r--r--  tools/perf/util/hist.c                           |   4
-rw-r--r--  tools/perf/util/hist.h                           |   1
-rw-r--r--  tools/perf/util/machine.c                        | 145
-rw-r--r--  tools/perf/util/machine.h                        |   6
-rw-r--r--  tools/perf/util/pmu.c                            |  47
-rw-r--r--  tools/perf/util/sort.c                           |   7
-rw-r--r--  tools/perf/util/stat.h                           |   2
-rw-r--r--  tools/perf/util/symbol.c                         |  13
-rw-r--r--  tools/perf/util/syscalltbl.c                     |   8
-rw-r--r--  tools/perf/util/thread_map.c                     |   4
-rw-r--r--  tools/perf/util/thread_map.h                     |   2
18 files changed, 638 insertions(+), 203 deletions(-)
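
Most of the new code in the diff below is the CoreSight ETM instruction/branch sample synthesis in cs-etm.c. Since the hunks are dense, here is a minimal standalone sketch (simplified types and an arbitrary size, not the perf code itself) of the last-branch ring buffer it maintains: branches are inserted newest-first from the end of the array, and flattening the buffer for a sample takes two memcpy() steps, which is the shape of cs_etm__update_last_branch_rb() and cs_etm__copy_last_branch_rb().

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define LAST_BRANCH_SZ 8	/* stands in for synth_opts.last_branch_sz */

struct branch_entry { uint64_t from, to; };

struct branch_rb {
	uint64_t nr;			/* valid entries, <= LAST_BRANCH_SZ */
	size_t pos;			/* index of the most recent entry   */
	struct branch_entry entries[LAST_BRANCH_SZ];
};

/* Insert newest-first, walking backwards and wrapping to the end. */
static void record_branch(struct branch_rb *rb, uint64_t from, uint64_t to)
{
	if (!rb->pos)
		rb->pos = LAST_BRANCH_SZ;
	rb->pos--;
	rb->entries[rb->pos] = (struct branch_entry){ .from = from, .to = to };
	if (rb->nr < LAST_BRANCH_SZ)
		rb->nr++;
}

/*
 * Flatten into most-recent-first order in two steps: the slice from the
 * newest entry to the end of the array, then, if the buffer has wrapped
 * at least once, the older slice from the start of the array up to pos.
 */
static void copy_last_branch(const struct branch_rb *rb,
			     struct branch_entry *dst, uint64_t *dst_nr)
{
	size_t tail = LAST_BRANCH_SZ - rb->pos;

	*dst_nr = rb->nr;
	if (!rb->nr)
		return;

	memcpy(dst, &rb->entries[rb->pos], tail * sizeof(*dst));
	if (rb->nr >= LAST_BRANCH_SZ)
		memcpy(dst + tail, &rb->entries[0], rb->pos * sizeof(*dst));
}

int main(void)
{
	struct branch_rb rb = { 0 };
	struct branch_entry out[LAST_BRANCH_SZ];
	uint64_t nr, i;

	for (i = 0; i < 11; i++)	/* more inserts than slots: force a wrap */
		record_branch(&rb, 0x1000 + i * 4, 0x2000 + i * 4);

	copy_last_branch(&rb, out, &nr);
	for (i = 0; i < nr; i++)	/* printed newest first */
		printf("%#llx -> %#llx\n", (unsigned long long)out[i].from,
		       (unsigned long long)out[i].to);
	return 0;
}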
diff --git a/tools/perf/util/build-id.c b/tools/perf/util/build-id.c index 7f8553630c4d..537eadd81914 100644 --- a/tools/perf/util/build-id.c +++ b/tools/perf/util/build-id.c @@ -316,7 +316,6 @@ static int machine__write_buildid_table(struct machine *machine, struct feat_fd *fd) { int err = 0; - char nm[PATH_MAX]; struct dso *pos; u16 kmisc = PERF_RECORD_MISC_KERNEL, umisc = PERF_RECORD_MISC_USER; @@ -338,9 +337,8 @@ static int machine__write_buildid_table(struct machine *machine, name = pos->short_name; name_len = pos->short_name_len; } else if (dso__is_kcore(pos)) { - machine__mmap_name(machine, nm, sizeof(nm)); - name = nm; - name_len = strlen(nm); + name = machine->mmap_name; + name_len = strlen(name); } else { name = pos->long_name; name_len = pos->long_name_len; @@ -813,12 +811,10 @@ static int dso__cache_build_id(struct dso *dso, struct machine *machine) bool is_kallsyms = dso__is_kallsyms(dso); bool is_vdso = dso__is_vdso(dso); const char *name = dso->long_name; - char nm[PATH_MAX]; if (dso__is_kcore(dso)) { is_kallsyms = true; - machine__mmap_name(machine, nm, sizeof(nm)); - name = nm; + name = machine->mmap_name; } return build_id_cache__add_b(dso->build_id, sizeof(dso->build_id), name, dso->nsinfo, is_kallsyms, is_vdso); diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c index 1fb01849f1c7..640af88331b4 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c @@ -78,6 +78,8 @@ int cs_etm_decoder__reset(struct cs_etm_decoder *decoder) { ocsd_datapath_resp_t dp_ret; + decoder->prev_return = OCSD_RESP_CONT; + dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET, 0, 0, NULL, NULL); if (OCSD_DATA_RESP_IS_FATAL(dp_ret)) @@ -253,16 +255,16 @@ static void cs_etm_decoder__clear_buffer(struct cs_etm_decoder *decoder) decoder->packet_count = 0; for (i = 0; i < MAX_BUFFER; i++) { decoder->packet_buffer[i].start_addr = 0xdeadbeefdeadbeefUL; - decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL; - decoder->packet_buffer[i].exc = false; - decoder->packet_buffer[i].exc_ret = false; - decoder->packet_buffer[i].cpu = INT_MIN; + decoder->packet_buffer[i].end_addr = 0xdeadbeefdeadbeefUL; + decoder->packet_buffer[i].last_instr_taken_branch = false; + decoder->packet_buffer[i].exc = false; + decoder->packet_buffer[i].exc_ret = false; + decoder->packet_buffer[i].cpu = INT_MIN; } } static ocsd_datapath_resp_t cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder, - const ocsd_generic_trace_elem *elem, const u8 trace_chan_id, enum cs_etm_sample_type sample_type) { @@ -278,18 +280,16 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder *decoder, return OCSD_RESP_FATAL_SYS_ERR; et = decoder->tail; + et = (et + 1) & (MAX_BUFFER - 1); + decoder->tail = et; + decoder->packet_count++; + decoder->packet_buffer[et].sample_type = sample_type; - decoder->packet_buffer[et].start_addr = elem->st_addr; - decoder->packet_buffer[et].end_addr = elem->en_addr; decoder->packet_buffer[et].exc = false; decoder->packet_buffer[et].exc_ret = false; decoder->packet_buffer[et].cpu = *((int *)inode->priv); - - /* Wrap around if need be */ - et = (et + 1) & (MAX_BUFFER - 1); - - decoder->tail = et; - decoder->packet_count++; + decoder->packet_buffer[et].start_addr = 0xdeadbeefdeadbeefUL; + decoder->packet_buffer[et].end_addr = 0xdeadbeefdeadbeefUL; if (decoder->packet_count == MAX_BUFFER - 1) return OCSD_RESP_WAIT; @@ -297,6 +297,47 @@ cs_etm_decoder__buffer_packet(struct cs_etm_decoder 
*decoder, return OCSD_RESP_CONT; } +static ocsd_datapath_resp_t +cs_etm_decoder__buffer_range(struct cs_etm_decoder *decoder, + const ocsd_generic_trace_elem *elem, + const uint8_t trace_chan_id) +{ + int ret = 0; + struct cs_etm_packet *packet; + + ret = cs_etm_decoder__buffer_packet(decoder, trace_chan_id, + CS_ETM_RANGE); + if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT) + return ret; + + packet = &decoder->packet_buffer[decoder->tail]; + + packet->start_addr = elem->st_addr; + packet->end_addr = elem->en_addr; + switch (elem->last_i_type) { + case OCSD_INSTR_BR: + case OCSD_INSTR_BR_INDIRECT: + packet->last_instr_taken_branch = elem->last_instr_exec; + break; + case OCSD_INSTR_ISB: + case OCSD_INSTR_DSB_DMB: + case OCSD_INSTR_OTHER: + default: + packet->last_instr_taken_branch = false; + break; + } + + return ret; +} + +static ocsd_datapath_resp_t +cs_etm_decoder__buffer_trace_on(struct cs_etm_decoder *decoder, + const uint8_t trace_chan_id) +{ + return cs_etm_decoder__buffer_packet(decoder, trace_chan_id, + CS_ETM_TRACE_ON); +} + static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer( const void *context, const ocsd_trc_index_t indx __maybe_unused, @@ -313,12 +354,13 @@ static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer( decoder->trace_on = false; break; case OCSD_GEN_TRC_ELEM_TRACE_ON: + resp = cs_etm_decoder__buffer_trace_on(decoder, + trace_chan_id); decoder->trace_on = true; break; case OCSD_GEN_TRC_ELEM_INSTR_RANGE: - resp = cs_etm_decoder__buffer_packet(decoder, elem, - trace_chan_id, - CS_ETM_RANGE); + resp = cs_etm_decoder__buffer_range(decoder, elem, + trace_chan_id); break; case OCSD_GEN_TRC_ELEM_EXCEPTION: decoder->packet_buffer[decoder->tail].exc = true; diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h index 3d2e6205d186..743f5f444304 100644 --- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h +++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.h @@ -24,12 +24,14 @@ struct cs_etm_buffer { enum cs_etm_sample_type { CS_ETM_RANGE = 1 << 0, + CS_ETM_TRACE_ON = 1 << 1, }; struct cs_etm_packet { enum cs_etm_sample_type sample_type; u64 start_addr; u64 end_addr; + u8 last_instr_taken_branch; u8 exc; u8 exc_ret; int cpu; diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c index b9f0a53dfa65..1b0d422373be 100644 --- a/tools/perf/util/cs-etm.c +++ b/tools/perf/util/cs-etm.c @@ -32,6 +32,14 @@ #define MAX_TIMESTAMP (~0ULL) +/* + * A64 instructions are always 4 bytes + * + * Only A64 is supported, so can use this constant for converting between + * addresses and instruction counts, calculting offsets etc + */ +#define A64_INSTR_SIZE 4 + struct cs_etm_auxtrace { struct auxtrace auxtrace; struct auxtrace_queues queues; @@ -45,11 +53,15 @@ struct cs_etm_auxtrace { u8 snapshot_mode; u8 data_queued; u8 sample_branches; + u8 sample_instructions; int num_cpu; u32 auxtrace_type; u64 branches_sample_type; u64 branches_id; + u64 instructions_sample_type; + u64 instructions_sample_period; + u64 instructions_id; u64 **metadata; u64 kernel_start; unsigned int pmu_type; @@ -68,6 +80,12 @@ struct cs_etm_queue { u64 time; u64 timestamp; u64 offset; + u64 period_instructions; + struct branch_stack *last_branch; + struct branch_stack *last_branch_rb; + size_t last_branch_pos; + struct cs_etm_packet *prev_packet; + struct cs_etm_packet *packet; }; static int cs_etm__update_queues(struct cs_etm_auxtrace *etm); @@ -174,6 +192,16 @@ static void cs_etm__free_queue(void *priv) { struct cs_etm_queue 
*etmq = priv; + if (!etmq) + return; + + thread__zput(etmq->thread); + cs_etm_decoder__free(etmq->decoder); + zfree(&etmq->event_buf); + zfree(&etmq->last_branch); + zfree(&etmq->last_branch_rb); + zfree(&etmq->prev_packet); + zfree(&etmq->packet); free(etmq); } @@ -270,11 +298,35 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm, struct cs_etm_decoder_params d_params; struct cs_etm_trace_params *t_params; struct cs_etm_queue *etmq; + size_t szp = sizeof(struct cs_etm_packet); etmq = zalloc(sizeof(*etmq)); if (!etmq) return NULL; + etmq->packet = zalloc(szp); + if (!etmq->packet) + goto out_free; + + if (etm->synth_opts.last_branch || etm->sample_branches) { + etmq->prev_packet = zalloc(szp); + if (!etmq->prev_packet) + goto out_free; + } + + if (etm->synth_opts.last_branch) { + size_t sz = sizeof(struct branch_stack); + + sz += etm->synth_opts.last_branch_sz * + sizeof(struct branch_entry); + etmq->last_branch = zalloc(sz); + if (!etmq->last_branch) + goto out_free; + etmq->last_branch_rb = zalloc(sz); + if (!etmq->last_branch_rb) + goto out_free; + } + etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE); if (!etmq->event_buf) goto out_free; @@ -329,6 +381,7 @@ static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm, goto out_free_decoder; etmq->offset = 0; + etmq->period_instructions = 0; return etmq; @@ -336,6 +389,10 @@ out_free_decoder: cs_etm_decoder__free(etmq->decoder); out_free: zfree(&etmq->event_buf); + zfree(&etmq->last_branch); + zfree(&etmq->last_branch_rb); + zfree(&etmq->prev_packet); + zfree(&etmq->packet); free(etmq); return NULL; @@ -389,6 +446,129 @@ static int cs_etm__update_queues(struct cs_etm_auxtrace *etm) return 0; } +static inline void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq) +{ + struct branch_stack *bs_src = etmq->last_branch_rb; + struct branch_stack *bs_dst = etmq->last_branch; + size_t nr = 0; + + /* + * Set the number of records before early exit: ->nr is used to + * determine how many branches to copy from ->entries. + */ + bs_dst->nr = bs_src->nr; + + /* + * Early exit when there is nothing to copy. + */ + if (!bs_src->nr) + return; + + /* + * As bs_src->entries is a circular buffer, we need to copy from it in + * two steps. First, copy the branches from the most recently inserted + * branch ->last_branch_pos until the end of bs_src->entries buffer. + */ + nr = etmq->etm->synth_opts.last_branch_sz - etmq->last_branch_pos; + memcpy(&bs_dst->entries[0], + &bs_src->entries[etmq->last_branch_pos], + sizeof(struct branch_entry) * nr); + + /* + * If we wrapped around at least once, the branches from the beginning + * of the bs_src->entries buffer and until the ->last_branch_pos element + * are older valid branches: copy them over. The total number of + * branches copied over will be equal to the number of branches asked by + * the user in last_branch_sz. 
+ */ + if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) { + memcpy(&bs_dst->entries[nr], + &bs_src->entries[0], + sizeof(struct branch_entry) * etmq->last_branch_pos); + } +} + +static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq) +{ + etmq->last_branch_pos = 0; + etmq->last_branch_rb->nr = 0; +} + +static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet) +{ + /* + * The packet records the execution range with an exclusive end address + * + * A64 instructions are constant size, so the last executed + * instruction is A64_INSTR_SIZE before the end address + * Will need to do instruction level decode for T32 instructions as + * they can be variable size (not yet supported). + */ + return packet->end_addr - A64_INSTR_SIZE; +} + +static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet) +{ + /* + * Only A64 instructions are currently supported, so can get + * instruction count by dividing. + * Will need to do instruction level decode for T32 instructions as + * they can be variable size (not yet supported). + */ + return (packet->end_addr - packet->start_addr) / A64_INSTR_SIZE; +} + +static inline u64 cs_etm__instr_addr(const struct cs_etm_packet *packet, + u64 offset) +{ + /* + * Only A64 instructions are currently supported, so can get + * instruction address by muliplying. + * Will need to do instruction level decode for T32 instructions as + * they can be variable size (not yet supported). + */ + return packet->start_addr + offset * A64_INSTR_SIZE; +} + +static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq) +{ + struct branch_stack *bs = etmq->last_branch_rb; + struct branch_entry *be; + + /* + * The branches are recorded in a circular buffer in reverse + * chronological order: we start recording from the last element of the + * buffer down. After writing the first element of the stack, move the + * insert position back to the end of the buffer. + */ + if (!etmq->last_branch_pos) + etmq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz; + + etmq->last_branch_pos -= 1; + + be = &bs->entries[etmq->last_branch_pos]; + be->from = cs_etm__last_executed_instr(etmq->prev_packet); + be->to = etmq->packet->start_addr; + /* No support for mispredict */ + be->flags.mispred = 0; + be->flags.predicted = 1; + + /* + * Increment bs->nr until reaching the number of last branches asked by + * the user on the command line. 
+ */ + if (bs->nr < etmq->etm->synth_opts.last_branch_sz) + bs->nr += 1; +} + +static int cs_etm__inject_event(union perf_event *event, + struct perf_sample *sample, u64 type) +{ + event->header.size = perf_event__sample_event_size(sample, type, 0); + return perf_event__synthesize_sample(event, type, 0, sample); +} + + static int cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq) { @@ -453,35 +633,105 @@ static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm, } } +static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq, + u64 addr, u64 period) +{ + int ret = 0; + struct cs_etm_auxtrace *etm = etmq->etm; + union perf_event *event = etmq->event_buf; + struct perf_sample sample = {.ip = 0,}; + + event->sample.header.type = PERF_RECORD_SAMPLE; + event->sample.header.misc = PERF_RECORD_MISC_USER; + event->sample.header.size = sizeof(struct perf_event_header); + + sample.ip = addr; + sample.pid = etmq->pid; + sample.tid = etmq->tid; + sample.id = etmq->etm->instructions_id; + sample.stream_id = etmq->etm->instructions_id; + sample.period = period; + sample.cpu = etmq->packet->cpu; + sample.flags = 0; + sample.insn_len = 1; + sample.cpumode = event->header.misc; + + if (etm->synth_opts.last_branch) { + cs_etm__copy_last_branch_rb(etmq); + sample.branch_stack = etmq->last_branch; + } + + if (etm->synth_opts.inject) { + ret = cs_etm__inject_event(event, &sample, + etm->instructions_sample_type); + if (ret) + return ret; + } + + ret = perf_session__deliver_synth_event(etm->session, event, &sample); + + if (ret) + pr_err( + "CS ETM Trace: failed to deliver instruction event, error %d\n", + ret); + + if (etm->synth_opts.last_branch) + cs_etm__reset_last_branch_rb(etmq); + + return ret; +} + /* * The cs etm packet encodes an instruction range between a branch target * and the next taken branch. Generate sample accordingly. 
*/ -static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq, - struct cs_etm_packet *packet) +static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq) { int ret = 0; struct cs_etm_auxtrace *etm = etmq->etm; struct perf_sample sample = {.ip = 0,}; union perf_event *event = etmq->event_buf; - u64 start_addr = packet->start_addr; - u64 end_addr = packet->end_addr; + struct dummy_branch_stack { + u64 nr; + struct branch_entry entries; + } dummy_bs; event->sample.header.type = PERF_RECORD_SAMPLE; event->sample.header.misc = PERF_RECORD_MISC_USER; event->sample.header.size = sizeof(struct perf_event_header); - sample.ip = start_addr; + sample.ip = cs_etm__last_executed_instr(etmq->prev_packet); sample.pid = etmq->pid; sample.tid = etmq->tid; - sample.addr = end_addr; + sample.addr = etmq->packet->start_addr; sample.id = etmq->etm->branches_id; sample.stream_id = etmq->etm->branches_id; sample.period = 1; - sample.cpu = packet->cpu; + sample.cpu = etmq->packet->cpu; sample.flags = 0; sample.cpumode = PERF_RECORD_MISC_USER; + /* + * perf report cannot handle events without a branch stack + */ + if (etm->synth_opts.last_branch) { + dummy_bs = (struct dummy_branch_stack){ + .nr = 1, + .entries = { + .from = sample.ip, + .to = sample.addr, + }, + }; + sample.branch_stack = (struct branch_stack *)&dummy_bs; + } + + if (etm->synth_opts.inject) { + ret = cs_etm__inject_event(event, &sample, + etm->branches_sample_type); + if (ret) + return ret; + } + ret = perf_session__deliver_synth_event(etm->session, event, &sample); if (ret) @@ -578,6 +828,24 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm, etm->sample_branches = true; etm->branches_sample_type = attr.sample_type; etm->branches_id = id; + id += 1; + attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR; + } + + if (etm->synth_opts.last_branch) + attr.sample_type |= PERF_SAMPLE_BRANCH_STACK; + + if (etm->synth_opts.instructions) { + attr.config = PERF_COUNT_HW_INSTRUCTIONS; + attr.sample_period = etm->synth_opts.period; + etm->instructions_sample_period = attr.sample_period; + err = cs_etm__synth_event(session, &attr, id); + if (err) + return err; + etm->sample_instructions = true; + etm->instructions_sample_type = attr.sample_type; + etm->instructions_id = id; + id += 1; } return 0; @@ -585,25 +853,108 @@ static int cs_etm__synth_events(struct cs_etm_auxtrace *etm, static int cs_etm__sample(struct cs_etm_queue *etmq) { + struct cs_etm_auxtrace *etm = etmq->etm; + struct cs_etm_packet *tmp; int ret; - struct cs_etm_packet packet; + u64 instrs_executed; - while (1) { - ret = cs_etm_decoder__get_packet(etmq->decoder, &packet); - if (ret <= 0) + instrs_executed = cs_etm__instr_count(etmq->packet); + etmq->period_instructions += instrs_executed; + + /* + * Record a branch when the last instruction in + * PREV_PACKET is a branch. 
+ */ + if (etm->synth_opts.last_branch && + etmq->prev_packet && + etmq->prev_packet->sample_type == CS_ETM_RANGE && + etmq->prev_packet->last_instr_taken_branch) + cs_etm__update_last_branch_rb(etmq); + + if (etm->sample_instructions && + etmq->period_instructions >= etm->instructions_sample_period) { + /* + * Emit instruction sample periodically + * TODO: allow period to be defined in cycles and clock time + */ + + /* Get number of instructions executed after the sample point */ + u64 instrs_over = etmq->period_instructions - + etm->instructions_sample_period; + + /* + * Calculate the address of the sampled instruction (-1 as + * sample is reported as though instruction has just been + * executed, but PC has not advanced to next instruction) + */ + u64 offset = (instrs_executed - instrs_over - 1); + u64 addr = cs_etm__instr_addr(etmq->packet, offset); + + ret = cs_etm__synth_instruction_sample( + etmq, addr, etm->instructions_sample_period); + if (ret) + return ret; + + /* Carry remaining instructions into next sample period */ + etmq->period_instructions = instrs_over; + } + + if (etm->sample_branches && + etmq->prev_packet && + etmq->prev_packet->sample_type == CS_ETM_RANGE && + etmq->prev_packet->last_instr_taken_branch) { + ret = cs_etm__synth_branch_sample(etmq); + if (ret) return ret; + } + if (etm->sample_branches || etm->synth_opts.last_branch) { /* - * If the packet contains an instruction range, generate an - * instruction sequence event. + * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for + * the next incoming packet. */ - if (packet.sample_type & CS_ETM_RANGE) - cs_etm__synth_branch_sample(etmq, &packet); + tmp = etmq->packet; + etmq->packet = etmq->prev_packet; + etmq->prev_packet = tmp; } return 0; } +static int cs_etm__flush(struct cs_etm_queue *etmq) +{ + int err = 0; + struct cs_etm_packet *tmp; + + if (etmq->etm->synth_opts.last_branch && + etmq->prev_packet && + etmq->prev_packet->sample_type == CS_ETM_RANGE) { + /* + * Generate a last branch event for the branches left in the + * circular buffer at the end of the trace. + * + * Use the address of the end of the last reported execution + * range + */ + u64 addr = cs_etm__last_executed_instr(etmq->prev_packet); + + err = cs_etm__synth_instruction_sample( + etmq, addr, + etmq->period_instructions); + etmq->period_instructions = 0; + + /* + * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for + * the next incoming packet. + */ + tmp = etmq->packet; + etmq->packet = etmq->prev_packet; + etmq->prev_packet = tmp; + } + + return err; +} + static int cs_etm__run_decoder(struct cs_etm_queue *etmq) { struct cs_etm_auxtrace *etm = etmq->etm; @@ -615,45 +966,72 @@ static int cs_etm__run_decoder(struct cs_etm_queue *etmq) etm->kernel_start = machine__kernel_start(etm->machine); /* Go through each buffer in the queue and decode them one by one */ -more: - buffer_used = 0; - memset(&buffer, 0, sizeof(buffer)); - err = cs_etm__get_trace(&buffer, etmq); - if (err <= 0) - return err; - /* - * We cannot assume consecutive blocks in the data file are contiguous, - * reset the decoder to force re-sync. 
- */ - err = cs_etm_decoder__reset(etmq->decoder); - if (err != 0) - return err; - - /* Run trace decoder until buffer consumed or end of trace */ - do { - processed = 0; - - err = cs_etm_decoder__process_data_block( - etmq->decoder, - etmq->offset, - &buffer.buf[buffer_used], - buffer.len - buffer_used, - &processed); - - if (err) + while (1) { + buffer_used = 0; + memset(&buffer, 0, sizeof(buffer)); + err = cs_etm__get_trace(&buffer, etmq); + if (err <= 0) return err; - - etmq->offset += processed; - buffer_used += processed; - /* - * Nothing to do with an error condition, let's hope the next - * chunk will be better. + * We cannot assume consecutive blocks in the data file are + * contiguous, reset the decoder to force re-sync. */ - err = cs_etm__sample(etmq); - } while (buffer.len > buffer_used); + err = cs_etm_decoder__reset(etmq->decoder); + if (err != 0) + return err; + + /* Run trace decoder until buffer consumed or end of trace */ + do { + processed = 0; + err = cs_etm_decoder__process_data_block( + etmq->decoder, + etmq->offset, + &buffer.buf[buffer_used], + buffer.len - buffer_used, + &processed); + if (err) + return err; + + etmq->offset += processed; + buffer_used += processed; + + /* Process each packet in this chunk */ + while (1) { + err = cs_etm_decoder__get_packet(etmq->decoder, + etmq->packet); + if (err <= 0) + /* + * Stop processing this chunk on + * end of data or error + */ + break; + + switch (etmq->packet->sample_type) { + case CS_ETM_RANGE: + /* + * If the packet contains an instruction + * range, generate instruction sequence + * events. + */ + cs_etm__sample(etmq); + break; + case CS_ETM_TRACE_ON: + /* + * Discontinuity in trace, flush + * previous branch stack + */ + cs_etm__flush(etmq); + break; + default: + break; + } + } + } while (buffer.len > buffer_used); -goto more; + if (err == 0) + /* Flush any remaining branch stack entries */ + err = cs_etm__flush(etmq); + } return err; } diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c index 44e603c27944..f0a6cbd033cc 100644 --- a/tools/perf/util/event.c +++ b/tools/perf/util/event.c @@ -894,8 +894,6 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, struct machine *machine) { size_t size; - const char *mmap_name; - char name_buff[PATH_MAX]; struct map *map = machine__kernel_map(machine); struct kmap *kmap; int err; @@ -918,7 +916,6 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, return -1; } - mmap_name = machine__mmap_name(machine, name_buff, sizeof(name_buff)); if (machine__is_host(machine)) { /* * kernel uses PERF_RECORD_MISC_USER for user space maps, @@ -931,7 +928,7 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool, kmap = map__kmap(map); size = snprintf(event->mmap.filename, sizeof(event->mmap.filename), - "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1; + "%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1; size = PERF_ALIGN(size, sizeof(u64)); event->mmap.header.type = PERF_RECORD_MMAP; event->mmap.header.size = (sizeof(event->mmap) - @@ -1591,17 +1588,6 @@ int machine__resolve(struct machine *machine, struct addr_location *al, return -1; dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid); - /* - * Have we already created the kernel maps for this machine? - * - * This should have happened earlier, when we processed the kernel MMAP - * events, but for older perf.data files there was no such thing, so do - * it now. 
- */ - if (sample->cpumode == PERF_RECORD_MISC_KERNEL && - machine__kernel_map(machine) == NULL) - machine__create_kernel_maps(machine); - thread__find_addr_map(thread, sample->cpumode, MAP__FUNCTION, sample->ip, al); dump_printf(" ...... dso: %s\n", al->map ? al->map->dso->long_name : diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c index e5fc14e53c05..7b7d535396f7 100644 --- a/tools/perf/util/evlist.c +++ b/tools/perf/util/evlist.c @@ -1086,11 +1086,30 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages) int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target) { + bool all_threads = (target->per_thread && target->system_wide); struct cpu_map *cpus; struct thread_map *threads; + /* + * If specify '-a' and '--per-thread' to perf record, perf record + * will override '--per-thread'. target->per_thread = false and + * target->system_wide = true. + * + * If specify '--per-thread' only to perf record, + * target->per_thread = true and target->system_wide = false. + * + * So target->per_thread && target->system_wide is false. + * For perf record, thread_map__new_str doesn't call + * thread_map__new_all_cpus. That will keep perf record's + * current behavior. + * + * For perf stat, it allows the case that target->per_thread and + * target->system_wide are all true. It means to collect system-wide + * per-thread data. thread_map__new_str will call + * thread_map__new_all_cpus to enumerate all threads. + */ threads = thread_map__new_str(target->pid, target->tid, target->uid, - target->per_thread); + all_threads); if (!threads) return -1; diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index f28aaaa3a440..942bdec6d70d 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h @@ -174,4 +174,5 @@ int write_padded(struct feat_fd *fd, const void *bf, int get_cpuid(char *buffer, size_t sz); char *get_cpuid_str(struct perf_pmu *pmu __maybe_unused); +int strcmp_cpuid_str(const char *s1, const char *s2); #endif /* __PERF_HEADER_H */ diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index b6140950301e..44a8456cea10 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -879,7 +879,7 @@ iter_prepare_cumulative_entry(struct hist_entry_iter *iter, * cumulated only one time to prevent entries more than 100% * overhead. 
*/ - he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1)); + he_cache = malloc(sizeof(*he_cache) * (callchain_cursor.nr + 1)); if (he_cache == NULL) return -ENOMEM; @@ -1045,8 +1045,6 @@ int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al, if (err) return err; - iter->max_stack = max_stack_depth; - err = iter->ops->prepare_entry(iter, al); if (err) goto out; diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index 02721b579746..e869cad4d89f 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -107,7 +107,6 @@ struct hist_entry_iter { int curr; bool hide_unresolved; - int max_stack; struct perf_evsel *evsel; struct perf_sample *sample; diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index b05a67464c03..fe27ef55cbb9 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -48,8 +48,31 @@ static void machine__threads_init(struct machine *machine) } } +static int machine__set_mmap_name(struct machine *machine) +{ + if (machine__is_host(machine)) { + if (symbol_conf.vmlinux_name) + machine->mmap_name = strdup(symbol_conf.vmlinux_name); + else + machine->mmap_name = strdup("[kernel.kallsyms]"); + } else if (machine__is_default_guest(machine)) { + if (symbol_conf.default_guest_vmlinux_name) + machine->mmap_name = strdup(symbol_conf.default_guest_vmlinux_name); + else + machine->mmap_name = strdup("[guest.kernel.kallsyms]"); + } else { + if (asprintf(&machine->mmap_name, "[guest.kernel.kallsyms.%d]", + machine->pid) < 0) + machine->mmap_name = NULL; + } + + return machine->mmap_name ? 0 : -ENOMEM; +} + int machine__init(struct machine *machine, const char *root_dir, pid_t pid) { + int err = -ENOMEM; + memset(machine, 0, sizeof(*machine)); map_groups__init(&machine->kmaps, machine); RB_CLEAR_NODE(&machine->rb_node); @@ -73,13 +96,16 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid) if (machine->root_dir == NULL) return -ENOMEM; + if (machine__set_mmap_name(machine)) + goto out; + if (pid != HOST_KERNEL_ID) { struct thread *thread = machine__findnew_thread(machine, -1, pid); char comm[64]; if (thread == NULL) - return -ENOMEM; + goto out; snprintf(comm, sizeof(comm), "[guest/%d]", pid); thread__set_comm(thread, comm, 0); @@ -87,7 +113,13 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid) } machine->current_tid = NULL; + err = 0; +out: + if (err) { + zfree(&machine->root_dir); + zfree(&machine->mmap_name); + } return 0; } @@ -119,7 +151,7 @@ struct machine *machine__new_kallsyms(void) * ask for not using the kcore parsing code, once this one is fixed * to create a map per module. 
*/ - if (machine && __machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION, true) <= 0) { + if (machine && machine__load_kallsyms(machine, "/proc/kallsyms", MAP__FUNCTION) <= 0) { machine__delete(machine); machine = NULL; } @@ -180,6 +212,7 @@ void machine__exit(struct machine *machine) dsos__exit(&machine->dsos); machine__exit_vdso(machine); zfree(&machine->root_dir); + zfree(&machine->mmap_name); zfree(&machine->current_tid); for (i = 0; i < THREADS__TABLE_SIZE; i++) { @@ -322,20 +355,6 @@ void machines__process_guests(struct machines *machines, } } -char *machine__mmap_name(struct machine *machine, char *bf, size_t size) -{ - if (machine__is_host(machine)) - snprintf(bf, size, "[%s]", "kernel.kallsyms"); - else if (machine__is_default_guest(machine)) - snprintf(bf, size, "[%s]", "guest.kernel.kallsyms"); - else { - snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", - machine->pid); - } - - return bf; -} - void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size) { struct rb_node *node; @@ -771,25 +790,13 @@ size_t machine__fprintf(struct machine *machine, FILE *fp) static struct dso *machine__get_kernel(struct machine *machine) { - const char *vmlinux_name = NULL; + const char *vmlinux_name = machine->mmap_name; struct dso *kernel; if (machine__is_host(machine)) { - vmlinux_name = symbol_conf.vmlinux_name; - if (!vmlinux_name) - vmlinux_name = DSO__NAME_KALLSYMS; - kernel = machine__findnew_kernel(machine, vmlinux_name, "[kernel]", DSO_TYPE_KERNEL); } else { - char bf[PATH_MAX]; - - if (machine__is_default_guest(machine)) - vmlinux_name = symbol_conf.default_guest_vmlinux_name; - if (!vmlinux_name) - vmlinux_name = machine__mmap_name(machine, bf, - sizeof(bf)); - kernel = machine__findnew_kernel(machine, vmlinux_name, "[guest.kernel]", DSO_TYPE_GUEST_KERNEL); @@ -849,13 +856,10 @@ static int machine__get_running_kernel_start(struct machine *machine, return 0; } -int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) +static int +__machine__create_kernel_maps(struct machine *machine, struct dso *kernel) { int type; - u64 start = 0; - - if (machine__get_running_kernel_start(machine, NULL, &start)) - return -1; /* In case of renewal the kernel map, destroy previous one */ machine__destroy_kernel_maps(machine); @@ -864,7 +868,7 @@ int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) struct kmap *kmap; struct map *map; - machine->vmlinux_maps[type] = map__new2(start, kernel, type); + machine->vmlinux_maps[type] = map__new2(0, kernel, type); if (machine->vmlinux_maps[type] == NULL) return -1; @@ -987,11 +991,11 @@ int machines__create_kernel_maps(struct machines *machines, pid_t pid) return machine__create_kernel_maps(machine); } -int __machine__load_kallsyms(struct machine *machine, const char *filename, - enum map_type type, bool no_kcore) +int machine__load_kallsyms(struct machine *machine, const char *filename, + enum map_type type) { struct map *map = machine__kernel_map(machine); - int ret = __dso__load_kallsyms(map->dso, filename, map, no_kcore); + int ret = __dso__load_kallsyms(map->dso, filename, map, true); if (ret > 0) { dso__set_loaded(map->dso, type); @@ -1006,12 +1010,6 @@ int __machine__load_kallsyms(struct machine *machine, const char *filename, return ret; } -int machine__load_kallsyms(struct machine *machine, const char *filename, - enum map_type type) -{ - return __machine__load_kallsyms(machine, filename, type, false); -} - int machine__load_vmlinux_path(struct machine *machine, enum map_type 
type) { struct map *map = machine__kernel_map(machine); @@ -1215,6 +1213,24 @@ static int machine__create_modules(struct machine *machine) return 0; } +static void machine__set_kernel_mmap(struct machine *machine, + u64 start, u64 end) +{ + int i; + + for (i = 0; i < MAP__NR_TYPES; i++) { + machine->vmlinux_maps[i]->start = start; + machine->vmlinux_maps[i]->end = end; + + /* + * Be a bit paranoid here, some perf.data file came with + * a zero sized synthesized MMAP event for the kernel. + */ + if (machine->vmlinux_maps[i]->end == 0) + machine->vmlinux_maps[i]->end = ~0ULL; + } +} + int machine__create_kernel_maps(struct machine *machine) { struct dso *kernel = machine__get_kernel(machine); @@ -1239,40 +1255,22 @@ int machine__create_kernel_maps(struct machine *machine) "continuing anyway...\n", machine->pid); } - /* - * Now that we have all the maps created, just set the ->end of them: - */ - map_groups__fixup_end(&machine->kmaps); - if (!machine__get_running_kernel_start(machine, &name, &addr)) { if (name && maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name, addr)) { machine__destroy_kernel_maps(machine); return -1; } + machine__set_kernel_mmap(machine, addr, 0); } + /* + * Now that we have all the maps created, just set the ->end of them: + */ + map_groups__fixup_end(&machine->kmaps); return 0; } -static void machine__set_kernel_mmap_len(struct machine *machine, - union perf_event *event) -{ - int i; - - for (i = 0; i < MAP__NR_TYPES; i++) { - machine->vmlinux_maps[i]->start = event->mmap.start; - machine->vmlinux_maps[i]->end = (event->mmap.start + - event->mmap.len); - /* - * Be a bit paranoid here, some perf.data file came with - * a zero sized synthesized MMAP event for the kernel. - */ - if (machine->vmlinux_maps[i]->end == 0) - machine->vmlinux_maps[i]->end = ~0ULL; - } -} - static bool machine__uses_kcore(struct machine *machine) { struct dso *dso; @@ -1289,7 +1287,6 @@ static int machine__process_kernel_mmap_event(struct machine *machine, union perf_event *event) { struct map *map; - char kmmap_prefix[PATH_MAX]; enum dso_kernel_type kernel_type; bool is_kernel_mmap; @@ -1297,15 +1294,14 @@ static int machine__process_kernel_mmap_event(struct machine *machine, if (machine__uses_kcore(machine)) return 0; - machine__mmap_name(machine, kmmap_prefix, sizeof(kmmap_prefix)); if (machine__is_host(machine)) kernel_type = DSO_TYPE_KERNEL; else kernel_type = DSO_TYPE_GUEST_KERNEL; is_kernel_mmap = memcmp(event->mmap.filename, - kmmap_prefix, - strlen(kmmap_prefix) - 1) == 0; + machine->mmap_name, + strlen(machine->mmap_name) - 1) == 0; if (event->mmap.filename[0] == '/' || (!is_kernel_mmap && event->mmap.filename[0] == '[')) { map = machine__findnew_module_map(machine, event->mmap.start, @@ -1316,7 +1312,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine, map->end = map->start + event->mmap.len; } else if (is_kernel_mmap) { const char *symbol_name = (event->mmap.filename + - strlen(kmmap_prefix)); + strlen(machine->mmap_name)); /* * Should be there already, from the build-id table in * the header. 
@@ -1357,7 +1353,7 @@ static int machine__process_kernel_mmap_event(struct machine *machine, up_read(&machine->dsos.lock); if (kernel == NULL) - kernel = machine__findnew_dso(machine, kmmap_prefix); + kernel = machine__findnew_dso(machine, machine->mmap_name); if (kernel == NULL) goto out_problem; @@ -1370,7 +1366,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine, if (strstr(kernel->long_name, "vmlinux")) dso__set_short_name(kernel, "[kernel.vmlinux]", false); - machine__set_kernel_mmap_len(machine, event); + machine__set_kernel_mmap(machine, event->mmap.start, + event->mmap.start + event->mmap.len); /* * Avoid using a zero address (kptr_restrict) for the ref reloc diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h index 5ce860b64c74..66cc200ef86f 100644 --- a/tools/perf/util/machine.h +++ b/tools/perf/util/machine.h @@ -43,6 +43,7 @@ struct machine { bool comm_exec; bool kptr_restrict_warned; char *root_dir; + char *mmap_name; struct threads threads[THREADS__TABLE_SIZE]; struct vdso_info *vdso_info; struct perf_env *env; @@ -142,8 +143,6 @@ struct machine *machines__find(struct machines *machines, pid_t pid); struct machine *machines__findnew(struct machines *machines, pid_t pid); void machines__set_id_hdr_size(struct machines *machines, u16 id_hdr_size); -char *machine__mmap_name(struct machine *machine, char *bf, size_t size); - void machines__set_comm_exec(struct machines *machines, bool comm_exec); struct machine *machine__new_host(void); @@ -226,8 +225,6 @@ struct map *machine__findnew_module_map(struct machine *machine, u64 start, const char *filename); int arch__fix_module_text_start(u64 *start, const char *name); -int __machine__load_kallsyms(struct machine *machine, const char *filename, - enum map_type type, bool no_kcore); int machine__load_kallsyms(struct machine *machine, const char *filename, enum map_type type); int machine__load_vmlinux_path(struct machine *machine, enum map_type type); @@ -239,7 +236,6 @@ size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp, bool (skip)(struct dso *dso, int parm), int parm); void machine__destroy_kernel_maps(struct machine *machine); -int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel); int machine__create_kernel_maps(struct machine *machine); int machines__create_kernel_maps(struct machines *machines, pid_t pid); diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c index 57e38fdf0b34..1111d5bf15ca 100644 --- a/tools/perf/util/pmu.c +++ b/tools/perf/util/pmu.c @@ -576,6 +576,34 @@ char * __weak get_cpuid_str(struct perf_pmu *pmu __maybe_unused) return NULL; } +/* Return zero when the cpuid from the mapfile.csv matches the + * cpuid string generated on this platform. + * Otherwise return non-zero. + */ +int __weak strcmp_cpuid_str(const char *mapcpuid, const char *cpuid) +{ + regex_t re; + regmatch_t pmatch[1]; + int match; + + if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) { + /* Warn unable to generate match particular string. */ + pr_info("Invalid regular expression %s\n", mapcpuid); + return 1; + } + + match = !regexec(&re, cpuid, 1, pmatch, 0); + regfree(&re); + if (match) { + size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so); + + /* Verify the entire string matched. 
*/ + if (match_len == strlen(cpuid)) + return 0; + } + return 1; +} + static char *perf_pmu__getcpuid(struct perf_pmu *pmu) { char *cpuid; @@ -610,31 +638,14 @@ struct pmu_events_map *perf_pmu__find_map(struct perf_pmu *pmu) i = 0; for (;;) { - regex_t re; - regmatch_t pmatch[1]; - int match; - map = &pmu_events_map[i++]; if (!map->table) { map = NULL; break; } - if (regcomp(&re, map->cpuid, REG_EXTENDED) != 0) { - /* Warn unable to generate match particular string. */ - pr_info("Invalid regular expression %s\n", map->cpuid); + if (!strcmp_cpuid_str(map->cpuid, cpuid)) break; - } - - match = !regexec(&re, cpuid, 1, pmatch, 0); - regfree(&re); - if (match) { - size_t match_len = (pmatch[0].rm_eo - pmatch[0].rm_so); - - /* Verify the entire string matched. */ - if (match_len == strlen(cpuid)) - break; - } } free(cpuid); return map; diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c index 2da4d0456a03..e8514f651865 100644 --- a/tools/perf/util/sort.c +++ b/tools/perf/util/sort.c @@ -111,17 +111,20 @@ struct sort_entry sort_thread = { /* --sort comm */ +/* + * We can't use pointer comparison in functions below, + * because it gives different results based on pointer + * values, which could break some sorting assumptions. + */ static int64_t sort__comm_cmp(struct hist_entry *left, struct hist_entry *right) { - /* Compare the addr that should be unique among comm */ return strcmp(comm__str(right->comm), comm__str(left->comm)); } static int64_t sort__comm_collapse(struct hist_entry *left, struct hist_entry *right) { - /* Compare the addr that should be unique among comm */ return strcmp(comm__str(right->comm), comm__str(left->comm)); } diff --git a/tools/perf/util/stat.h b/tools/perf/util/stat.h index dbc6f7134f61..2f44e386a0e8 100644 --- a/tools/perf/util/stat.h +++ b/tools/perf/util/stat.h @@ -90,6 +90,8 @@ struct perf_stat_config { bool scale; FILE *output; unsigned int interval; + unsigned int timeout; + int times; struct runtime_stat *stats; int stats_num; }; diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c index cc065d4bfafc..a1a312d99f30 100644 --- a/tools/perf/util/symbol.c +++ b/tools/perf/util/symbol.c @@ -1582,7 +1582,7 @@ int dso__load(struct dso *dso, struct map *map) bool next_slot = false; bool is_reg; bool nsexit; - int sirc; + int sirc = -1; enum dso_binary_type symtab_type = binary_type_symtab[i]; @@ -1600,16 +1600,14 @@ int dso__load(struct dso *dso, struct map *map) nsinfo__mountns_exit(&nsc); is_reg = is_regular_file(name); - sirc = symsrc__init(ss, dso, name, symtab_type); + if (is_reg) + sirc = symsrc__init(ss, dso, name, symtab_type); if (nsexit) nsinfo__mountns_enter(dso->nsinfo, &nsc); - if (!is_reg || sirc < 0) { - if (sirc >= 0) - symsrc__destroy(ss); + if (!is_reg || sirc < 0) continue; - } if (!syms_ss && symsrc__has_symtab(ss)) { syms_ss = ss; @@ -1960,8 +1958,7 @@ static int dso__load_guest_kernel_sym(struct dso *dso, struct map *map) pr_debug("Using %s for symbols\n", kallsyms_filename); if (err > 0 && !dso__is_kcore(dso)) { dso->binary_type = DSO_BINARY_TYPE__GUEST_KALLSYMS; - machine__mmap_name(machine, path, sizeof(path)); - dso__set_long_name(dso, strdup(path), true); + dso__set_long_name(dso, machine->mmap_name, false); map__fixup_start(map); map__fixup_end(map); } diff --git a/tools/perf/util/syscalltbl.c b/tools/perf/util/syscalltbl.c index 303bdb84ab5a..895122d638dd 100644 --- a/tools/perf/util/syscalltbl.c +++ b/tools/perf/util/syscalltbl.c @@ -30,6 +30,14 @@ static const char **syscalltbl_native = syscalltbl_x86_64; #include 
<asm/syscalls_64.c> const int syscalltbl_native_max_id = SYSCALLTBL_S390_64_MAX_ID; static const char **syscalltbl_native = syscalltbl_s390_64; +#elif defined(__powerpc64__) +#include <asm/syscalls_64.c> +const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_64_MAX_ID; +static const char **syscalltbl_native = syscalltbl_powerpc_64; +#elif defined(__powerpc__) +#include <asm/syscalls_32.c> +const int syscalltbl_native_max_id = SYSCALLTBL_POWERPC_32_MAX_ID; +static const char **syscalltbl_native = syscalltbl_powerpc_32; #endif struct syscall { diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c index 3e1038f6491c..729dad8f412d 100644 --- a/tools/perf/util/thread_map.c +++ b/tools/perf/util/thread_map.c @@ -323,7 +323,7 @@ out_free_threads: } struct thread_map *thread_map__new_str(const char *pid, const char *tid, - uid_t uid, bool per_thread) + uid_t uid, bool all_threads) { if (pid) return thread_map__new_by_pid_str(pid); @@ -331,7 +331,7 @@ struct thread_map *thread_map__new_str(const char *pid, const char *tid, if (!tid && uid != UINT_MAX) return thread_map__new_by_uid(uid); - if (per_thread) + if (all_threads) return thread_map__new_all_cpus(); return thread_map__new_by_tid_str(tid); diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h index 0a806b99e73c..5ec91cfd1869 100644 --- a/tools/perf/util/thread_map.h +++ b/tools/perf/util/thread_map.h @@ -31,7 +31,7 @@ struct thread_map *thread_map__get(struct thread_map *map); void thread_map__put(struct thread_map *map); struct thread_map *thread_map__new_str(const char *pid, - const char *tid, uid_t uid, bool per_thread); + const char *tid, uid_t uid, bool all_threads); struct thread_map *thread_map__new_by_tid_str(const char *tid_str); |
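
For reference, the pmu.c hunk above factors the mapfile CPUID matching into a weak strcmp_cpuid_str() helper so s/390 can override it. The core idea, compiling the mapfile entry as an extended regex and accepting it only when it matches the platform CPUID string in full, can be exercised standalone as below; the CPUID strings are made-up examples, not taken from the patch.

#include <regex.h>
#include <stdio.h>
#include <string.h>

/* Return 1 when mapcpuid, treated as an extended regex, matches the whole
 * of cpuid; 0 otherwise (the perf helper returns 0 for a match instead). */
static int cpuid_matches(const char *mapcpuid, const char *cpuid)
{
	regex_t re;
	regmatch_t pmatch[1];
	int match = 0;

	if (regcomp(&re, mapcpuid, REG_EXTENDED) != 0) {
		fprintf(stderr, "Invalid regular expression %s\n", mapcpuid);
		return 0;
	}

	if (!regexec(&re, cpuid, 1, pmatch, 0)) {
		size_t len = pmatch[0].rm_eo - pmatch[0].rm_so;

		/* Accept only if the entire cpuid string was matched. */
		match = pmatch[0].rm_so == 0 && len == strlen(cpuid);
	}

	regfree(&re);
	return match;
}

int main(void)
{
	printf("%d\n", cpuid_matches("GenuineIntel-6-9E", "GenuineIntel-6-9E"));
	printf("%d\n", cpuid_matches("GenuineIntel-6-.*", "GenuineIntel-6-9E"));
	printf("%d\n", cpuid_matches("GenuineIntel-6", "GenuineIntel-6-9E"));
	return 0;
}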