author     Namhyung Kim <namhyung@kernel.org>           2022-06-15 19:32:20 +0300
committer  Arnaldo Carvalho de Melo <acme@redhat.com>   2022-07-12 15:55:37 +0300
commit     3ae03f2650b8d87242906f6c160732bc9d991ca1 (patch)
tree       57169f3d9bf356840ed10ea98a30bb71282663c2 /tools/perf/builtin-lock.c
parent     166a9764a38e37089088eb21f31cfcb6d5a0dde2 (diff)
download   linux-3ae03f2650b8d87242906f6c160732bc9d991ca1.tar.xz
perf lock: Handle lock contention tracepoints
When the lock contention events are used, there is no tracking of
acquire and release, so the state machine is simplified to use only
UNINITIALIZED -> CONTENDED -> ACQUIRED.

Note that the CONTENDED state is re-entrant, since a mutex can emit two
or more consecutive contention_begin events (first for optimistic
spinning and again before it sleeps).
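
For illustration only (this sketch is not part of the patch), the simplified
flow can be written out as below. The SEQ_STATE_* names mirror the ones used
in builtin-lock.c; the struct and helper names are hypothetical:

/*
 * Illustrative sketch of the simplified state machine described above.
 * Only the SEQ_STATE_* names correspond to builtin-lock.c; the struct
 * and helpers are hypothetical stand-ins.
 */
enum seq_state {
	SEQ_STATE_UNINITIALIZED,
	SEQ_STATE_CONTENDED,
	SEQ_STATE_ACQUIRED,
};

struct seq_sketch {
	enum seq_state state;
	unsigned long long prev_event_time;
	unsigned long long wait_time_total;
};

/* contention_begin: enter CONTENDED; a nested begin (spin -> sleep) is ignored */
static void sketch_contention_begin(struct seq_sketch *s, unsigned long long now)
{
	if (s->state == SEQ_STATE_CONTENDED)
		return;
	s->state = SEQ_STATE_CONTENDED;
	s->prev_event_time = now;
}

/* contention_end: account the contended time and move to ACQUIRED */
static void sketch_contention_end(struct seq_sketch *s, unsigned long long now)
{
	if (s->state == SEQ_STATE_CONTENDED)
		s->wait_time_total += now - s->prev_event_time;
	s->state = SEQ_STATE_ACQUIRED;
}

The real handlers in the diff below additionally detect broken sequences
(for example an event arriving in an unexpected state) and track per-lock
total/min/max/average wait times.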
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
Acked-by: Ian Rogers <irogers@google.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Waiman Long <longman@redhat.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20220615163222.1275500-6-namhyung@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf/builtin-lock.c')
-rw-r--r--   tools/perf/builtin-lock.c   137
1 file changed, 137 insertions, 0 deletions
diff --git a/tools/perf/builtin-lock.c b/tools/perf/builtin-lock.c
index 9e3b90cac505..546dad1963c8 100644
--- a/tools/perf/builtin-lock.c
+++ b/tools/perf/builtin-lock.c
@@ -483,6 +483,18 @@ static struct lock_stat *pop_from_result(void)
 	return container_of(node, struct lock_stat, rb);
 }
 
+static struct lock_stat *lock_stat_find(u64 addr)
+{
+	struct hlist_head *entry = lockhashentry(addr);
+	struct lock_stat *ret;
+
+	hlist_for_each_entry(ret, entry, hash_entry) {
+		if (ret->addr == addr)
+			return ret;
+	}
+	return NULL;
+}
+
 static struct lock_stat *lock_stat_findnew(u64 addr, const char *name)
 {
 	struct hlist_head *entry = lockhashentry(addr);
@@ -827,6 +839,124 @@ end:
 	return 0;
 }
 
+static int report_lock_contention_begin_event(struct evsel *evsel,
+					      struct perf_sample *sample)
+{
+	struct lock_stat *ls;
+	struct thread_stat *ts;
+	struct lock_seq_stat *seq;
+	u64 addr = evsel__intval(evsel, sample, "lock_addr");
+
+	if (show_thread_stats)
+		addr = sample->tid;
+
+	ls = lock_stat_findnew(addr, "No name");
+	if (!ls)
+		return -ENOMEM;
+
+	ts = thread_stat_findnew(sample->tid);
+	if (!ts)
+		return -ENOMEM;
+
+	seq = get_seq(ts, addr);
+	if (!seq)
+		return -ENOMEM;
+
+	switch (seq->state) {
+	case SEQ_STATE_UNINITIALIZED:
+	case SEQ_STATE_ACQUIRED:
+		break;
+	case SEQ_STATE_CONTENDED:
+		/*
+		 * It can have nested contention begin with mutex spinning,
+		 * then we would use the original contention begin event and
+		 * ignore the second one.
+		 */
+		goto end;
+	case SEQ_STATE_ACQUIRING:
+	case SEQ_STATE_READ_ACQUIRED:
+	case SEQ_STATE_RELEASED:
+		/* broken lock sequence */
+		if (!ls->broken) {
+			ls->broken = 1;
+			bad_hist[BROKEN_CONTENDED]++;
+		}
+		list_del_init(&seq->list);
+		free(seq);
+		goto end;
+	default:
+		BUG_ON("Unknown state of lock sequence found!\n");
+		break;
+	}
+
+	if (seq->state != SEQ_STATE_CONTENDED) {
+		seq->state = SEQ_STATE_CONTENDED;
+		seq->prev_event_time = sample->time;
+		ls->nr_contended++;
+	}
+end:
+	return 0;
+}
+
+static int report_lock_contention_end_event(struct evsel *evsel,
+					    struct perf_sample *sample)
+{
+	struct lock_stat *ls;
+	struct thread_stat *ts;
+	struct lock_seq_stat *seq;
+	u64 contended_term;
+	u64 addr = evsel__intval(evsel, sample, "lock_addr");
+
+	if (show_thread_stats)
+		addr = sample->tid;
+
+	ls = lock_stat_find(addr);
+	if (!ls)
+		return 0;
+
+	ts = thread_stat_find(sample->tid);
+	if (!ts)
+		return 0;
+
+	seq = get_seq(ts, addr);
+	if (!seq)
+		return -ENOMEM;
+
+	switch (seq->state) {
+	case SEQ_STATE_UNINITIALIZED:
+		goto end;
+	case SEQ_STATE_CONTENDED:
+		contended_term = sample->time - seq->prev_event_time;
+		ls->wait_time_total += contended_term;
+		if (contended_term < ls->wait_time_min)
+			ls->wait_time_min = contended_term;
+		if (ls->wait_time_max < contended_term)
+			ls->wait_time_max = contended_term;
+		break;
+	case SEQ_STATE_ACQUIRING:
+	case SEQ_STATE_ACQUIRED:
+	case SEQ_STATE_READ_ACQUIRED:
+	case SEQ_STATE_RELEASED:
+		/* broken lock sequence */
+		if (!ls->broken) {
+			ls->broken = 1;
+			bad_hist[BROKEN_ACQUIRED]++;
+		}
+		list_del_init(&seq->list);
+		free(seq);
+		goto end;
+	default:
+		BUG_ON("Unknown state of lock sequence found!\n");
+		break;
+	}
+
+	seq->state = SEQ_STATE_ACQUIRED;
+	ls->nr_acquired++;
+	ls->avg_wait_time = ls->wait_time_total/ls->nr_acquired;
+end:
+	return 0;
+}
+
 /* lock oriented handlers */
 /* TODO: handlers for CPU oriented, thread oriented */
 static struct trace_lock_handler report_lock_ops  = {
@@ -834,6 +964,8 @@ static struct trace_lock_handler report_lock_ops = {
 	.acquired_event		= report_lock_acquired_event,
 	.contended_event	= report_lock_contended_event,
 	.release_event		= report_lock_release_event,
+	.contention_begin_event	= report_lock_contention_begin_event,
+	.contention_end_event	= report_lock_contention_end_event,
 };
 
 static struct trace_lock_handler *trace_handler;
@@ -1126,6 +1258,11 @@ static int __cmd_report(bool display_info)
 		goto out_delete;
 	}
 
+	if (perf_session__set_tracepoints_handlers(session, contention_tracepoints)) {
+		pr_err("Initializing perf session tracepoint handlers failed\n");
+		goto out_delete;
+	}
+
 	if (setup_output_field(output_fields))
 		goto out_delete;
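
Usage note (not part of the patch): on a kernel that exposes the
lock:contention_begin and lock:contention_end tracepoints, the events can be
recorded with something like "perf record -e lock:contention_begin,lock:contention_end -a -- sleep 1",
and "perf lock report" will then run them through the
report_lock_contention_begin_event() and report_lock_contention_end_event()
handlers registered above.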