author    | Peter Zijlstra <peterz@infradead.org> | 2017-11-13 16:28:38 +0300
committer | Ingo Molnar <mingo@kernel.org>        | 2018-03-12 17:28:50 +0300
commit    | 6668128a9e25f7a11d25359e46df2541e6b43fc9 (patch)
tree      | 4932b9da602494d85659f27b80e186eb97601cff /kernel/events/core.c
parent    | 8343aae66167df6708128a778e750d48dbe31302 (diff)
download  | linux-6668128a9e25f7a11d25359e46df2541e6b43fc9.tar.xz
perf/core: Optimize ctx_sched_out()
When an event group contains more events than can be scheduled on the
hardware, iterating the full event group in ctx_sched_out() is a waste
of time.
Keep track of the events that got programmed on the hardware, such
that we can iterate this smaller list in order to schedule them out.
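
The shape of the optimization can be sketched as a small, self-contained
userspace model (hypothetical names such as toy_ctx, toy_event and
NR_COUNTERS; none of this is kernel code): sched-in records which events
actually made it onto the hardware in a separate "active" list, and
sched-out walks only that short list instead of every event in the
context.

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy model: names and types are illustrative, not the kernel's. */
struct toy_event {
	int id;
	int programmed;           /* did the "hardware" accept it?       */
	struct toy_event *next;   /* link in the context's active list   */
};

struct toy_ctx {
	struct toy_event *events; /* all events, possibly thousands      */
	int nr_events;
	struct toy_event *active; /* only the events that got programmed */
};

/* Pretend the PMU has a fixed number of counters. */
#define NR_COUNTERS 4

static void toy_sched_in(struct toy_ctx *ctx)
{
	int used = 0;

	for (int i = 0; i < ctx->nr_events; i++) {
		struct toy_event *e = &ctx->events[i];

		if (used >= NR_COUNTERS)
			break;            /* hardware is full */

		e->programmed = 1;
		used++;

		/* Track what actually went onto the hardware. */
		e->next = ctx->active;
		ctx->active = e;
	}
}

static void toy_sched_out(struct toy_ctx *ctx)
{
	/*
	 * Walk only the active list: its length is bounded by the number
	 * of counters, not by the total number of events in the context.
	 */
	for (struct toy_event *e = ctx->active; e; e = e->next) {
		e->programmed = 0;
		printf("scheduled out event %d\n", e->id);
	}
	ctx->active = NULL;
}

int main(void)
{
	struct toy_ctx ctx = { .nr_events = 1000, .active = NULL };

	ctx.events = calloc(ctx.nr_events, sizeof(*ctx.events));
	if (!ctx.events)
		return 1;
	for (int i = 0; i < ctx.nr_events; i++)
		ctx.events[i].id = i;

	toy_sched_in(&ctx);   /* only NR_COUNTERS events get programmed */
	toy_sched_out(&ctx);  /* ...and only those are visited here     */

	free(ctx.events);
	return 0;
}
```

In the patch itself these roles are played by the new ctx->pinned_active
and ctx->flexible_active lists and the event->active_list node, which the
diff below maintains.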
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Dmitri Prokhorov <Dmitry.Prohorov@intel.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Valery Cherepennikov <valery.cherepennikov@intel.com>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r-- | kernel/events/core.c | 53
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9a07bbe66451..4d601c06074f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1647,14 +1647,6 @@ perf_event_groups_rotate(struct perf_event_groups *groups, int cpu)
 				typeof(*event), node); event;	\
 		event = rb_entry_safe(rb_next(&event->node),	\
 				typeof(*event), node))
-/*
- * Iterate event groups with cpu == key.
- */
-#define perf_event_groups_for_each_cpu(event, key, groups, node) \
-	for (event = perf_event_groups_first(groups, key);	\
-	     event && event->cpu == key;			\
-	     event = rb_entry_safe(rb_next(&event->node),	\
-				typeof(*event), node))
 
 /*
  * Add a event from the lists for its context.
@@ -1889,8 +1881,9 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 static void perf_group_detach(struct perf_event *event)
 {
 	struct perf_event *sibling, *tmp;
+	struct perf_event_context *ctx = event->ctx;
 
-	lockdep_assert_held(&event->ctx->lock);
+	lockdep_assert_held(&ctx->lock);
 
 	/*
 	 * We can have double detach due to exit/hot-unplug + close.
@@ -1924,6 +1917,13 @@ static void perf_group_detach(struct perf_event *event)
 		if (!RB_EMPTY_NODE(&event->group_node)) {
 			list_del_init(&sibling->sibling_list);
 			add_event_to_groups(sibling, event->ctx);
+
+			if (sibling->state == PERF_EVENT_STATE_ACTIVE) {
+				struct list_head *list = sibling->attr.pinned ?
+					&ctx->pinned_active : &ctx->flexible_active;
+
+				list_add_tail(&sibling->active_list, list);
+			}
 		}
 
 		WARN_ON_ONCE(sibling->ctx != event->ctx);
@@ -1988,6 +1988,13 @@ event_sched_out(struct perf_event *event,
 	if (event->state != PERF_EVENT_STATE_ACTIVE)
 		return;
 
+	/*
+	 * Asymmetry; we only schedule events _IN_ through ctx_sched_in(), but
+	 * we can schedule events _OUT_ individually through things like
+	 * __perf_remove_from_context().
+	 */
+	list_del_init(&event->active_list);
+
 	perf_pmu_disable(event->pmu);
 
 	event->pmu->del(event, 0);
@@ -2835,9 +2842,8 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 			  struct perf_cpu_context *cpuctx,
 			  enum event_type_t event_type)
 {
-	int sw = -1, cpu = smp_processor_id();
+	struct perf_event *event, *tmp;
 	int is_active = ctx->is_active;
-	struct perf_event *event;
 
 	lockdep_assert_held(&ctx->lock);
 
@@ -2884,20 +2890,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 
 	perf_pmu_disable(ctx->pmu);
 	if (is_active & EVENT_PINNED) {
-		perf_event_groups_for_each_cpu(event, cpu,
-				&ctx->pinned_groups, group_node)
-			group_sched_out(event, cpuctx, ctx);
-		perf_event_groups_for_each_cpu(event, sw,
-				&ctx->pinned_groups, group_node)
+		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
 			group_sched_out(event, cpuctx, ctx);
 	}
 
 	if (is_active & EVENT_FLEXIBLE) {
-		perf_event_groups_for_each_cpu(event, cpu,
-				&ctx->flexible_groups, group_node)
-			group_sched_out(event, cpuctx, ctx);
-		perf_event_groups_for_each_cpu(event, sw,
-				&ctx->flexible_groups, group_node)
+		list_for_each_entry_safe(event, tmp, &ctx->flexible_active, active_list)
 			group_sched_out(event, cpuctx, ctx);
 	}
 	perf_pmu_enable(ctx->pmu);
@@ -3231,8 +3229,10 @@ static int pinned_sched_in(struct perf_event *event, void *data)
 	if (!event_filter_match(event))
 		return 0;
 
-	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw))
-		group_sched_in(event, sid->cpuctx, sid->ctx);
+	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
+		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
+			list_add_tail(&event->active_list, &sid->ctx->pinned_active);
+	}
 
 	/*
 	 * If this pinned group hasn't been scheduled,
@@ -3255,7 +3255,9 @@ static int flexible_sched_in(struct perf_event *event, void *data)
 		return 0;
 
 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		if (group_sched_in(event, sid->cpuctx, sid->ctx))
+		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
+			list_add_tail(&event->active_list, &sid->ctx->flexible_active);
+		else
 			sid->can_add_hw = 0;
 	}
 
@@ -3973,6 +3975,8 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
 	perf_event_groups_init(&ctx->pinned_groups);
 	perf_event_groups_init(&ctx->flexible_groups);
 	INIT_LIST_HEAD(&ctx->event_list);
+	INIT_LIST_HEAD(&ctx->pinned_active);
+	INIT_LIST_HEAD(&ctx->flexible_active);
 	atomic_set(&ctx->refcount, 1);
 }
 
@@ -9815,6 +9819,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	INIT_LIST_HEAD(&event->event_entry);
 	INIT_LIST_HEAD(&event->sibling_list);
+	INIT_LIST_HEAD(&event->active_list);
 	init_event_group(event);
 	INIT_LIST_HEAD(&event->rb_entry);
 	INIT_LIST_HEAD(&event->active_entry);
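
One detail worth calling out: the new ctx_sched_out() loops use
list_for_each_entry_safe() because group_sched_out() eventually reaches
event_sched_out(), which now does list_del_init(&event->active_list) on
the very entry being visited. A minimal userspace model of that pattern
is sketched below; the list primitives are simplified stand-ins for the
kernel's <linux/list.h>, the node is placed first in the struct so no
container_of() is needed, and all names are illustrative rather than
kernel code.

```c
#include <stdio.h>

/* Minimal stand-ins for the kernel's struct list_head primitives. */
struct node {
	struct node *prev, *next;
};

static void list_init(struct node *h)
{
	h->prev = h->next = h;
}

static void list_add_tail(struct node *n, struct node *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct node *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	list_init(n);            /* entry now points at itself */
}

/* Toy event: the list node is the first member, so node == event here. */
struct toy_event {
	struct node active_list;
	int id;
};

/* Models event_sched_out(): the entry unlinks itself while being visited. */
static void toy_sched_out(struct toy_event *e)
{
	list_del_init(&e->active_list);
	printf("sched out %d\n", e->id);
}

int main(void)
{
	struct node active;
	struct toy_event ev[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	list_init(&active);
	for (int i = 0; i < 3; i++)
		list_add_tail(&ev[i].active_list, &active);

	/*
	 * The "safe" walk: remember the next node before handing the
	 * current one to toy_sched_out(), which deletes it from the list.
	 */
	for (struct node *pos = active.next, *tmp = pos->next;
	     pos != &active;
	     pos = tmp, tmp = pos->next)
		toy_sched_out((struct toy_event *)pos);

	return 0;
}
```

A plain walk that re-read pos->next after the callee's list_del_init()
would follow the node's self-pointer and keep spinning on the first
entry, which is why the cached-next ("safe") form is used in the patch.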