From da97e18458fb42d7c00fac5fd1c56a3896ec666e Mon Sep 17 00:00:00 2001
From: "Joel Fernandes (Google)"
Date: Mon, 14 Oct 2019 13:03:08 -0400
Subject: perf_event: Add support for LSM and SELinux checks

In current mainline, the degree of access to the perf_event_open(2)
system call depends on the perf_event_paranoid sysctl. This has a
number of limitations:

1. The sysctl is only a single value. Many types of accesses are
   controlled based on that single value, making the control very
   limited and coarse-grained.

2. The sysctl is global, so if it is changed, all processes get access
   to perf_event_open(2), opening the door to security issues.

This patch adds LSM and SELinux access checking, which will be used in
Android to control access to perf_event_open(2) for the purposes of
attaching BPF programs to tracepoints, perf profiling and other
operations from userspace. These operations are intended for production
systems.

5 new LSM hooks are added:

1. perf_event_open: This controls access during the perf_event_open(2)
   syscall itself. The hook is called from all the places that the
   perf_event_paranoid sysctl is checked, to keep it consistent with the
   sysctl. The hook gets passed a 'type' argument which controls CPU,
   kernel and tracepoint accesses (in this context, CPU, kernel and
   tracepoint have the same semantics as the perf_event_paranoid
   sysctl). Additionally, I added an 'open' type which is similar to the
   perf_event_paranoid sysctl == 3 patch carried in Android and several
   other distros but was rejected in mainline [1] in 2016.

2. perf_event_alloc: This allocates a new security object for the event
   which stores the current SID within the event. It will be useful when
   the perf event's FD is passed through IPC to another process which
   may try to read the FD. Appropriate security checks will limit
   access.

3. perf_event_free: Called when the event is closed.

4. perf_event_read: Called from the read(2) and mmap(2) syscalls for the
   event.

5. perf_event_write: Called from the ioctl(2) syscalls for the event.

[1] https://lwn.net/Articles/696240/

Since Peter had suggested LSM hooks in 2016 [1], I am adding his
Suggested-by tag below.

To use this patch, we set the perf_event_paranoid sysctl to -1 and then
apply SELinux checking as appropriate (default deny everything, and
then add policy rules to give access to domains that need it). In the
future we can remove the perf_event_paranoid sysctl altogether.

Suggested-by: Peter Zijlstra
Co-developed-by: Peter Zijlstra
Signed-off-by: Joel Fernandes (Google)
Signed-off-by: Peter Zijlstra (Intel)
Acked-by: James Morris
Cc: Arnaldo Carvalho de Melo
Cc: rostedt@goodmis.org
Cc: Yonghong Song
Cc: Kees Cook
Cc: Ingo Molnar
Cc: Alexei Starovoitov
Cc: jeffv@google.com
Cc: Jiri Olsa
Cc: Daniel Borkmann
Cc: primiano@google.com
Cc: Song Liu
Cc: rsavitski@google.com
Cc: Namhyung Kim
Cc: Matthew Garrett
Link: https://lkml.kernel.org/r/20191014170308.70668-1-joel@joelfernandes.org
---
 arch/x86/events/intel/bts.c  | 8 +++++---
 arch/x86/events/intel/core.c | 5 +++--
 arch/x86/events/intel/p4.c   | 5 +++--
 3 files changed, 11 insertions(+), 7 deletions(-)

(limited to 'arch/x86/events')

diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 5ee3fed881d3..38de4a7f6752 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -549,9 +549,11 @@ static int bts_event_init(struct perf_event *event)
 	 * Note that the default paranoia setting permits unprivileged
 	 * users to profile the kernel.
 	 */
-	if (event->attr.exclude_kernel && perf_paranoid_kernel() &&
-	    !capable(CAP_SYS_ADMIN))
-		return -EACCES;
+	if (event->attr.exclude_kernel) {
+		ret = perf_allow_kernel(&event->attr);
+		if (ret)
+			return ret;
+	}

 	if (x86_add_exclusive(x86_lbr_exclusive_bts))
 		return -EBUSY;

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index fcef678c3423..bbf6588d47ee 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3315,8 +3315,9 @@ static int intel_pmu_hw_config(struct perf_event *event)
 		if (x86_pmu.version < 3)
 			return -EINVAL;

-		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-			return -EACCES;
+		ret = perf_allow_cpu(&event->attr);
+		if (ret)
+			return ret;

 		event->hw.config |= ARCH_PERFMON_EVENTSEL_ANY;

diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index dee579efb2b2..a4cc66005ce8 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -776,8 +776,9 @@ static int p4_validate_raw_event(struct perf_event *event)
 	 * the user needs special permissions to be able to use it
 	 */
 	if (p4_ht_active() && p4_event_bind_map[v].shared) {
-		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
-			return -EACCES;
+		v = perf_allow_cpu(&event->attr);
+		if (v)
+			return v;
 	}

 	/* ESCR EventMask bits may be invalid */
-- cgit v1.2.3

From fc1adfe306b71e094df636012f8c0fed971cad45 Mon Sep 17 00:00:00 2001
From: Alexey Budankov
Date: Wed, 23 Oct 2019 10:11:04 +0300
Subject: perf/core, perf/x86: Introduce swap_task_ctx() method at 'struct pmu'

Declare the swap_task_ctx() method at the generic and x86-specific pmu
types, to bridge calls to platform-specific PMU code on the optimized
context switch path between equivalent task perf event contexts.

Signed-off-by: Alexey Budankov
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Arnaldo Carvalho de Melo
Cc: Ian Rogers
Cc: Jiri Olsa
Cc: Kan Liang
Cc: Linus Torvalds
Cc: Mark Rutland
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: Song Liu
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Link: https://lkml.kernel.org/r/9a0aa84a-f062-9b64-3133-373658550c4b@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/perf_event.h | 8 ++++++++
 include/linux/perf_event.h   | 9 +++++++++
 2 files changed, 17 insertions(+)

(limited to 'arch/x86/events')

diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index ecacfbf4ebc1..5384317eaa16 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -682,6 +682,14 @@ struct x86_pmu {
 	 */
 	atomic_t	lbr_exclusive[x86_lbr_exclusive_max];

+	/*
+	 * perf task context (i.e. struct perf_event_context::task_ctx_data)
+	 * switch helper to bridge calls from perf/core to perf/x86.
+	 * See struct pmu::swap_task_ctx() usage for examples;
+	 */
+	void		(*swap_task_ctx)(struct perf_event_context *prev,
+					 struct perf_event_context *next);
+
 	/*
 	 * AMD bits
 	 */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4f77b22d47be..011dcbdbccc2 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -410,6 +410,15 @@ struct pmu {
 	 */
 	size_t				task_ctx_size;

+	/*
+	 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
+	 * can be synchronized using this function. See Intel LBR callstack support
+	 * implementation and Perf core context switch handling callbacks for usage
+	 * examples.
+	 */
+	void (*swap_task_ctx)		(struct perf_event_context *prev,
+					 struct perf_event_context *next);
+					/* optional */

 	/*
 	 * Set up pmu-private data structures for an AUX area
-- cgit v1.2.3

From a44399703b4893de4eadb970867fd5efd4461514 Mon Sep 17 00:00:00 2001
From: Alexey Budankov
Date: Wed, 23 Oct 2019 10:11:54 +0300
Subject: perf/x86: Install platform specific ->swap_task_ctx() adapter

Bridge perf core and x86 swap_task_ctx() method calls.

Signed-off-by: Alexey Budankov
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Arnaldo Carvalho de Melo
Cc: Ian Rogers
Cc: Jiri Olsa
Cc: Kan Liang
Cc: Linus Torvalds
Cc: Mark Rutland
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: Song Liu
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Link: https://lkml.kernel.org/r/b157e97d-32c3-aeaf-13ba-47350c677906@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/core.c | 8 ++++++++
 1 file changed, 8 insertions(+)

(limited to 'arch/x86/events')

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 7b21455d7504..6e3f0c18908e 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -2243,6 +2243,13 @@ static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
 		x86_pmu.sched_task(ctx, sched_in);
 }

+static void x86_pmu_swap_task_ctx(struct perf_event_context *prev,
+				  struct perf_event_context *next)
+{
+	if (x86_pmu.swap_task_ctx)
+		x86_pmu.swap_task_ctx(prev, next);
+}
+
 void perf_check_microcode(void)
 {
 	if (x86_pmu.check_microcode)
@@ -2297,6 +2304,7 @@ static struct pmu pmu = {
 	.event_idx		= x86_pmu_event_idx,
 	.sched_task		= x86_pmu_sched_task,
 	.task_ctx_size		= sizeof(struct x86_perf_task_context),
+	.swap_task_ctx		= x86_pmu_swap_task_ctx,
 	.check_period		= x86_pmu_check_period,

 	.aux_output_match	= x86_pmu_aux_output_match,
-- cgit v1.2.3

From 421ca868ea3b7c1ca1a541ed6dff3c101a563b95 Mon Sep 17 00:00:00 2001
From: Alexey Budankov
Date: Wed, 23 Oct 2019 10:12:54 +0300
Subject: perf/x86/intel: Implement LBR callstack context synchronization

Implement the intel_pmu_lbr_swap_task_ctx() method, which updates the
counters of the events that requested LBR callstack data on a sample.

The counter can be zero when the task context belongs to a thread that
has just returned from a block on a futex and the context contains
saved (lbr_stack_state == LBR_VALID) LBR register values. For those
values to be restored into the LBR registers on the thread's next
switch-in event, the method swaps the counter value with the one that
is expected to be non-zero in the previous equivalent task perf event
context. Using a swap operation keeps the previous task perf event
context consistent with the number of events that requested LBR
callstack data on a sample.
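
For context, the reason the counter matters on switch-in is the gating
check in the pre-existing restore path in the same file. A condensed
sketch of that existing code (simplified from __intel_pmu_lbr_restore();
not part of this patch):

	/*
	 * Condensed sketch: with lbr_callstack_users == 0, the saved LBR
	 * values are never written back to the LBR registers; the stack
	 * is reset instead. Swapping the counters therefore keeps the
	 * saved state reachable in the context that actually wants it.
	 */
	if (task_ctx->lbr_callstack_users == 0 ||
	    task_ctx->lbr_stack_state == LBR_NONE) {
		intel_pmu_lbr_reset();
		return;
	}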
Signed-off-by: Alexey Budankov
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Arnaldo Carvalho de Melo
Cc: Ian Rogers
Cc: Jiri Olsa
Cc: Kan Liang
Cc: Linus Torvalds
Cc: Mark Rutland
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: Song Liu
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Link: https://lkml.kernel.org/r/261ac742-9022-c3f4-5885-1eae7415b091@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/lbr.c  | 23 +++++++++++++++++++++++
 arch/x86/events/perf_event.h |  3 +++
 2 files changed, 26 insertions(+)

(limited to 'arch/x86/events')

diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index ea54634eabf3..534c76606049 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -417,6 +417,29 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 	cpuc->last_log_id = ++task_ctx->log_id;
 }

+void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
+				 struct perf_event_context *next)
+{
+	struct x86_perf_task_context *prev_ctx_data, *next_ctx_data;
+
+	swap(prev->task_ctx_data, next->task_ctx_data);
+
+	/*
+	 * Architecture specific synchronization makes sense in
+	 * case both prev->task_ctx_data and next->task_ctx_data
+	 * pointers are allocated.
+	 */
+
+	prev_ctx_data = next->task_ctx_data;
+	next_ctx_data = prev->task_ctx_data;
+
+	if (!prev_ctx_data || !next_ctx_data)
+		return;
+
+	swap(prev_ctx_data->lbr_callstack_users,
+	     next_ctx_data->lbr_callstack_users);
+}
+
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 5384317eaa16..930611db8f9a 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -1024,6 +1024,9 @@ void intel_pmu_store_pebs_lbrs(struct pebs_lbr *lbr);

 void intel_ds_init(void);

+void intel_pmu_lbr_swap_task_ctx(struct perf_event_context *prev,
+				 struct perf_event_context *next);
+
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);

 u64 lbr_from_signext_quirk_wr(u64 val);
-- cgit v1.2.3

From c2b98a8661514f29a44ebd0925cf4b1503beb48c Mon Sep 17 00:00:00 2001
From: Alexey Budankov
Date: Wed, 23 Oct 2019 10:13:56 +0300
Subject: perf/x86: Synchronize PMU task contexts on optimized context switches

Install the Intel-specific PMU task context synchronization adapter and
extend the optimized context switch path with PMU-specific task context
synchronization, to fix LBR callstack virtualization on context
switches.
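
For orientation, the resulting call path on an optimized context switch
between equivalent task contexts, assembled from the diffs in this patch
and the two preceding ones (a simplified sketch):

	/*
	 * perf_event_context_sched_out()                  kernel/events/core.c
	 *   if (context_equiv(ctx, next_ctx)) {
	 *     if (pmu->swap_task_ctx)
	 *       pmu->swap_task_ctx(ctx, next_ctx);
	 *         == x86_pmu_swap_task_ctx()               arch/x86/events/core.c
	 *           -> x86_pmu.swap_task_ctx()
	 *             == intel_pmu_swap_task_ctx()         arch/x86/events/intel/core.c
	 *               -> intel_pmu_lbr_swap_task_ctx()   arch/x86/events/intel/lbr.c
	 *     else
	 *       swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
	 *   }
	 */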
Signed-off-by: Alexey Budankov
Signed-off-by: Peter Zijlstra (Intel)
Cc: Alexander Shishkin
Cc: Arnaldo Carvalho de Melo
Cc: Arnaldo Carvalho de Melo
Cc: Ian Rogers
Cc: Jiri Olsa
Cc: Kan Liang
Cc: Linus Torvalds
Cc: Mark Rutland
Cc: Namhyung Kim
Cc: Peter Zijlstra
Cc: Song Liu
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Link: https://lkml.kernel.org/r/9c6445a9-bdba-ef03-3859-f1f91198f27a@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/core.c |  7 +++++++
 kernel/events/core.c         | 13 ++++++++++++-
 2 files changed, 19 insertions(+), 1 deletion(-)

(limited to 'arch/x86/events')

diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index bbf6588d47ee..dc64b16e6b71 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3820,6 +3820,12 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
 	intel_pmu_lbr_sched_task(ctx, sched_in);
 }

+static void intel_pmu_swap_task_ctx(struct perf_event_context *prev,
+				    struct perf_event_context *next)
+{
+	intel_pmu_lbr_swap_task_ctx(prev, next);
+}
+
 static int intel_pmu_check_period(struct perf_event *event, u64 value)
 {
 	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
@@ -3955,6 +3961,7 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.guest_get_msrs		= intel_guest_get_msrs,

 	.sched_task		= intel_pmu_sched_task,
+	.swap_task_ctx		= intel_pmu_swap_task_ctx,

 	.check_period		= intel_pmu_check_period,

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0940c8810be0..f48d38b55e7b 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3204,10 +3204,21 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
 		raw_spin_lock(&ctx->lock);
 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
 		if (context_equiv(ctx, next_ctx)) {
+			struct pmu *pmu = ctx->pmu;
+
 			WRITE_ONCE(ctx->task, next);
 			WRITE_ONCE(next_ctx->task, task);

-			swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
+			/*
+			 * PMU specific parts of task perf context can require
+			 * additional synchronization. As an example of such
+			 * synchronization see implementation details of Intel
+			 * LBR call stack data profiling;
+			 */
+			if (pmu->swap_task_ctx)
+				pmu->swap_task_ctx(ctx, next_ctx);
+			else
+				swap(ctx->task_ctx_data, next_ctx->task_ctx_data);

 			/*
 			 * RCU_INIT_POINTER here is safe because we've not
-- cgit v1.2.3

From 8f05c1ff8bfb8cbae0898e5dc6791927d1e5c503 Mon Sep 17 00:00:00 2001
From: Zheng Yongjun
Date: Sun, 10 Nov 2019 17:44:53 +0800
Subject: perf/x86/amd: Remove set but not used variable 'active'

'-Wunused-but-set-variable' triggers this warning:

  arch/x86/events/amd/core.c: In function amd_pmu_handle_irq:
  arch/x86/events/amd/core.c:656:6: warning: variable active set but not used [-Wunused-but-set-variable]

GCC is right, 'active' is not used anymore: the variable was introduced
earlier this year, and its last use was then removed in:

  df4d29732fdad perf/x86/amd: Change/fix NMI latency mitigation to use a timestamp

[ mingo: Improved the changelog, fixed build warning caused by this fix,
  improved surrounding code.
]

Reported-by: Hulk Robot
Signed-off-by: Zheng Yongjun
Cc: Linus Torvalds
Cc: Peter Zijlstra
Cc: Thomas Gleixner
Link: https://lkml.kernel.org/r/20191110094453.113001-1-zhengyongjun3@huawei.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/amd/core.c | 13 ++-----------
 1 file changed, 2 insertions(+), 11 deletions(-)

(limited to 'arch/x86/events')

diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 64c3e70b0556..a7752cd78b89 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -652,15 +652,7 @@ static void amd_pmu_disable_event(struct perf_event *event)
  */
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
-	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
-	int active, handled;
-
-	/*
-	 * Obtain the active count before calling x86_pmu_handle_irq() since
-	 * it is possible that x86_pmu_handle_irq() may make a counter
-	 * inactive (through x86_pmu_stop).
-	 */
-	active = __bitmap_weight(cpuc->active_mask, X86_PMC_IDX_MAX);
+	int handled;

 	/* Process any counter overflows */
 	handled = x86_pmu_handle_irq(regs);
@@ -670,8 +662,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 	 * NMIs will be claimed if arriving within that window.
 	 */
 	if (handled) {
-		this_cpu_write(perf_nmi_tstamp,
-			       jiffies + perf_nmi_window);
+		this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);

 		return handled;
 	}
-- cgit v1.2.3

From 8e105a1fc2a02d78698834974083c980d2e5b513 Mon Sep 17 00:00:00 2001
From: Alexander Shishkin
Date: Fri, 25 Oct 2019 17:08:34 +0300
Subject: perf/x86/intel/pt: Factor out pt_config_start()

PT trace is now enabled at the bottom of the event configuration
function that takes care of all configuration bits related to a given
event, including the address filter update. This is only needed when
the event configuration changes, that is, in ->add()/->start(). In the
interrupt path we can use a lighter version that keeps the
configuration intact, since it hasn't changed, and only flips the
enable bit.
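
Schematically, the two enable paths after this change (a sketch based
on the diff below, not additional code):

	/*
	 * ->add()/->start() path -- configuration may have changed:
	 *   pt_event_start()
	 *     -> pt_config(event)        reprogram filters and RTIT_CTL ...
	 *          -> pt_config_start()  ... then set RTIT_CTL.TraceEn
	 *
	 * PMI path -- configuration is unchanged:
	 *   intel_pt_interrupt()
	 *     -> pt_config_start(event)  only set RTIT_CTL.TraceEn again
	 */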
Signed-off-by: Alexander Shishkin Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: David Ahern Cc: Jiri Olsa Cc: Linus Torvalds Cc: Mark Rutland Cc: Namhyung Kim Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: adrian.hunter@intel.com Cc: mathieu.poirier@linaro.org Link: https://lkml.kernel.org/r/20191025140835.53665-3-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/pt.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'arch/x86/events') diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 05e43d0f430b..170f3b402274 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -397,6 +397,20 @@ static bool pt_event_valid(struct perf_event *event) * These all are cpu affine and operate on a local PT */ +static void pt_config_start(struct perf_event *event) +{ + struct pt *pt = this_cpu_ptr(&pt_ctx); + u64 ctl = event->hw.config; + + ctl |= RTIT_CTL_TRACEEN; + if (READ_ONCE(pt->vmx_on)) + perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL); + else + wrmsrl(MSR_IA32_RTIT_CTL, ctl); + + WRITE_ONCE(event->hw.config, ctl); +} + /* Address ranges and their corresponding msr configuration registers */ static const struct pt_address_range { unsigned long msr_a; @@ -468,7 +482,6 @@ static u64 pt_config_filters(struct perf_event *event) static void pt_config(struct perf_event *event) { - struct pt *pt = this_cpu_ptr(&pt_ctx); u64 reg; /* First round: clear STATUS, in particular the PSB byte counter. */ @@ -501,10 +514,7 @@ static void pt_config(struct perf_event *event) reg |= (event->attr.config & PT_CONFIG_MASK); event->hw.config = reg; - if (READ_ONCE(pt->vmx_on)) - perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_PARTIAL); - else - wrmsrl(MSR_IA32_RTIT_CTL, reg); + pt_config_start(event); } static void pt_config_stop(struct perf_event *event) @@ -1381,7 +1391,7 @@ void intel_pt_interrupt(void) pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx, buf->output_off); - pt_config(event); + pt_config_start(event); } } -- cgit v1.2.3 From 25e8920b301c133aeaa9f57d81295bf4ac78e17b Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Fri, 25 Oct 2019 17:08:35 +0300 Subject: perf/x86/intel/pt: Add sampling support Add AUX sampling support to the PT PMU: implement an NMI-safe callback that takes a snapshot of the buffer without touching the event states. This is done for PT events that don't use PMIs, that is, snapshot mode (RO mapping of the AUX area). Signed-off-by: Alexander Shishkin Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: David Ahern Cc: Jiri Olsa Cc: Linus Torvalds Cc: Mark Rutland Cc: Namhyung Kim Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Cc: adrian.hunter@intel.com Cc: mathieu.poirier@linaro.org Link: https://lkml.kernel.org/r/20191025140835.53665-4-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/pt.c | 54 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 54 insertions(+) (limited to 'arch/x86/events') diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 170f3b402274..2f20d5a333c1 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -1208,6 +1208,13 @@ pt_buffer_setup_aux(struct perf_event *event, void **pages, if (!nr_pages) return NULL; + /* + * Only support AUX sampling in snapshot mode, where we don't + * generate NMIs. 
+	 */
+	if (event->attr.aux_sample_size && !snapshot)
+		return NULL;
+
 	if (cpu == -1)
 		cpu = raw_smp_processor_id();

 	node = cpu_to_node(cpu);
@@ -1506,6 +1513,52 @@ static void pt_event_stop(struct perf_event *event, int mode)
 	}
 }

+static long pt_event_snapshot_aux(struct perf_event *event,
+				  struct perf_output_handle *handle,
+				  unsigned long size)
+{
+	struct pt *pt = this_cpu_ptr(&pt_ctx);
+	struct pt_buffer *buf = perf_get_aux(&pt->handle);
+	unsigned long from = 0, to;
+	long ret;
+
+	if (WARN_ON_ONCE(!buf))
+		return 0;
+
+	/*
+	 * Sampling is only allowed on snapshot events;
+	 * see pt_buffer_setup_aux().
+	 */
+	if (WARN_ON_ONCE(!buf->snapshot))
+		return 0;
+
+	/*
+	 * Here, handle_nmi tells us if the tracing is on
+	 */
+	if (READ_ONCE(pt->handle_nmi))
+		pt_config_stop(event);
+
+	pt_read_offset(buf);
+	pt_update_head(pt);
+
+	to = local_read(&buf->data_size);
+	if (to < size)
+		from = buf->nr_pages << PAGE_SHIFT;
+	from += to - size;
+
+	ret = perf_output_copy_aux(&pt->handle, handle, from, to);
+
+	/*
+	 * If the tracing was on when we turned up, restart it.
+	 * Compiler barrier not needed as we couldn't have been
+	 * preempted by anything that touches pt->handle_nmi.
+	 */
+	if (pt->handle_nmi)
+		pt_config_start(event);
+
+	return ret;
+}
+
 static void pt_event_del(struct perf_event *event, int mode)
 {
 	pt_event_stop(event, PERF_EF_UPDATE);
@@ -1625,6 +1678,7 @@ static __init int pt_init(void)
 	pt_pmu.pmu.del			 = pt_event_del;
 	pt_pmu.pmu.start		 = pt_event_start;
 	pt_pmu.pmu.stop			 = pt_event_stop;
+	pt_pmu.pmu.snapshot_aux		 = pt_event_snapshot_aux;
 	pt_pmu.pmu.read			 = pt_event_read;
 	pt_pmu.pmu.setup_aux		 = pt_buffer_setup_aux;
 	pt_pmu.pmu.free_aux		 = pt_buffer_free_aux;
-- cgit v1.2.3

From 670638477aede0d7a355ced04b569214aa3feacd Mon Sep 17 00:00:00 2001
From: Alexander Shishkin
Date: Tue, 5 Nov 2019 10:27:00 +0200
Subject: perf/x86/intel/pt: Opportunistically use single range output mode

Most PT implementations support Single Range Output mode, which is an
alternative to ToPA that can be used when the trace buffer is a single
contiguous allocation and no interrupt is required, that is, in AUX
snapshot mode. Now that the perf core will use high-order allocations
for the AUX buffer, in many cases the first condition will also be
satisfied.

The two most obvious benefits of the Single Range Output mode over the
ToPA are:

 * not having to allocate the ToPA table(s),
 * not using the ToPA walk hardware.

Make use of this functionality where available and appropriate.

Signed-off-by: Alexander Shishkin
Signed-off-by: Peter Zijlstra (Intel)
Cc: Arnaldo Carvalho de Melo
Cc: David Ahern
Cc: Jiri Olsa
Cc: Jiri Olsa
Cc: Linus Torvalds
Cc: Mark Rutland
Cc: Namhyung Kim
Cc: Stephane Eranian
Cc: Thomas Gleixner
Cc: Vince Weaver
Link: https://lkml.kernel.org/r/20191105082701.78442-2-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar
---
 arch/x86/events/intel/pt.c | 118 ++++++++++++++++++++++++++++++++++-----------
 arch/x86/events/intel/pt.h |   2 +
 2 files changed, 92 insertions(+), 28 deletions(-)

(limited to 'arch/x86/events')

diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 2f20d5a333c1..c87d163c2917 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -482,6 +482,8 @@ static u64 pt_config_filters(struct perf_event *event)

 static void pt_config(struct perf_event *event)
 {
+	struct pt *pt = this_cpu_ptr(&pt_ctx);
+	struct pt_buffer *buf = perf_get_aux(&pt->handle);
 	u64 reg;

 	/* First round: clear STATUS, in particular the PSB byte counter.
*/ @@ -491,7 +493,9 @@ static void pt_config(struct perf_event *event) } reg = pt_config_filters(event); - reg |= RTIT_CTL_TOPA | RTIT_CTL_TRACEEN; + reg |= RTIT_CTL_TRACEEN; + if (!buf->single) + reg |= RTIT_CTL_TOPA; /* * Previously, we had BRANCH_EN on by default, but now that PT has @@ -543,18 +547,6 @@ static void pt_config_stop(struct perf_event *event) wmb(); } -static void pt_config_buffer(void *buf, unsigned int topa_idx, - unsigned int output_off) -{ - u64 reg; - - wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(buf)); - - reg = 0x7f | ((u64)topa_idx << 7) | ((u64)output_off << 32); - - wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg); -} - /** * struct topa - ToPA metadata * @list: linkage to struct pt_buffer's list of tables @@ -612,6 +604,26 @@ static inline phys_addr_t topa_pfn(struct topa *topa) #define TOPA_ENTRY_SIZE(t, i) (sizes(TOPA_ENTRY((t), (i))->size)) #define TOPA_ENTRY_PAGES(t, i) (1 << TOPA_ENTRY((t), (i))->size) +static void pt_config_buffer(struct pt_buffer *buf) +{ + u64 reg, mask; + void *base; + + if (buf->single) { + base = buf->data_pages[0]; + mask = (buf->nr_pages * PAGE_SIZE - 1) >> 7; + } else { + base = topa_to_page(buf->cur)->table; + mask = (u64)buf->cur_idx; + } + + wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(base)); + + reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32); + + wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg); +} + /** * topa_alloc() - allocate page-sized ToPA table * @cpu: CPU on which to allocate. @@ -812,6 +824,11 @@ static void pt_update_head(struct pt *pt) struct pt_buffer *buf = perf_get_aux(&pt->handle); u64 topa_idx, base, old; + if (buf->single) { + local_set(&buf->data_size, buf->output_off); + return; + } + /* offset of the first region in this table from the beginning of buf */ base = buf->cur->offset + buf->output_off; @@ -913,18 +930,21 @@ static void pt_handle_status(struct pt *pt) */ static void pt_read_offset(struct pt_buffer *buf) { - u64 offset, base_topa; + u64 offset, base; struct topa_page *tp; - rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base_topa); - tp = phys_to_virt(base_topa); - buf->cur = &tp->topa; + if (!buf->single) { + rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base); + tp = phys_to_virt(base); + buf->cur = &tp->topa; + } rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset); /* offset within current output region */ buf->output_off = offset >> 32; /* index of current output region within this table */ - buf->cur_idx = (offset & 0xffffff80) >> 7; + if (!buf->single) + buf->cur_idx = (offset & 0xffffff80) >> 7; } static struct topa_entry * @@ -1040,6 +1060,9 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf, unsigned long head = local64_read(&buf->head); unsigned long idx, npages, wakeup; + if (buf->single) + return 0; + /* can't stop in the middle of an output region */ if (buf->output_off + handle->size + 1 < pt_buffer_region_size(buf)) { perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); @@ -1121,13 +1144,17 @@ static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head) if (buf->snapshot) head &= (buf->nr_pages << PAGE_SHIFT) - 1; - pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1); - te = pt_topa_entry_for_page(buf, pg); + if (!buf->single) { + pg = (head >> PAGE_SHIFT) & (buf->nr_pages - 1); + te = pt_topa_entry_for_page(buf, pg); - cur_tp = topa_entry_to_page(te); - buf->cur = &cur_tp->topa; - buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0); - buf->output_off = head & (pt_buffer_region_size(buf) - 1); + cur_tp = topa_entry_to_page(te); + buf->cur = &cur_tp->topa; + buf->cur_idx = te - TOPA_ENTRY(buf->cur, 0); + 
buf->output_off = head & (pt_buffer_region_size(buf) - 1); + } else { + buf->output_off = head; + } local64_set(&buf->head, head); local_set(&buf->data_size, 0); @@ -1141,6 +1168,9 @@ static void pt_buffer_fini_topa(struct pt_buffer *buf) { struct topa *topa, *iter; + if (buf->single) + return; + list_for_each_entry_safe(topa, iter, &buf->tables, list) { /* * right now, this is in free_aux() path only, so @@ -1186,6 +1216,36 @@ static int pt_buffer_init_topa(struct pt_buffer *buf, int cpu, return 0; } +static int pt_buffer_try_single(struct pt_buffer *buf, int nr_pages) +{ + struct page *p = virt_to_page(buf->data_pages[0]); + int ret = -ENOTSUPP, order = 0; + + /* + * We can use single range output mode + * + in snapshot mode, where we don't need interrupts; + * + if the hardware supports it; + * + if the entire buffer is one contiguous allocation. + */ + if (!buf->snapshot) + goto out; + + if (!intel_pt_validate_hw_cap(PT_CAP_single_range_output)) + goto out; + + if (PagePrivate(p)) + order = page_private(p); + + if (1 << order != nr_pages) + goto out; + + buf->single = true; + buf->nr_pages = nr_pages; + ret = 0; +out: + return ret; +} + /** * pt_buffer_setup_aux() - set up topa tables for a PT buffer * @cpu: Cpu on which to allocate, -1 means current. @@ -1230,6 +1290,10 @@ pt_buffer_setup_aux(struct perf_event *event, void **pages, INIT_LIST_HEAD(&buf->tables); + ret = pt_buffer_try_single(buf, nr_pages); + if (!ret) + return buf; + ret = pt_buffer_init_topa(buf, cpu, nr_pages, GFP_KERNEL); if (ret) { kfree(buf); @@ -1396,8 +1460,7 @@ void intel_pt_interrupt(void) return; } - pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx, - buf->output_off); + pt_config_buffer(buf); pt_config_start(event); } } @@ -1461,8 +1524,7 @@ static void pt_event_start(struct perf_event *event, int mode) WRITE_ONCE(pt->handle_nmi, 1); hwc->state = 0; - pt_config_buffer(topa_to_page(buf->cur)->table, buf->cur_idx, - buf->output_off); + pt_config_buffer(buf); pt_config(event); return; diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index 1d2bb7572374..3f7818221b95 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h @@ -64,6 +64,7 @@ struct pt_pmu { * @lost: if data was lost/truncated * @head: logical write offset inside the buffer * @snapshot: if this is for a snapshot/overwrite counter + * @single: use Single Range Output instead of ToPA * @stop_pos: STOP topa entry index * @intr_pos: INT topa entry index * @stop_te: STOP topa entry pointer @@ -80,6 +81,7 @@ struct pt_buffer { local_t data_size; local64_t head; bool snapshot; + bool single; long stop_pos, intr_pos; struct topa_entry *stop_te, *intr_te; void **data_pages; -- cgit v1.2.3 From 295c52ee1485e4dee660fc1a0e6ceed6c803c9d3 Mon Sep 17 00:00:00 2001 From: Alexander Shishkin Date: Tue, 5 Nov 2019 10:27:01 +0200 Subject: perf/x86/intel/pt: Prevent redundant WRMSRs With recent optimizations to AUX and PT buffer management code (high order AUX allocations, opportunistic Single Range Output), it is far more likely now that the output MSRs won't need reprogramming on every sched-in. To avoid needless WRMSRs of those registers, cache their values and only write them when needed. 
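
The idiom is plain register shadowing: keep a software copy of the last
value written and skip the WRMSR when it would be a no-op. A stand-alone
C illustration of the pattern (hypothetical helper names, not kernel
code):

	#include <stdint.h>
	#include <stdbool.h>

	struct shadowed_msr {
		uint64_t shadow;	/* last value written to the register */
		bool	 valid;		/* shadow holds a real value */
	};

	/* Perform the expensive write only when the value actually changes. */
	static inline void msr_write_cached(struct shadowed_msr *m, uint64_t val,
					    void (*hw_write)(uint64_t))
	{
		if (m->valid && m->shadow == val)
			return;			/* redundant write avoided */
		m->shadow = val;
		m->valid = true;
		hw_write(val);
	}

In the patch below the shadow lives in the per-cpu struct pt
(output_base, output_mask), and pt_read_offset() now reads the hardware
values straight into those fields, so the cache stays coherent when the
trace hardware itself advances the registers.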
Signed-off-by: Alexander Shishkin Signed-off-by: Peter Zijlstra (Intel) Cc: Arnaldo Carvalho de Melo Cc: David Ahern Cc: Jiri Olsa Cc: Jiri Olsa Cc: Linus Torvalds Cc: Mark Rutland Cc: Namhyung Kim Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Link: https://lkml.kernel.org/r/20191105082701.78442-3-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/events/intel/pt.c | 25 ++++++++++++++++--------- arch/x86/events/intel/pt.h | 10 +++++++--- 2 files changed, 23 insertions(+), 12 deletions(-) (limited to 'arch/x86/events') diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index c87d163c2917..1db7a51d9792 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -606,6 +606,7 @@ static inline phys_addr_t topa_pfn(struct topa *topa) static void pt_config_buffer(struct pt_buffer *buf) { + struct pt *pt = this_cpu_ptr(&pt_ctx); u64 reg, mask; void *base; @@ -617,11 +618,17 @@ static void pt_config_buffer(struct pt_buffer *buf) mask = (u64)buf->cur_idx; } - wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, virt_to_phys(base)); + reg = virt_to_phys(base); + if (pt->output_base != reg) { + pt->output_base = reg; + wrmsrl(MSR_IA32_RTIT_OUTPUT_BASE, reg); + } reg = 0x7f | (mask << 7) | ((u64)buf->output_off << 32); - - wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg); + if (pt->output_mask != reg) { + pt->output_mask = reg; + wrmsrl(MSR_IA32_RTIT_OUTPUT_MASK, reg); + } } /** @@ -930,21 +937,21 @@ static void pt_handle_status(struct pt *pt) */ static void pt_read_offset(struct pt_buffer *buf) { - u64 offset, base; + struct pt *pt = this_cpu_ptr(&pt_ctx); struct topa_page *tp; if (!buf->single) { - rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, base); - tp = phys_to_virt(base); + rdmsrl(MSR_IA32_RTIT_OUTPUT_BASE, pt->output_base); + tp = phys_to_virt(pt->output_base); buf->cur = &tp->topa; } - rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, offset); + rdmsrl(MSR_IA32_RTIT_OUTPUT_MASK, pt->output_mask); /* offset within current output region */ - buf->output_off = offset >> 32; + buf->output_off = pt->output_mask >> 32; /* index of current output region within this table */ if (!buf->single) - buf->cur_idx = (offset & 0xffffff80) >> 7; + buf->cur_idx = (pt->output_mask & 0xffffff80) >> 7; } static struct topa_entry * diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h index 3f7818221b95..96906a62aacd 100644 --- a/arch/x86/events/intel/pt.h +++ b/arch/x86/events/intel/pt.h @@ -113,16 +113,20 @@ struct pt_filters { /** * struct pt - per-cpu pt context - * @handle: perf output handle + * @handle: perf output handle * @filters: last configured filters - * @handle_nmi: do handle PT PMI on this cpu, there's an active event - * @vmx_on: 1 if VMX is ON on this cpu + * @handle_nmi: do handle PT PMI on this cpu, there's an active event + * @vmx_on: 1 if VMX is ON on this cpu + * @output_base: cached RTIT_OUTPUT_BASE MSR value + * @output_mask: cached RTIT_OUTPUT_MASK MSR value */ struct pt { struct perf_output_handle handle; struct pt_filters filters; int handle_nmi; int vmx_on; + u64 output_base; + u64 output_mask; }; #endif /* __INTEL_PT_H__ */ -- cgit v1.2.3