author		Marco Elver <elver@google.com>	2021-04-08 13:36:01 +0300
committer	Peter Zijlstra <peterz@infradead.org>	2021-04-16 17:32:41 +0300
commit		97ba62b278674293762c3d91f724f1bb922f04e0 (patch)
tree		dc0f53789ab2b9329f5dde2092fa549ce6341412 /kernel/events/core.c
parent		fb6cc127e0b6e629252cdd0f77d5a1f49db95b92 (diff)
download	linux-97ba62b278674293762c3d91f724f1bb922f04e0.tar.xz
perf: Add support for SIGTRAP on perf events
Adds bit perf_event_attr::sigtrap, which can be set to cause events to
send SIGTRAP (with si_code TRAP_PERF) to the task where the event
occurred. The primary motivation is to support synchronous signals on
perf events in the task where an event (such as a breakpoint) triggered.

To distinguish perf events based on the event type, the type is set in
si_errno. For events that are associated with an address, si_addr is
copied from perf_sample_data.

The new field perf_event_attr::sig_data is copied to si_perf, which
allows user space to disambiguate which event (of the same type)
triggered the signal. For example, user space could encode the relevant
information it cares about in sig_data.

We note that the choice of an opaque u64 provides the simplest and most
flexible option. Alternatives where a reference to some user space data
is passed back suffer from the problem that modification of referenced
data (be it the event fd, or the perf_event_attr) can race with the
signal being delivered (of course, the same caveat applies if user
space decides to store a pointer in sig_data, but the ABI explicitly
avoids prescribing such a design).

Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Dmitry Vyukov <dvyukov@google.com>
Link: https://lore.kernel.org/lkml/YBv3rAT566k+6zjg@hirez.programming.kicks-ass.net/
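[Editorial illustration, not part of the commit: a minimal user-space
sketch of the resulting ABI, assuming UAPI headers new enough to carry
the sigtrap, remove_on_exec and sig_data fields. The wrapper function,
the watchpoint configuration and MY_SIG_DATA are hypothetical; note
that sigtrap requires a task-bound event and remove_on_exec, per the
checks added in the diff below.]

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Opaque cookie passed back in si_perf; the value is hypothetical. */
#define MY_SIG_DATA 0x1234ULL

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(SYS_perf_event_open, attr, pid, cpu, group_fd, flags);
}

static int sigtrap_watchpoint_fd(void *addr)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_BREAKPOINT;	/* hardware data watchpoint */
	attr.bp_type = HW_BREAKPOINT_W;
	attr.bp_addr = (unsigned long)addr;
	attr.bp_len = HW_BREAKPOINT_LEN_4;
	attr.sample_period = 1;
	attr.sigtrap = 1;			/* deliver SIGTRAP on event */
	attr.remove_on_exec = 1;		/* required with sigtrap */
	attr.sig_data = MY_SIG_DATA;		/* copied to si_perf */

	/* Task-bound (pid 0 = self), any CPU: sigtrap requires a task. */
	return perf_event_open(&attr, 0, -1, -1, PERF_FLAG_FD_CLOEXEC);
}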
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--	kernel/events/core.c	49
1 file changed, 48 insertions(+), 1 deletion(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e4a584bceb7a..6f0723c711a9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6392,6 +6392,33 @@ void perf_event_wakeup(struct perf_event *event)
 	}
 }
 
+static void perf_sigtrap(struct perf_event *event)
+{
+	struct kernel_siginfo info;
+
+	/*
+	 * We'd expect this to only occur if the irq_work is delayed and either
+	 * ctx->task or current has changed in the meantime. This can be the
+	 * case on architectures that do not implement arch_irq_work_raise().
+	 */
+	if (WARN_ON_ONCE(event->ctx->task != current))
+		return;
+
+	/*
+	 * perf_pending_event() can race with the task exiting.
+	 */
+	if (current->flags & PF_EXITING)
+		return;
+
+	clear_siginfo(&info);
+	info.si_signo = SIGTRAP;
+	info.si_code = TRAP_PERF;
+	info.si_errno = event->attr.type;
+	info.si_perf = event->attr.sig_data;
+	info.si_addr = (void __user *)event->pending_addr;
+	force_sig_info(&info);
+}
+
 static void perf_pending_event_disable(struct perf_event *event)
 {
 	int cpu = READ_ONCE(event->pending_disable);
@@ -6401,6 +6428,13 @@ static void perf_pending_event_disable(struct perf_event *event)
 
 	if (cpu == smp_processor_id()) {
 		WRITE_ONCE(event->pending_disable, -1);
+
+		if (event->attr.sigtrap) {
+			perf_sigtrap(event);
+			atomic_set_release(&event->event_limit, 1); /* rearm event */
+			return;
+		}
+
 		perf_event_disable_local(event);
 		return;
 	}
@@ -9103,6 +9137,7 @@ static int __perf_event_overflow(struct perf_event *event,
 	if (events && atomic_dec_and_test(&event->event_limit)) {
 		ret = 1;
 		event->pending_kill = POLL_HUP;
+		event->pending_addr = data->addr;
 		perf_event_disable_inatomic(event);
 	}
 
@@ -11384,6 +11419,10 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		if (!task || cpu != -1)
 			return ERR_PTR(-EINVAL);
 	}
+	if (attr->sigtrap && !task) {
+		/* Requires a task: avoid signalling random tasks. */
+		return ERR_PTR(-EINVAL);
+	}
 
 	node = (cpu >= 0) ? cpu_to_node(cpu) : -1;
 	event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO,
@@ -11432,6 +11471,9 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 
 	event->state		= PERF_EVENT_STATE_INACTIVE;
 
+	if (event->attr.sigtrap)
+		atomic_set(&event->event_limit, 1);
+
 	if (task) {
 		event->attach_state = PERF_ATTACH_TASK;
 		/*
@@ -11710,6 +11752,9 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 	if (attr->remove_on_exec && attr->enable_on_exec)
 		return -EINVAL;
 
+	if (attr->sigtrap && !attr->remove_on_exec)
+		return -EINVAL;
+
 out:
 	return ret;
 
@@ -12936,7 +12981,9 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent,
 	struct perf_event_context *child_ctx;
 
 	if (!event->attr.inherit ||
-	    (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD))) {
+	    (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) ||
+	    /* Do not inherit if sigtrap and signal handlers were cleared. */
+	    (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) {
 		*inherited_all = 0;
 		return 0;
 	}
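[Editorial illustration, not part of the commit: the matching signal
handler side, assuming a libc whose siginfo_t already exposes the new
si_perf field (later kernels renamed it). The handler and helper names
are hypothetical, MY_SIG_DATA is the cookie from the earlier sketch,
and only async-signal-safe work is done in the handler.]

#include <signal.h>
#include <string.h>

#ifndef TRAP_PERF
#define TRAP_PERF 6	/* si_code value this commit uses for perf events */
#endif

static volatile sig_atomic_t perf_sig_seen;

static void sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
{
	(void)sig;
	(void)ucontext;

	if (info->si_code != TRAP_PERF)
		return;		/* SIGTRAP from some other source */

	/*
	 * si_errno carries perf_event_attr::type, si_addr the sampled
	 * address (if the event has one), and si_perf the opaque
	 * perf_event_attr::sig_data cookie.
	 */
	if (info->si_perf == MY_SIG_DATA)
		perf_sig_seen = 1;
}

static void install_sigtrap_handler(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = sigtrap_handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGTRAP, &sa, NULL);
}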