author    Thomas Gleixner <tglx@linutronix.de>      2025-10-27 11:44:52 +0300
committer Ingo Molnar <mingo@kernel.org>            2025-11-04 10:32:41 +0300
commit    5412910487d0839111e4f2f3a6f33f6c9af9b007 (patch)
tree      18e9a4a48326a87bb60f2457edadb82d6ae220df /include
parent    dab344753e021fe84c24f9d8b0b63cb5bcf463d7 (diff)
download  linux-5412910487d0839111e4f2f3a6f33f6c9af9b007.tar.xz
rseq: Expose lightweight statistics in debugfs
Analyzing the call frequency of this infrastructure without resorting to tracing is helpful. The overhead is minimal as it just increments a per-CPU counter associated with each operation. The debugfs readout provides a racy sum of all counters.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Reviewed-by: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Link: https://patch.msgid.link/20251027084307.027916598@linutronix.de
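The debugfs plumbing itself is not part of the hunks below (the diffstat is limited to 'include'), but the "racy sum" mentioned above is simply an unlocked walk over the per-CPU counters. The following is a minimal sketch of such a readout, assuming only the struct rseq_stats layout and the rseq_stats per-CPU variable from this patch; the function names, the debugfs file name and its location are illustrative guesses, not the code added by this commit:

#include <linux/debugfs.h>
#include <linux/percpu.h>
#include <linux/rseq_entry.h>
#include <linux/seq_file.h>

/*
 * Sketch: sum the per-CPU counters without any locking. The counters may
 * be incremented concurrently, so the totals are "racy", but that is good
 * enough for frequency analysis.
 */
static int rseq_stats_sketch_show(struct seq_file *m, void *p)
{
	struct rseq_stats sum = { };
	unsigned int cpu;

	for_each_possible_cpu(cpu) {
		struct rseq_stats *stat = per_cpu_ptr(&rseq_stats, cpu);

		sum.exit	+= data_race(stat->exit);
		sum.signal	+= data_race(stat->signal);
		sum.slowpath	+= data_race(stat->slowpath);
		sum.ids		+= data_race(stat->ids);
		sum.cs		+= data_race(stat->cs);
		sum.clear	+= data_race(stat->clear);
		sum.fixup	+= data_race(stat->fixup);
	}

	seq_printf(m, "exit:     %lu\n", sum.exit);
	seq_printf(m, "signal:   %lu\n", sum.signal);
	seq_printf(m, "slowpath: %lu\n", sum.slowpath);
	seq_printf(m, "ids:      %lu\n", sum.ids);
	seq_printf(m, "cs:       %lu\n", sum.cs);
	seq_printf(m, "clear:    %lu\n", sum.clear);
	seq_printf(m, "fixup:    %lu\n", sum.fixup);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(rseq_stats_sketch);

static int __init rseq_stats_sketch_init(void)
{
	/* File name and debugfs location are assumptions for illustration. */
	debugfs_create_file("rseq_stats", 0444, NULL, NULL, &rseq_stats_sketch_fops);
	return 0;
}
late_initcall(rseq_stats_sketch_init);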
Diffstat (limited to 'include')
-rw-r--r--  include/linux/rseq.h        16
-rw-r--r--  include/linux/rseq_entry.h  49
2 files changed, 49 insertions(+), 16 deletions(-)
diff --git a/include/linux/rseq.h b/include/linux/rseq.h
index a200836a6fe3..7f347c3a4af8 100644
--- a/include/linux/rseq.h
+++ b/include/linux/rseq.h
@@ -29,21 +29,6 @@ static inline void rseq_sched_switch_event(struct task_struct *t)
}
}
-static __always_inline void rseq_exit_to_user_mode(void)
-{
- struct rseq_event *ev = &current->rseq.event;
-
- if (IS_ENABLED(CONFIG_DEBUG_RSEQ))
- WARN_ON_ONCE(ev->sched_switch);
-
- /*
- * Ensure that event (especially user_irq) is cleared when the
- * interrupt did not result in a schedule and therefore the
- * rseq processing did not clear it.
- */
- ev->events = 0;
-}
-
/*
* KVM/HYPERV invoke resume_user_mode_work() before entering guest mode,
* which clears TIF_NOTIFY_RESUME. To avoid updating user space RSEQ in
@@ -92,7 +77,6 @@ static inline void rseq_sched_switch_event(struct task_struct *t) { }
static inline void rseq_virt_userspace_exit(void) { }
static inline void rseq_fork(struct task_struct *t, u64 clone_flags) { }
static inline void rseq_execve(struct task_struct *t) { }
-static inline void rseq_exit_to_user_mode(void) { }
#endif /* !CONFIG_RSEQ */
#ifdef CONFIG_DEBUG_RSEQ
diff --git a/include/linux/rseq_entry.h b/include/linux/rseq_entry.h
index 5be507a127eb..ff9080b89be3 100644
--- a/include/linux/rseq_entry.h
+++ b/include/linux/rseq_entry.h
@@ -2,6 +2,37 @@
#ifndef _LINUX_RSEQ_ENTRY_H
#define _LINUX_RSEQ_ENTRY_H
+/* Must be outside the CONFIG_RSEQ guard to resolve the stubs */
+#ifdef CONFIG_RSEQ_STATS
+#include <linux/percpu.h>
+
+struct rseq_stats {
+ unsigned long exit;
+ unsigned long signal;
+ unsigned long slowpath;
+ unsigned long ids;
+ unsigned long cs;
+ unsigned long clear;
+ unsigned long fixup;
+};
+
+DECLARE_PER_CPU(struct rseq_stats, rseq_stats);
+
+/*
+ * Slow path has interrupts and preemption enabled, but the fast path
+ * runs with interrupts disabled so there is no point in having the
+ * preemption checks implied in __this_cpu_inc() for every operation.
+ */
+#ifdef RSEQ_BUILD_SLOW_PATH
+#define rseq_stat_inc(which) this_cpu_inc((which))
+#else
+#define rseq_stat_inc(which) raw_cpu_inc((which))
+#endif
+
+#else /* CONFIG_RSEQ_STATS */
+#define rseq_stat_inc(x) do { } while (0)
+#endif /* !CONFIG_RSEQ_STATS */
+
#ifdef CONFIG_RSEQ
#include <linux/rseq.h>
@@ -39,8 +70,26 @@ static __always_inline void rseq_note_user_irq_entry(void)
current->rseq.event.user_irq = true;
}
+static __always_inline void rseq_exit_to_user_mode(void)
+{
+ struct rseq_event *ev = &current->rseq.event;
+
+ rseq_stat_inc(rseq_stats.exit);
+
+ if (IS_ENABLED(CONFIG_DEBUG_RSEQ))
+ WARN_ON_ONCE(ev->sched_switch);
+
+ /*
+ * Ensure that event (especially user_irq) is cleared when the
+ * interrupt did not result in a schedule and therefore the
+ * rseq processing did not clear it.
+ */
+ ev->events = 0;
+}
+
#else /* CONFIG_RSEQ */
static inline void rseq_note_user_irq_entry(void) { }
+static inline void rseq_exit_to_user_mode(void) { }
#endif /* !CONFIG_RSEQ */
#endif /* _LINUX_RSEQ_ENTRY_H */
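A usage note on the rseq_stat_inc() split introduced above: the fast path runs with interrupts disabled and therefore uses raw_cpu_inc(), while a translation unit built with RSEQ_BUILD_SLOW_PATH gets the preemption-safe this_cpu_inc(). A hypothetical slow-path user (the function name below is made up for illustration; only the macro, the header and the rseq_stats fields come from this patch) would look like this:

/* Hypothetical slow-path compilation unit. */
#define RSEQ_BUILD_SLOW_PATH	/* select this_cpu_inc(); preemption may be enabled here */
#include <linux/rseq_entry.h>

static void rseq_slowpath_example(void)
{
	/* Accounts one slow-path invocation; a no-op without CONFIG_RSEQ_STATS. */
	rseq_stat_inc(rseq_stats.slowpath);
}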