author		Marco Elver <elver@google.com>			2020-02-25 17:32:58 +0300
committer	Paul E. McKenney <paulmck@kernel.org>	2020-03-25 19:56:00 +0300
commit		44656d3dc4f0dc20010d054f27397a4a1469fabf (patch)
tree		3cc0c8938304bb29992524788e001270d1afb4ca /kernel/kcsan/debugfs.c
parent		2402d0eae589a31ee7b1774cb220d84d0f5605b4 (diff)
download	linux-44656d3dc4f0dc20010d054f27397a4a1469fabf.tar.xz
kcsan: Add current->state to implicitly atomic accesses
Add volatile current->state to the list of implicitly atomic accesses. This is in preparation for eventually enabling KCSAN on kernel/sched (which currently still has KCSAN_SANITIZE := n).

Since accesses that match the special check in atomic.h are rare, it makes more sense to move this check to the slow-path, avoiding the additional compare in the fast-path. With the microbenchmark, a speedup of ~6% is measured.

Signed-off-by: Marco Elver <elver@google.com>
Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
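For context, the special check referred to above lives in kernel/kcsan/atomic.h. A minimal sketch of its shape after this change (paraphrased for illustration; not the verbatim kernel source):

static __always_inline bool kcsan_is_atomic(const volatile void *ptr)
{
	/*
	 * Accesses to jiffies and current->state are treated as implicitly
	 * atomic; per this commit, the slow-path consults this check before
	 * setting up a watchpoint, keeping the fast-path free of the compare.
	 */
	return ptr == &jiffies || ptr == &current->state;
}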
Diffstat (limited to 'kernel/kcsan/debugfs.c')
-rw-r--r--	kernel/kcsan/debugfs.c	27
1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index 2ff196123977..72ee188ebc54 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -74,25 +74,34 @@ void kcsan_counter_dec(enum kcsan_counter_id id)
*/
static noinline void microbenchmark(unsigned long iters)
{
+ const struct kcsan_ctx ctx_save = current->kcsan_ctx;
+ const bool was_enabled = READ_ONCE(kcsan_enabled);
cycles_t cycles;
+ /* We may have been called from an atomic region; reset context. */
+ memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
+ /*
+ * Disable to benchmark fast-path for all accesses, and (expected
+ * negligible) call into slow-path, but never set up watchpoints.
+ */
+ WRITE_ONCE(kcsan_enabled, false);
+
pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
cycles = get_cycles();
while (iters--) {
- /*
- * We can run this benchmark from multiple tasks; this address
- * calculation increases likelyhood of some accesses
- * overlapping. Make the access type an atomic read, to never
- * set up watchpoints and test the fast-path only.
- */
- unsigned long addr =
- iters % (CONFIG_KCSAN_NUM_WATCHPOINTS * PAGE_SIZE);
- __kcsan_check_access((void *)addr, sizeof(long), KCSAN_ACCESS_ATOMIC);
+ unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
+ int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
+ (!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
+ __kcsan_check_access((void *)addr, sizeof(long), type);
}
cycles = get_cycles() - cycles;
pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
+
+ WRITE_ONCE(kcsan_enabled, was_enabled);
+ /* restore context */
+ current->kcsan_ctx = ctx_save;
}
/*
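A note on the new access pattern in the loop above: the masks make roughly 1 in 128 iterations an atomic access and roughly 1 in 18 a write, with the remainder plain reads, while addresses wrap within a (PAGE_SIZE << 8)-byte window so concurrent benchmark tasks still overlap. A standalone userspace sketch (hypothetical; not kernel code) that reproduces the same type-selection logic and counts the resulting distribution:

#include <stdio.h>

int main(void)
{
	unsigned long atomic = 0, write = 0, plain = 0;
	unsigned long iters = 1000000;

	while (iters--) {
		if (!(iters & 0x7f))
			atomic++;	/* would pass KCSAN_ACCESS_ATOMIC */
		else if (!(iters & 0xf))
			write++;	/* would pass KCSAN_ACCESS_WRITE */
		else
			plain++;	/* plain read, type 0 */
	}
	printf("atomic: %lu write: %lu plain: %lu\n", atomic, write, plain);
	return 0;
}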