path: root/kernel/kcsan/core.c
Diffstat (limited to 'kernel/kcsan/core.c')
-rw-r--r--  kernel/kcsan/core.c  44
1 file changed, 38 insertions(+), 6 deletions(-)
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 82c2bef827d4..87ef01e40199 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -56,7 +56,7 @@ static DEFINE_PER_CPU(struct kcsan_ctx, kcsan_cpu_ctx) = {
/*
* SLOT_IDX_FAST is used in the fast-path. Not first checking the address's primary
- * slot (middle) is fine if we assume that data races occur rarely. The set of
+ * slot (middle) is fine if we assume that races occur rarely. The set of
* indices {SLOT_IDX(slot, i) | i in [0, NUM_SLOTS)} is equivalent to
* {SLOT_IDX_FAST(slot, i) | i in [0, NUM_SLOTS)}.
*/
@@ -178,6 +178,14 @@ is_atomic(const volatile void *ptr, size_t size, int type)
if ((type & KCSAN_ACCESS_ATOMIC) != 0)
return true;
+ /*
+ * Unless explicitly declared atomic, never consider an assertion access
+ * as atomic. This allows using them also in atomic regions, such as
+ * seqlocks, without implicitly changing their semantics.
+ */
+ if ((type & KCSAN_ACCESS_ASSERT) != 0)
+ return false;
+
if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
(type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) &&
IS_ALIGNED((unsigned long)ptr, size))
@@ -298,7 +306,11 @@ static noinline void kcsan_found_watchpoint(const volatile void *ptr,
*/
kcsan_counter_inc(KCSAN_COUNTER_REPORT_RACES);
}
- kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);
+
+ if ((type & KCSAN_ACCESS_ASSERT) != 0)
+ kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+ else
+ kcsan_counter_inc(KCSAN_COUNTER_DATA_RACES);
user_access_restore(flags);
}
@@ -307,6 +319,7 @@ static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
+ const bool is_assert = (type & KCSAN_ACCESS_ASSERT) != 0;
atomic_long_t *watchpoint;
union {
u8 _1;
@@ -429,13 +442,32 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
/*
* No need to increment 'data_races' counter, as the racing
* thread already did.
+ *
+ * Count 'assert_failures' for each failed ASSERT access,
+ * therefore both this thread and the racing thread may
+ * increment this counter.
*/
- kcsan_report(ptr, size, type, size > 8 || value_change,
- smp_processor_id(), KCSAN_REPORT_RACE_SIGNAL);
+ if (is_assert)
+ kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+
+ /*
+ * - If we were not able to observe a value change due to size
+ * constraints, always assume a value change.
+ * - If the access type is an assertion, we also always assume a
+ * value change to always report the race.
+ */
+ value_change = value_change || size > 8 || is_assert;
+
+ kcsan_report(ptr, size, type, value_change, smp_processor_id(),
+ KCSAN_REPORT_RACE_SIGNAL);
} else if (value_change) {
/* Inferring a race, since the value should not have changed. */
+
kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
- if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
+ if (is_assert)
+ kcsan_counter_inc(KCSAN_COUNTER_ASSERT_FAILURES);
+
+ if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assert)
kcsan_report(ptr, size, type, true,
smp_processor_id(),
KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
@@ -471,7 +503,7 @@ static __always_inline void check_access(const volatile void *ptr, size_t size,
&encoded_watchpoint);
/*
* It is safe to check kcsan_is_enabled() after find_watchpoint in the
- * slow-path, as long as no state changes that cause a data race to be
+ * slow-path, as long as no state changes that cause a race to be
* detected and reported have occurred until kcsan_is_enabled() is
* checked.
*/
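
For context, the behaviour this patch introduces can be summarised as: an access marked KCSAN_ACCESS_ASSERT is never treated as atomic (so assertions keep their meaning inside atomic regions such as seqlocks), a race on such an access increments the assert-failures counter instead of the data-races counter, and a report is forced even when no value change could be observed. The following is a minimal user-space sketch of that policy, not kernel code; the flag values and helper names are illustrative and do not match the kernel's definitions.

/*
 * Stand-alone model of the decisions added by this patch. Flag values and
 * helper names are illustrative only.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define KCSAN_ACCESS_WRITE  (1 << 0)
#define KCSAN_ACCESS_ATOMIC (1 << 1)
#define KCSAN_ACCESS_ASSERT (1 << 2)

/* Mirrors the ordering in is_atomic(): ATOMIC wins, then ASSERT opts out. */
static bool access_is_atomic(int type)
{
	if (type & KCSAN_ACCESS_ATOMIC)
		return true;
	if (type & KCSAN_ACCESS_ASSERT)
		return false;
	/* Remaining heuristics (aligned plain writes, etc.) elided. */
	return false;
}

/* Which counter a detected race increments, as in kcsan_found_watchpoint(). */
static const char *race_counter(int type)
{
	return (type & KCSAN_ACCESS_ASSERT) ? "assert_failures" : "data_races";
}

/* Whether a report is emitted, per the value_change logic in this patch. */
static bool should_report(bool value_change, size_t size, int type)
{
	return value_change || size > 8 || (type & KCSAN_ACCESS_ASSERT) != 0;
}

int main(void)
{
	int assert_access = KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE;

	printf("atomic: %d, counter: %s, report without value change: %d\n",
	       access_is_atomic(assert_access),
	       race_counter(assert_access),
	       should_report(false, sizeof(long), assert_access));
	return 0;
}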