Diffstat (limited to 'mm/kmsan')

 mm/kmsan/hooks.c           | 1 +
 mm/kmsan/instrumentation.c | 8 +++++---
 mm/kmsan/kmsan_test.c      | 4 ++++
 3 files changed, 10 insertions(+), 3 deletions(-)
diff --git a/mm/kmsan/hooks.c b/mm/kmsan/hooks.c
index 35f6b6e6a908..3807502766a3 100644
--- a/mm/kmsan/hooks.c
+++ b/mm/kmsan/hooks.c
@@ -260,6 +260,7 @@ void kmsan_handle_urb(const struct urb *urb, bool is_out)
                                                urb->transfer_buffer_length,
                                                /*checked*/ false);
 }
+EXPORT_SYMBOL_GPL(kmsan_handle_urb);
 
 static void kmsan_handle_dma_page(const void *addr, size_t size,
                                   enum dma_data_direction dir)
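
Context, not part of this patch: EXPORT_SYMBOL_GPL() makes kmsan_handle_urb()
reachable from GPL modules, where previously only built-in code could call it.
A minimal, hypothetical sketch of a modular caller (my_urb_complete() is an
invented name); the hook is declared in <linux/kmsan.h> and compiles to a
no-op stub when CONFIG_KMSAN=n:

  #include <linux/kmsan.h>
  #include <linux/usb.h>

  static void my_urb_complete(struct urb *urb)
  {
          /* Checks the buffer for output URBs, unpoisons it for input URBs. */
          kmsan_handle_urb(urb, usb_urb_dir_out(urb));
  }
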
diff --git a/mm/kmsan/instrumentation.c b/mm/kmsan/instrumentation.c
index 271f135f97a1..770fe02904f3 100644
--- a/mm/kmsan/instrumentation.c
+++ b/mm/kmsan/instrumentation.c
@@ -81,12 +81,16 @@ DECLARE_METADATA_PTR_GETTER(8);
  * Handle a memory store performed by inline assembly. KMSAN conservatively
  * attempts to unpoison the outputs of asm() directives to prevent false
  * positives caused by missed stores.
+ *
+ * __msan_instrument_asm_store() may be called for inline assembly code when
+ * entering or leaving IRQ. We omit the check for kmsan_in_runtime() to ensure
+ * the memory written to in these cases is also marked as initialized.
  */
 void __msan_instrument_asm_store(void *addr, uintptr_t size)
 {
         unsigned long ua_flags;
 
-        if (!kmsan_enabled || kmsan_in_runtime())
+        if (!kmsan_enabled)
                 return;
 
         ua_flags = user_access_save();
@@ -103,10 +107,8 @@ void __msan_instrument_asm_store(void *addr, uintptr_t size)
                 user_access_restore(ua_flags);
                 return;
         }
-        kmsan_enter_runtime();
         /* Unpoisoning the memory on best effort. */
         kmsan_internal_unpoison_memory(addr, size, /*checked*/ false);
-        kmsan_leave_runtime();
         user_access_restore(ua_flags);
 }
 EXPORT_SYMBOL(__msan_instrument_asm_store);
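
Background, not part of this patch: in a KMSAN build the compiler
conservatively treats every asm() output operand as a store and emits a call
to __msan_instrument_asm_store() after the directive. A rough, hypothetical
sketch of what instrumented code reduces to (set_bit_asm() is an invented
example):

  static inline void set_bit_asm(unsigned long *word)
  {
          asm volatile("orq $1, %0" : "+m"(*word));
          /*
           * A KMSAN build inserts, roughly:
           *   __msan_instrument_asm_store(word, sizeof(*word));
           * With the kmsan_in_runtime() check dropped above, the output is
           * unpoisoned even when this call happens on the IRQ entry/exit
           * path, where the runtime may already be active.
           */
  }
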
diff --git a/mm/kmsan/kmsan_test.c b/mm/kmsan/kmsan_test.c
index 9a29ea2dbfb9..088e21a48dc4 100644
--- a/mm/kmsan/kmsan_test.c
+++ b/mm/kmsan/kmsan_test.c
@@ -22,6 +22,7 @@
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/tracepoint.h>
+#include <linux/vmalloc.h>
 #include <trace/events/printk.h>
 
 static DEFINE_PER_CPU(int, per_cpu_var);
@@ -419,6 +420,7 @@ static void test_memcpy_aligned_to_aligned(struct kunit *test)
         kunit_info(
                 test,
                 "memcpy()ing aligned uninit src to aligned dst (UMR report)\n");
+        OPTIMIZER_HIDE_VAR(uninit_src);
         memcpy((void *)&dst, (void *)&uninit_src, sizeof(uninit_src));
         kmsan_check_memory((void *)&dst, sizeof(dst));
         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
@@ -441,6 +443,7 @@ static void test_memcpy_aligned_to_unaligned(struct kunit *test)
         kunit_info(
                 test,
                 "memcpy()ing aligned uninit src to unaligned dst (UMR report)\n");
+        OPTIMIZER_HIDE_VAR(uninit_src);
         memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
         kmsan_check_memory((void *)dst, 4);
         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
@@ -464,6 +467,7 @@ static void test_memcpy_aligned_to_unaligned2(struct kunit *test)
         kunit_info(
                 test,
                 "memcpy()ing aligned uninit src to unaligned dst - part 2 (UMR report)\n");
+        OPTIMIZER_HIDE_VAR(uninit_src);
         memcpy((void *)&dst[1], (void *)&uninit_src, sizeof(uninit_src));
         kmsan_check_memory((void *)&dst[4], sizeof(uninit_src));
         KUNIT_EXPECT_TRUE(test, report_matches(&expect));
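
Background, not part of this patch: OPTIMIZER_HIDE_VAR() (from
include/linux/compiler.h) wraps the variable in an empty asm(), making its
value opaque to the optimizer. Without it, the compiler may elide the
memcpy() of an uninitialized variable, or lower it to stores of an arbitrary
known value, so the expected UMR report never materializes. A standalone
userspace sketch of the same idiom (HIDE_VAR mirrors the kernel macro):

  #include <string.h>

  /* Same empty-asm trick as the kernel's OPTIMIZER_HIDE_VAR(). */
  #define HIDE_VAR(var) __asm__("" : "=r"(var) : "0"(var))

  void demo(void)
  {
          int uninit_src; /* deliberately uninitialized */
          int dst;

          HIDE_VAR(uninit_src);   /* value is now opaque to the optimizer */
          memcpy(&dst, &uninit_src, sizeof(uninit_src)); /* cannot be elided */
  }
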