-rw-r--r--  include/linux/kasan.h | 64
-rw-r--r--  mm/kasan/common.c     |  4
-rw-r--r--  mm/kasan/hw_tags.c    | 22
-rw-r--r--  mm/mempool.c          |  6
-rw-r--r--  mm/page_alloc.c       | 55
5 files changed, 95 insertions, 56 deletions
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index b1678a61e6a7..a1c7ce5f3e4f 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H
+#include <linux/bug.h>
#include <linux/static_key.h>
#include <linux/types.h>
@@ -79,14 +80,6 @@ static inline void kasan_disable_current(void) {}
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
-#ifdef CONFIG_KASAN
-
-struct kasan_cache {
- int alloc_meta_offset;
- int free_meta_offset;
- bool is_kmalloc;
-};
-
#ifdef CONFIG_KASAN_HW_TAGS
DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);
@@ -101,11 +94,14 @@ static inline bool kasan_has_integrated_init(void)
return kasan_enabled();
}
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
+void kasan_free_pages(struct page *page, unsigned int order);
+
#else /* CONFIG_KASAN_HW_TAGS */
static inline bool kasan_enabled(void)
{
- return true;
+ return IS_ENABLED(CONFIG_KASAN);
}
static inline bool kasan_has_integrated_init(void)
@@ -113,8 +109,30 @@ static inline bool kasan_has_integrated_init(void)
return false;
}
+static __always_inline void kasan_alloc_pages(struct page *page,
+ unsigned int order, gfp_t flags)
+{
+ /* Only available for integrated init. */
+ BUILD_BUG();
+}
+
+static __always_inline void kasan_free_pages(struct page *page,
+ unsigned int order)
+{
+ /* Only available for integrated init. */
+ BUILD_BUG();
+}
+
#endif /* CONFIG_KASAN_HW_TAGS */
+#ifdef CONFIG_KASAN
+
+struct kasan_cache {
+ int alloc_meta_offset;
+ int free_meta_offset;
+ bool is_kmalloc;
+};
+
slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
@@ -130,20 +148,20 @@ static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
__kasan_unpoison_range(addr, size);
}
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_alloc_pages(struct page *page,
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_poison_pages(struct page *page,
unsigned int order, bool init)
{
if (kasan_enabled())
- __kasan_alloc_pages(page, order, init);
+ __kasan_poison_pages(page, order, init);
}
-void __kasan_free_pages(struct page *page, unsigned int order, bool init);
-static __always_inline void kasan_free_pages(struct page *page,
- unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
+static __always_inline void kasan_unpoison_pages(struct page *page,
+ unsigned int order, bool init)
{
if (kasan_enabled())
- __kasan_free_pages(page, order, init);
+ __kasan_unpoison_pages(page, order, init);
}
void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
@@ -285,21 +303,15 @@ void kasan_restore_multi_shot(bool enabled);
#else /* CONFIG_KASAN */
-static inline bool kasan_enabled(void)
-{
- return false;
-}
-static inline bool kasan_has_integrated_init(void)
-{
- return false;
-}
static inline slab_flags_t kasan_never_merge(void)
{
return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
-static inline void kasan_alloc_pages(struct page *page, unsigned int order, bool init) {}
-static inline void kasan_free_pages(struct page *page, unsigned int order, bool init) {}
+static inline void kasan_poison_pages(struct page *page, unsigned int order,
+ bool init) {}
+static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
+ bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
unsigned int *size,
slab_flags_t *flags) {}
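
[Editorial note on the new header contract; commentary, not part of the commit.]
With CONFIG_KASAN_HW_TAGS, kasan_alloc_pages() and kasan_free_pages() are real
functions that combine tagging with memory initialization; on every other
configuration they expand to BUILD_BUG(), so callers may only reach them behind
a kasan_has_integrated_init() check the compiler can constant-fold, letting
dead-code elimination drop the BUILD_BUG() call. A minimal sketch of the
expected caller shape, mirroring the post_alloc_hook() change further down:

	if (kasan_has_integrated_init()) {
		/* HW_TAGS only: tag the pages and, when init is wanted,
		 * zero them in the same pass. */
		kasan_alloc_pages(page, order, gfp_flags);
	} else {
		bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);

		/* Unpoisoning/tagging and zeroing remain separate steps. */
		kasan_unpoison_pages(page, order, init);
		if (init)
			kernel_init_free_pages(page, 1 << order);
	}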
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index 6bb87f2acd4e..0ecd293af344 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -97,7 +97,7 @@ slab_flags_t __kasan_never_merge(void)
return 0;
}
-void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
+void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init)
{
u8 tag;
unsigned long i;
@@ -111,7 +111,7 @@ void __kasan_alloc_pages(struct page *page, unsigned int order, bool init)
kasan_unpoison(page_address(page), PAGE_SIZE << order, init);
}
-void __kasan_free_pages(struct page *page, unsigned int order, bool init)
+void __kasan_poison_pages(struct page *page, unsigned int order, bool init)
{
if (likely(!PageHighMem(page)))
kasan_poison(page_address(page), PAGE_SIZE << order,
diff --git a/mm/kasan/hw_tags.c b/mm/kasan/hw_tags.c
index 4004388b4e4b..9d0f6f934016 100644
--- a/mm/kasan/hw_tags.c
+++ b/mm/kasan/hw_tags.c
@@ -238,6 +238,28 @@ struct kasan_track *kasan_get_free_track(struct kmem_cache *cache,
return &alloc_meta->free_track[0];
}
+void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags)
+{
+ /*
+ * This condition should match the one in post_alloc_hook() in
+ * page_alloc.c.
+ */
+ bool init = !want_init_on_free() && want_init_on_alloc(flags);
+
+ kasan_unpoison_pages(page, order, init);
+}
+
+void kasan_free_pages(struct page *page, unsigned int order)
+{
+ /*
+ * This condition should match the one in free_pages_prepare() in
+ * page_alloc.c.
+ */
+ bool init = want_init_on_free();
+
+ kasan_poison_pages(page, order, init);
+}
+
#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)
void kasan_set_tagging_report_once(bool state)
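
[For context; commentary, not part of the commit.] Computing init inside
kasan_alloc_pages()/kasan_free_pages() is what makes the init "integrated":
with hardware tag-based KASAN the tag-setting pass can zero the memory at the
same time, so the separate kernel_init_free_pages() pass becomes redundant.
Roughly, on the allocation side:

	/* Without integrated init, two passes over the pages: */
	kasan_unpoison_pages(page, order, false);	/* set memory tags */
	if (init)
		kernel_init_free_pages(page, 1 << order);	/* zero separately */

	/* With integrated init (HW_TAGS), one call does both: */
	kasan_unpoison_pages(page, order, init);	/* set tags and zero */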
diff --git a/mm/mempool.c b/mm/mempool.c
index a258cf4de575..0b8afbec3e35 100644
--- a/mm/mempool.c
+++ b/mm/mempool.c
@@ -106,7 +106,8 @@ static __always_inline void kasan_poison_element(mempool_t *pool, void *element)
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_slab_free_mempool(element);
else if (pool->alloc == mempool_alloc_pages)
- kasan_free_pages(element, (unsigned long)pool->pool_data, false);
+ kasan_poison_pages(element, (unsigned long)pool->pool_data,
+ false);
}
static void kasan_unpoison_element(mempool_t *pool, void *element)
@@ -114,7 +115,8 @@ static void kasan_unpoison_element(mempool_t *pool, void *element)
if (pool->alloc == mempool_alloc_slab || pool->alloc == mempool_kmalloc)
kasan_unpoison_range(element, __ksize(element));
else if (pool->alloc == mempool_alloc_pages)
- kasan_alloc_pages(element, (unsigned long)pool->pool_data, false);
+ kasan_unpoison_pages(element, (unsigned long)pool->pool_data,
+ false);
}
static __always_inline void add_element(mempool_t *pool, void *element)
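
[Side note, not introduced by this patch.] For page-backed mempools the
allocation order is stored in pool->pool_data, which is why it is cast back to
an order above. Such a pool is created roughly as follows, assuming min_nr and
order are defined by the caller:

	mempool_t *pool = mempool_create(min_nr, mempool_alloc_pages,
					 mempool_free_pages, (void *)(long)order);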
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aaa1655cf682..4fddb7cac3c6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -382,7 +382,7 @@ int page_group_by_mobility_disabled __read_mostly;
static DEFINE_STATIC_KEY_TRUE(deferred_pages);
/*
- * Calling kasan_free_pages() only after deferred memory initialization
+ * Calling kasan_poison_pages() only after deferred memory initialization
* has completed. Poisoning pages during deferred memory init will greatly
* lengthen the process and cause problem in large memory systems as the
* deferred pages initialization is done with interrupt disabled.
@@ -394,15 +394,11 @@ static DEFINE_STATIC_KEY_TRUE(deferred_pages);
* on-demand allocation and then freed again before the deferred pages
* initialization is done, but this is not likely to happen.
*/
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
- bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
{
- if (static_branch_unlikely(&deferred_pages))
- return;
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
- (fpi_flags & FPI_SKIP_KASAN_POISON))
- return;
- kasan_free_pages(page, order, init);
+ return static_branch_unlikely(&deferred_pages) ||
+ (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+ (fpi_flags & FPI_SKIP_KASAN_POISON));
}
/* Returns true if the struct page for the pfn is uninitialised */
@@ -453,13 +449,10 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
return false;
}
#else
-static inline void kasan_free_nondeferred_pages(struct page *page, int order,
- bool init, fpi_t fpi_flags)
+static inline bool should_skip_kasan_poison(fpi_t fpi_flags)
{
- if (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
- (fpi_flags & FPI_SKIP_KASAN_POISON))
- return;
- kasan_free_pages(page, order, init);
+ return (!IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+ (fpi_flags & FPI_SKIP_KASAN_POISON));
}
static inline bool early_page_uninitialised(unsigned long pfn)
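
[A brief reading of the new helper; commentary, not part of the diff.] Generic
KASAN relies on free-time poisoning to catch use-after-free, so it deliberately
ignores FPI_SKIP_KASAN_POISON and only the deferred-init case can skip
poisoning; the tag-based modes honour the flag. Illustrative outcomes, assuming
deferred page initialization has already completed:

	should_skip_kasan_poison(FPI_SKIP_KASAN_POISON);
		/* false with CONFIG_KASAN_GENERIC, true with SW_TAGS/HW_TAGS */
	should_skip_kasan_poison(FPI_NONE);
		/* false on all configurations */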
@@ -1245,7 +1238,7 @@ static __always_inline bool free_pages_prepare(struct page *page,
unsigned int order, bool check_free, fpi_t fpi_flags)
{
int bad = 0;
- bool init;
+ bool skip_kasan_poison = should_skip_kasan_poison(fpi_flags);
VM_BUG_ON_PAGE(PageTail(page), page);
@@ -1314,10 +1307,17 @@ static __always_inline bool free_pages_prepare(struct page *page,
* With hardware tag-based KASAN, memory tags must be set before the
* page becomes unavailable via debug_pagealloc or arch_free_page.
*/
- init = want_init_on_free();
- if (init && !kasan_has_integrated_init())
- kernel_init_free_pages(page, 1 << order);
- kasan_free_nondeferred_pages(page, order, init, fpi_flags);
+ if (kasan_has_integrated_init()) {
+ if (!skip_kasan_poison)
+ kasan_free_pages(page, order);
+ } else {
+ bool init = want_init_on_free();
+
+ if (init)
+ kernel_init_free_pages(page, 1 << order);
+ if (!skip_kasan_poison)
+ kasan_poison_pages(page, order, init);
+ }
/*
* arch_free_page() can make the page's contents inaccessible. s390
@@ -2324,8 +2324,6 @@ static bool check_new_pages(struct page *page, unsigned int order)
inline void post_alloc_hook(struct page *page, unsigned int order,
gfp_t gfp_flags)
{
- bool init;
-
set_page_private(page, 0);
set_page_refcounted(page);
@@ -2344,10 +2342,15 @@ inline void post_alloc_hook(struct page *page, unsigned int order,
* kasan_alloc_pages and kernel_init_free_pages must be
* kept together to avoid discrepancies in behavior.
*/
- init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
- kasan_alloc_pages(page, order, init);
- if (init && !kasan_has_integrated_init())
- kernel_init_free_pages(page, 1 << order);
+ if (kasan_has_integrated_init()) {
+ kasan_alloc_pages(page, order, gfp_flags);
+ } else {
+ bool init = !want_init_on_free() && want_init_on_alloc(gfp_flags);
+
+ kasan_unpoison_pages(page, order, init);
+ if (init)
+ kernel_init_free_pages(page, 1 << order);
+ }
set_page_owner(page, order, gfp_flags);
}
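
[To sum up the resulting page lifecycle; editorial sketch, assuming
CONFIG_KASAN_HW_TAGS=y, KASAN enabled at boot, and init_on_free disabled.]

	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 2);
	/* post_alloc_hook() -> kasan_alloc_pages(page, 2, gfp_flags):
	 * the pages get tagged and, because __GFP_ZERO makes
	 * want_init_on_alloc() true, zeroed in the same pass rather than
	 * via a separate kernel_init_free_pages() call. */

	__free_pages(page, 2);
	/* free_pages_prepare() -> kasan_free_pages(page, 2), unless
	 * should_skip_kasan_poison() applies: the pages are poisoned and,
	 * only if init_on_free were enabled, zeroed in the same pass. */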