author		Linus Torvalds <torvalds@linux-foundation.org>	2020-12-23 00:38:17 +0300
committer	Linus Torvalds <torvalds@linux-foundation.org>	2020-12-23 00:38:17 +0300
commit		1375b9803e007842493c64d0d73d7dd0e385e17c
tree		bbdba09ad6c044f845a9dc553a88016417c8ad6b /arch
parent		c45647f9f562b52915b43b6bb447827cebf511bd
parent		625d867347c9e84d1ac3c953e1b689f65b603bed
download	linux-1375b9803e007842493c64d0d73d7dd0e385e17c.tar.xz
Merge branch 'akpm' (patches from Andrew)
Merge KASAN updates from Andrew Morton.
This adds a new hardware tag-based mode to KASAN. The new mode is
similar to the existing software tag-based KASAN, but relies on arm64
Memory Tagging Extension (MTE) to perform memory and pointer tagging
(instead of shadow memory and compiler instrumentation).
By Andrey Konovalov and Vincenzo Frascino.
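[Editor's note: for context, hardware tag-based KASAN keeps a 4-bit tag both in the top byte of each pointer and in every 16-byte MTE granule of the memory it points to; the CPU raises a tag check fault when they disagree. A minimal sketch of the allocation-time flow follows, using the arch_* hooks this merge adds in arch/arm64/include/asm/memory.h — the hook names and signatures come from the diff below, while the wrapper function itself is illustrative only.]

```c
/*
 * Illustrative only: a simplified view of how HW_TAGS KASAN tags an
 * allocation. arch_get_random_tag() and arch_set_mem_tag_range() are
 * the real hooks added by this merge; this wrapper is hypothetical.
 */
static inline void *hw_tags_tag_alloc(void *addr, size_t size)
{
	/* Pick a random 4-bit tag (the IRG instruction under the hood). */
	u8 tag = arch_get_random_tag();

	/*
	 * Store the tag into every 16-byte MTE granule of the object and
	 * return the pointer with the same tag in bits 59:56. Any later
	 * access through a pointer carrying a different tag faults.
	 */
	return arch_set_mem_tag_range(addr, size, tag);
}
```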
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (60 commits)
kasan: update documentation
kasan, mm: allow cache merging with no metadata
kasan: sanitize objects when metadata doesn't fit
kasan: clarify comment in __kasan_kfree_large
kasan: simplify assign_tag and set_tag calls
kasan: don't round_up too much
kasan, mm: rename kasan_poison_kfree
kasan, mm: check kasan_enabled in annotations
kasan: add and integrate kasan boot parameters
kasan: inline (un)poison_range and check_invalid_free
kasan: open-code kasan_unpoison_slab
kasan: inline random_tag for HW_TAGS
kasan: inline kasan_reset_tag for tag-based modes
kasan: remove __kasan_unpoison_stack
kasan: allow VMAP_STACK for HW_TAGS mode
kasan, arm64: unpoison stack only with CONFIG_KASAN_STACK
kasan: introduce set_alloc_info
kasan: rename get_alloc/free_info
kasan: simplify quarantine_put call site
kselftest/arm64: check GCR_EL1 after context switch
...
Diffstat (limited to 'arch')
36 files changed, 466 insertions(+), 51 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig
index a08999dfcf16..78c6f05b10f9 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -976,16 +976,16 @@ config VMAP_STACK
 	default y
 	bool "Use a virtually-mapped stack"
 	depends on HAVE_ARCH_VMAP_STACK
-	depends on !KASAN || KASAN_VMALLOC
+	depends on !KASAN || KASAN_HW_TAGS || KASAN_VMALLOC
 	help
 	  Enable this if you want the use virtually-mapped kernel stacks
 	  with guard pages. This causes kernel stack overflows to be
 	  caught immediately rather than causing difficult-to-diagnose
 	  corruption.
 
-	  To use this with KASAN, the architecture must support backing
-	  virtual mappings with real shadow memory, and KASAN_VMALLOC must
-	  be enabled.
+	  To use this with software KASAN modes, the architecture must support
+	  backing virtual mappings with real shadow memory, and KASAN_VMALLOC
+	  must be enabled.
 
 config ARCH_OPTIONAL_KERNEL_RWX
 	def_bool n
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 81463eb537bb..d0d94f77d000 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -137,6 +137,7 @@ config ARM64
 	select HAVE_ARCH_JUMP_LABEL_RELATIVE
 	select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
 	select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
+	select HAVE_ARCH_KASAN_HW_TAGS if (HAVE_ARCH_KASAN && ARM64_MTE)
 	select HAVE_ARCH_KGDB
 	select HAVE_ARCH_MMAP_RND_BITS
 	select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
@@ -334,7 +335,7 @@ config BROKEN_GAS_INST
 
 config KASAN_SHADOW_OFFSET
 	hex
-	depends on KASAN
+	depends on KASAN_GENERIC || KASAN_SW_TAGS
 	default 0xdfff800000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS
 	default 0xdfffc00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS
 	default 0xdffffe0000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS
@@ -1571,6 +1572,9 @@ endmenu
 
 menu "ARMv8.5 architectural features"
 
+config AS_HAS_ARMV8_5
+	def_bool $(cc-option,-Wa$(comma)-march=armv8.5-a)
+
 config ARM64_BTI
 	bool "Branch Target Identification support"
 	default y
@@ -1645,6 +1649,9 @@ config ARM64_MTE
 	bool "Memory Tagging Extension support"
 	default y
 	depends on ARM64_AS_HAS_MTE && ARM64_TAGGED_ADDR_ABI
+	depends on AS_HAS_ARMV8_5
+	# Required for tag checking in the uaccess routines
+	depends on ARM64_PAN
 	select ARCH_USES_HIGH_VMA_FLAGS
 	help
 	  Memory Tagging (part of the ARMv8.5 Extensions) provides
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 6a87d592bd00..6be9b3750250 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -96,6 +96,11 @@ ifeq ($(CONFIG_AS_HAS_ARMV8_4), y)
 asm-arch := armv8.4-a
 endif
 
+ifeq ($(CONFIG_AS_HAS_ARMV8_5), y)
+# make sure to pass the newest target architecture to -march.
+asm-arch := armv8.5-a
+endif
+
 ifdef asm-arch
 KBUILD_CFLAGS	+= -Wa,-march=$(asm-arch) \
 		   -DARM64_ASM_ARCH='"$(asm-arch)"'
@@ -132,7 +137,7 @@ head-y		:= arch/arm64/kernel/head.o
 
 ifeq ($(CONFIG_KASAN_SW_TAGS), y)
 KASAN_SHADOW_SCALE_SHIFT := 4
-else
+else ifeq ($(CONFIG_KASAN_GENERIC), y)
 KASAN_SHADOW_SCALE_SHIFT := 3
 endif
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index ddbe6bf00e33..bf125c591116 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -473,7 +473,7 @@ USER(\label, ic	ivau, \tmp2)			// invalidate I line PoU
 #define NOKPROBE(x)
 #endif
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 #define EXPORT_SYMBOL_NOKASAN(name)
 #else
 #define EXPORT_SYMBOL_NOKASAN(name)	EXPORT_SYMBOL(name)
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 63d43b5f82f6..77cbbe3625f2 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -6,6 +6,7 @@
 #define __ASM_CACHE_H
 
 #include <asm/cputype.h>
+#include <asm/mte-kasan.h>
 
 #define CTR_L1IP_SHIFT		14
 #define CTR_L1IP_MASK		3
@@ -51,6 +52,8 @@
 
 #ifdef CONFIG_KASAN_SW_TAGS
 #define ARCH_SLAB_MINALIGN	(1ULL << KASAN_SHADOW_SCALE_SHIFT)
+#elif defined(CONFIG_KASAN_HW_TAGS)
+#define ARCH_SLAB_MINALIGN	MTE_GRANULE_SIZE
 #endif
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 85a3e49f92f4..29f97eb3dad4 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -106,6 +106,7 @@
 #define ESR_ELx_FSC_TYPE	(0x3C)
 #define ESR_ELx_FSC_LEVEL	(0x03)
 #define ESR_ELx_FSC_EXTABT	(0x10)
+#define ESR_ELx_FSC_MTE		(0x11)
 #define ESR_ELx_FSC_SERROR	(0x11)
 #define ESR_ELx_FSC_ACCESS	(0x08)
 #define ESR_ELx_FSC_FAULT	(0x04)
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index b0dc4abc3589..0aaf9044cd6a 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -12,7 +12,9 @@
 #define arch_kasan_reset_tag(addr)	__tag_reset(addr)
 #define arch_kasan_get_tag(addr)	__tag_get(addr)
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
+void kasan_init(void);
 
 /*
  * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
@@ -33,7 +35,6 @@
 #define _KASAN_SHADOW_START(va)	(KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT)))
 #define KASAN_SHADOW_START	_KASAN_SHADOW_START(vabits_actual)
 
-void kasan_init(void);
 void kasan_copy_shadow(pgd_t *pgdir);
 asmlinkage void kasan_early_init(void);
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 556cb2d62b5b..18fce223b67b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -72,7 +72,7 @@
  * address space for the shadow region respectively. They can bloat the stack
  * significantly, so double the (minimum) stack size when they are in use.
  */
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 #define KASAN_SHADOW_OFFSET	_AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
 #define KASAN_SHADOW_END	((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \
					+ KASAN_SHADOW_OFFSET)
@@ -214,7 +214,7 @@ static inline unsigned long kaslr_offset(void)
 	(__force __typeof__(addr))__addr;				\
 })
 
-#ifdef CONFIG_KASAN_SW_TAGS
+#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
 #define __tag_shifted(tag)	((u64)(tag) << 56)
 #define __tag_reset(addr)	__untagged_addr(addr)
 #define __tag_get(addr)		(__u8)((u64)(addr) >> 56)
@@ -222,7 +222,7 @@ static inline unsigned long kaslr_offset(void)
 #define __tag_shifted(tag)	0UL
 #define __tag_reset(addr)	(addr)
 #define __tag_get(addr)		0
-#endif /* CONFIG_KASAN_SW_TAGS */
+#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
 
 static inline const void *__tag_set(const void *addr, u8 tag)
 {
@@ -230,6 +230,15 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 	return (const void *)(__addr | __tag_shifted(tag));
 }
 
+#ifdef CONFIG_KASAN_HW_TAGS
+#define arch_enable_tagging()			mte_enable_kernel()
+#define arch_init_tags(max_tag)			mte_init_tags(max_tag)
+#define arch_get_random_tag()			mte_get_random_tag()
+#define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
+#define arch_set_mem_tag_range(addr, size, tag)	\
+			mte_set_mem_tag_range((addr), (size), (tag))
+#endif /* CONFIG_KASAN_HW_TAGS */
+
 /*
  * Physical vs virtual RAM address space conversion. These are
  * private definitions which should NOT be used outside memory.h
diff --git a/arch/arm64/include/asm/mte-def.h b/arch/arm64/include/asm/mte-def.h
new file mode 100644
index 000000000000..2d73a1612f09
--- /dev/null
+++ b/arch/arm64/include/asm/mte-def.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_DEF_H
+#define __ASM_MTE_DEF_H
+
+#define MTE_GRANULE_SIZE	UL(16)
+#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
+#define MTE_TAG_SHIFT		56
+#define MTE_TAG_SIZE		4
+#define MTE_TAG_MASK		GENMASK((MTE_TAG_SHIFT + (MTE_TAG_SIZE - 1)), MTE_TAG_SHIFT)
+
+#endif /* __ASM_MTE_DEF_H */
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
new file mode 100644
index 000000000000..26349a4b5e2e
--- /dev/null
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __ASM_MTE_KASAN_H
+#define __ASM_MTE_KASAN_H
+
+#include <asm/mte-def.h>
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * The functions below are meant to be used only for the
+ * KASAN_HW_TAGS interface defined in asm/memory.h.
+ */
+#ifdef CONFIG_ARM64_MTE
+
+static inline u8 mte_get_ptr_tag(void *ptr)
+{
+	/* Note: The format of KASAN tags is 0xF<x> */
+	u8 tag = 0xF0 | (u8)(((u64)(ptr)) >> MTE_TAG_SHIFT);
+
+	return tag;
+}
+
+u8 mte_get_mem_tag(void *addr);
+u8 mte_get_random_tag(void);
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag);
+
+void mte_enable_kernel(void);
+void mte_init_tags(u64 max_tag);
+
+#else /* CONFIG_ARM64_MTE */
+
+static inline u8 mte_get_ptr_tag(void *ptr)
+{
+	return 0xFF;
+}
+
+static inline u8 mte_get_mem_tag(void *addr)
+{
+	return 0xFF;
+}
+
+static inline u8 mte_get_random_tag(void)
+{
+	return 0xFF;
+}
+
+static inline void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	return addr;
+}
+
+static inline void mte_enable_kernel(void)
+{
+}
+
+static inline void mte_init_tags(u64 max_tag)
+{
+}
+
+#endif /* CONFIG_ARM64_MTE */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_MTE_KASAN_H */
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 1c99fcadb58c..d02aff9f493d 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -5,17 +5,21 @@
 #ifndef __ASM_MTE_H
 #define __ASM_MTE_H
 
-#define MTE_GRANULE_SIZE	UL(16)
-#define MTE_GRANULE_MASK	(~(MTE_GRANULE_SIZE - 1))
-#define MTE_TAG_SHIFT		56
-#define MTE_TAG_SIZE		4
+#include <asm/compiler.h>
+#include <asm/mte-def.h>
+
+#define __MTE_PREAMBLE		ARM64_ASM_PREAMBLE ".arch_extension memtag\n"
 
 #ifndef __ASSEMBLY__
 
+#include <linux/bitfield.h>
 #include <linux/page-flags.h>
+#include <linux/types.h>
 
 #include <asm/pgtable-types.h>
 
+extern u64 gcr_kernel_excl;
+
 void mte_clear_page_tags(void *addr);
 unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
 				      unsigned long n);
@@ -45,7 +49,9 @@ long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
 			 unsigned long addr, unsigned long data);
 
-#else
+void mte_assign_mem_tag_range(void *addr, size_t size);
+
+#else /* CONFIG_ARM64_MTE */
 
 /* unused if !CONFIG_ARM64_MTE, silence the compiler */
 #define PG_mte_tagged	0
@@ -80,7 +86,11 @@ static inline int mte_ptrace_copy_tags(struct task_struct *child,
 	return -EIO;
 }
 
-#endif
+static inline void mte_assign_mem_tag_range(void *addr, size_t size)
+{
+}
+
+#endif /* CONFIG_ARM64_MTE */
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_MTE_H */
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 724249f37af5..ca2cd75d3286 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -152,7 +152,7 @@ struct thread_struct {
 #endif
 #ifdef CONFIG_ARM64_MTE
 	u64			sctlr_tcf0;
-	u64			gcr_user_incl;
+	u64			gcr_user_excl;
 #endif
 };
diff --git a/arch/arm64/include/asm/string.h b/arch/arm64/include/asm/string.h
index b31e8e87a0db..3a3264ff47b9 100644
--- a/arch/arm64/include/asm/string.h
+++ b/arch/arm64/include/asm/string.h
@@ -5,7 +5,7 @@
 #ifndef __ASM_STRING_H
 #define __ASM_STRING_H
 
-#ifndef CONFIG_KASAN
+#if !(defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS))
 #define __HAVE_ARCH_STRRCHR
 extern char *strrchr(const char *, int c);
 
@@ -48,7 +48,8 @@ extern void *__memset(void *, int, __kernel_size_t);
 void memcpy_flushcache(void *dst, const void *src, size_t cnt);
 #endif
 
-#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+	!defined(__SANITIZE_ADDRESS__)
 
 /*
  * For files that are not instrumented (e.g. mm/slub.c) we
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index abb31aa1f8ca..6f986e09a781 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -159,8 +159,28 @@ static inline void __uaccess_enable_hw_pan(void)
 						CONFIG_ARM64_PAN));
 }
 
+/*
+ * The Tag Check Flag (TCF) mode for MTE is per EL, hence TCF0
+ * affects EL0 and TCF affects EL1 irrespective of which TTBR is
+ * used.
+ * The kernel accesses TTBR0 usually with LDTR/STTR instructions
+ * when UAO is available, so these would act as EL0 accesses using
+ * TCF0.
+ * However futex.h code uses exclusives which would be executed as
+ * EL1, this can potentially cause a tag check fault even if the
+ * user disables TCF0.
+ *
+ * To address the problem we set the PSTATE.TCO bit in uaccess_enable()
+ * and reset it in uaccess_disable().
+ *
+ * The Tag check override (TCO) bit disables temporarily the tag checking
+ * preventing the issue.
+ */
 static inline void uaccess_disable_privileged(void)
 {
+	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(0),
+				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+
 	if (uaccess_ttbr0_disable())
 		return;
 
@@ -169,6 +189,9 @@ static inline void uaccess_disable_privileged(void)
 
 static inline void uaccess_enable_privileged(void)
 {
+	asm volatile(ALTERNATIVE("nop", SET_PSTATE_TCO(1),
+				 ARM64_MTE, CONFIG_KASAN_HW_TAGS));
+
 	if (uaccess_ttbr0_enable())
 		return;
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 5e82488f1b82..f42fd9e33981 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -47,6 +47,9 @@ int main(void)
   DEFINE(THREAD_KEYS_USER,	offsetof(struct task_struct, thread.keys_user));
   DEFINE(THREAD_KEYS_KERNEL,	offsetof(struct task_struct, thread.keys_kernel));
 #endif
+#ifdef CONFIG_ARM64_MTE
+  DEFINE(THREAD_GCR_EL1_USER,	offsetof(struct task_struct, thread.gcr_user_excl));
+#endif
   BLANK();
   DEFINE(S_X0,			offsetof(struct pt_regs, regs[0]));
   DEFINE(S_X2,			offsetof(struct pt_regs, regs[2]));
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d87cfc6246e0..7ffb5f1d8b68 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -70,6 +70,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/cpu.h>
+#include <linux/kasan.h>
 #include <asm/cpu.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_ops.h>
@@ -1710,6 +1711,8 @@ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
 		cleared_zero_page = true;
 		mte_clear_page_tags(lm_alias(empty_zero_page));
 	}
+
+	kasan_init_hw_tags_cpu();
 }
 #endif /* CONFIG_ARM64_MTE */
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 51c762156099..2a93fa5f4e49 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -173,6 +173,43 @@ alternative_else_nop_endif
 #endif
 	.endm
 
+	.macro mte_set_gcr, tmp, tmp2
+#ifdef CONFIG_ARM64_MTE
+	/*
+	 * Calculate and set the exclude mask preserving
+	 * the RRND (bit[16]) setting.
+	 */
+	mrs_s	\tmp2, SYS_GCR_EL1
+	bfi	\tmp2, \tmp, #0, #16
+	msr_s	SYS_GCR_EL1, \tmp2
+	isb
+#endif
+	.endm
+
+	.macro mte_set_kernel_gcr, tmp, tmp2
+#ifdef CONFIG_KASAN_HW_TAGS
+alternative_if_not ARM64_MTE
+	b	1f
+alternative_else_nop_endif
+	ldr_l	\tmp, gcr_kernel_excl
+
+	mte_set_gcr \tmp, \tmp2
+1:
+#endif
+	.endm
+
+	.macro mte_set_user_gcr, tsk, tmp, tmp2
+#ifdef CONFIG_ARM64_MTE
+alternative_if_not ARM64_MTE
+	b	1f
+alternative_else_nop_endif
+	ldr	\tmp, [\tsk, #THREAD_GCR_EL1_USER]
+
+	mte_set_gcr \tmp, \tmp2
+1:
+#endif
+	.endm
+
 	.macro	kernel_entry, el, regsize = 64
 	.if	\regsize == 32
 	mov	w0, w0				// zero upper 32 bits of x0
@@ -212,6 +249,8 @@ alternative_else_nop_endif
 
 	ptrauth_keys_install_kernel tsk, x20, x22, x23
 
+	mte_set_kernel_gcr x22, x23
+
 	scs_load tsk, x20
 	.else
 	add	x21, sp, #S_FRAME_SIZE
@@ -315,6 +354,8 @@ alternative_else_nop_endif
 	/* No kernel C function calls after this as user keys are set. */
 	ptrauth_keys_install_user tsk, x0, x1, x2
 
+	mte_set_user_gcr tsk, x0, x1
+
 	apply_ssbd 0, x0, x1
 	.endif
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 42b23ce679dc..a0dc987724ed 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -433,7 +433,7 @@ SYM_FUNC_START_LOCAL(__primary_switched)
 	bl	__pi_memset
 	dsb	ishst				// Make zero page visible to PTW
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	bl	kasan_early_init
 #endif
 #ifdef CONFIG_RANDOMIZE_BASE
diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c
index 42003774d261..9c9f47e9f7f4 100644
--- a/arch/arm64/kernel/hibernate.c
+++ b/arch/arm64/kernel/hibernate.c
@@ -371,6 +371,11 @@ static void swsusp_mte_restore_tags(void)
 		unsigned long pfn = xa_state.xa_index;
 		struct page *page = pfn_to_online_page(pfn);
 
+		/*
+		 * It is not required to invoke page_kasan_tag_reset(page)
+		 * at this point since the tags stored in page->flags are
+		 * already restored.
+		 */
 		mte_restore_page_tags(page_address(page), tags);
 
 		mte_free_tag_storage(tags);
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 39289d75118d..f676243abac6 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -37,7 +37,7 @@ __efistub_strncmp		= __pi_strncmp;
 __efistub_strrchr		= __pi_strrchr;
 __efistub___clean_dcache_area_poc = __pi___clean_dcache_area_poc;
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 __efistub___memcpy		= __pi_memcpy;
 __efistub___memmove		= __pi_memmove;
 __efistub___memset		= __pi_memset;
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 0921aa1520b0..1c74c45b9494 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -161,7 +161,8 @@ u64 __init kaslr_early_init(u64 dt_phys)
 	/* use the top 16 bits to randomize the linear region */
 	memstart_offset_seed = seed >> 48;
 
-	if (IS_ENABLED(CONFIG_KASAN))
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		/*
 		 * KASAN does not expect the module region to intersect the
 		 * vmalloc region, since shadow memory is allocated for each
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 2a1ad95d9b2c..fe21e0f06492 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -30,7 +30,8 @@ void *module_alloc(unsigned long size)
 	if (IS_ENABLED(CONFIG_ARM64_MODULE_PLTS))
 		gfp_mask |= __GFP_NOWARN;
 
-	if (IS_ENABLED(CONFIG_KASAN))
+	if (IS_ENABLED(CONFIG_KASAN_GENERIC) ||
+	    IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		/* don't exceed the static module region - see below */
 		module_alloc_end = MODULES_END;
 
@@ -39,7 +40,8 @@ void *module_alloc(unsigned long size)
 				NUMA_NO_NODE, __builtin_return_address(0));
 
 	if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
-	    !IS_ENABLED(CONFIG_KASAN))
+	    !IS_ENABLED(CONFIG_KASAN_GENERIC) &&
+	    !IS_ENABLED(CONFIG_KASAN_SW_TAGS))
 		/*
 		 * KASAN can only deal with module allocations being served
 		 * from the reserved module region, since the remainder of
diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
index ef15c8a2a49d..dc9ada64feed 100644
--- a/arch/arm64/kernel/mte.c
+++ b/arch/arm64/kernel/mte.c
@@ -13,13 +13,18 @@
 #include <linux/swap.h>
 #include <linux/swapops.h>
 #include <linux/thread_info.h>
+#include <linux/types.h>
 #include <linux/uio.h>
 
+#include <asm/barrier.h>
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
+#include <asm/mte-kasan.h>
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
+u64 gcr_kernel_excl __ro_after_init;
+
 static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
 {
 	pte_t old_pte = READ_ONCE(*ptep);
@@ -31,6 +36,15 @@ static void mte_sync_page_tags(struct page *page, pte_t *ptep, bool check_swap)
 			return;
 	}
 
+	page_kasan_tag_reset(page);
+	/*
+	 * We need smp_wmb() in between setting the flags and clearing the
+	 * tags because if another thread reads page->flags and builds a
+	 * tagged address out of it, there is an actual dependency to the
+	 * memory access, but on the current thread we do not guarantee that
+	 * the new page->flags are visible before the tags were updated.
+	 */
+	smp_wmb();
 	mte_clear_page_tags(page_address(page));
 }
 
@@ -72,6 +86,78 @@ int memcmp_pages(struct page *page1, struct page *page2)
 	return ret;
 }
 
+u8 mte_get_mem_tag(void *addr)
+{
+	if (!system_supports_mte())
+		return 0xFF;
+
+	asm(__MTE_PREAMBLE "ldg %0, [%0]"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+u8 mte_get_random_tag(void)
+{
+	void *addr;
+
+	if (!system_supports_mte())
+		return 0xFF;
+
+	asm(__MTE_PREAMBLE "irg %0, %0"
+	    : "+r" (addr));
+
+	return mte_get_ptr_tag(addr);
+}
+
+void *mte_set_mem_tag_range(void *addr, size_t size, u8 tag)
+{
+	void *ptr = addr;
+
+	if ((!system_supports_mte()) || (size == 0))
+		return addr;
+
+	/* Make sure that size is MTE granule aligned. */
+	WARN_ON(size & (MTE_GRANULE_SIZE - 1));
+
+	/* Make sure that the address is MTE granule aligned. */
+	WARN_ON((u64)addr & (MTE_GRANULE_SIZE - 1));
+
+	tag = 0xF0 | tag;
+	ptr = (void *)__tag_set(ptr, tag);
+
+	mte_assign_mem_tag_range(ptr, size);
+
+	return ptr;
+}
+
+void mte_init_tags(u64 max_tag)
+{
+	static bool gcr_kernel_excl_initialized;
+
+	if (!gcr_kernel_excl_initialized) {
+		/*
+		 * The format of the tags in KASAN is 0xFF and in MTE is 0xF.
+		 * This conversion extracts an MTE tag from a KASAN tag.
+		 */
+		u64 incl = GENMASK(FIELD_GET(MTE_TAG_MASK >> MTE_TAG_SHIFT,
+					     max_tag), 0);
+
+		gcr_kernel_excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
+		gcr_kernel_excl_initialized = true;
+	}
+
+	/* Enable the kernel exclude mask for random tags generation. */
+	write_sysreg_s(SYS_GCR_EL1_RRND | gcr_kernel_excl, SYS_GCR_EL1);
+}
+
+void mte_enable_kernel(void)
+{
+	/* Enable MTE Sync Mode for EL1. */
+	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_SYNC);
+	isb();
+}
+
 static void update_sctlr_el1_tcf0(u64 tcf0)
 {
 	/* ISB required for the kernel uaccess routines */
@@ -92,23 +178,26 @@ static void set_sctlr_el1_tcf0(u64 tcf0)
 	preempt_enable();
 }
 
-static void update_gcr_el1_excl(u64 incl)
+static void update_gcr_el1_excl(u64 excl)
 {
-	u64 excl = ~incl & SYS_GCR_EL1_EXCL_MASK;
 
 	/*
-	 * Note that 'incl' is an include mask (controlled by the user via
-	 * prctl()) while GCR_EL1 accepts an exclude mask.
+	 * Note that the mask controlled by the user via prctl() is an
+	 * include while GCR_EL1 accepts an exclude mask.
 	 * No need for ISB since this only affects EL0 currently, implicit
 	 * with ERET.
	 */
 	sysreg_clear_set_s(SYS_GCR_EL1, SYS_GCR_EL1_EXCL_MASK, excl);
 }
 
-static void set_gcr_el1_excl(u64 incl)
+static void set_gcr_el1_excl(u64 excl)
 {
-	current->thread.gcr_user_incl = incl;
-	update_gcr_el1_excl(incl);
+	current->thread.gcr_user_excl = excl;
+
+	/*
+	 * SYS_GCR_EL1 will be set to current->thread.gcr_user_excl value
+	 * by mte_set_user_gcr() in kernel_exit,
+	 */
 }
 
 void flush_mte_state(void)
@@ -123,7 +212,7 @@ void flush_mte_state(void)
 	/* disable tag checking */
 	set_sctlr_el1_tcf0(SCTLR_EL1_TCF0_NONE);
 	/* reset tag generation mask */
-	set_gcr_el1_excl(0);
+	set_gcr_el1_excl(SYS_GCR_EL1_EXCL_MASK);
 }
 
 void mte_thread_switch(struct task_struct *next)
@@ -134,7 +223,6 @@ void mte_thread_switch(struct task_struct *next)
 	/* avoid expensive SCTLR_EL1 accesses if no change */
 	if (current->thread.sctlr_tcf0 != next->thread.sctlr_tcf0)
 		update_sctlr_el1_tcf0(next->thread.sctlr_tcf0);
-	update_gcr_el1_excl(next->thread.gcr_user_incl);
 }
 
 void mte_suspend_exit(void)
@@ -142,13 +230,14 @@ void mte_suspend_exit(void)
 	if (!system_supports_mte())
 		return;
 
-	update_gcr_el1_excl(current->thread.gcr_user_incl);
+	update_gcr_el1_excl(gcr_kernel_excl);
 }
 
 long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
 	u64 tcf0;
-	u64 gcr_incl = (arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT;
+	u64 gcr_excl = ~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
+		       SYS_GCR_EL1_EXCL_MASK;
 
 	if (!system_supports_mte())
 		return 0;
@@ -169,10 +258,10 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 
 	if (task != current) {
 		task->thread.sctlr_tcf0 = tcf0;
-		task->thread.gcr_user_incl = gcr_incl;
+		task->thread.gcr_user_excl = gcr_excl;
 	} else {
 		set_sctlr_el1_tcf0(tcf0);
-		set_gcr_el1_excl(gcr_incl);
+		set_gcr_el1_excl(gcr_excl);
 	}
 
 	return 0;
@@ -181,11 +270,12 @@ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 long get_mte_ctrl(struct task_struct *task)
 {
 	unsigned long ret;
+	u64 incl = ~task->thread.gcr_user_excl & SYS_GCR_EL1_EXCL_MASK;
 
 	if (!system_supports_mte())
 		return 0;
 
-	ret = task->thread.gcr_user_incl << PR_MTE_TAG_SHIFT;
+	ret = incl << PR_MTE_TAG_SHIFT;
 
 	switch (task->thread.sctlr_tcf0) {
 	case SCTLR_EL1_TCF0_NONE:
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index c44eb4b80163..c18aacde8bb0 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -358,7 +358,7 @@ void __init __no_sanitize_address setup_arch(char **cmdline_p)
 	smp_build_mpidr_hash();
 
 	/* Init percpu seeds for random tags after cpus are set up. */
-	kasan_init_tags();
+	kasan_init_sw_tags();
 
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	/*
diff --git a/arch/arm64/kernel/sleep.S b/arch/arm64/kernel/sleep.S
index 4be7f7eed875..6bdef7362c0e 100644
--- a/arch/arm64/kernel/sleep.S
+++ b/arch/arm64/kernel/sleep.S
@@ -133,7 +133,7 @@ SYM_FUNC_START(_cpu_resume)
 	 */
 	bl	cpu_do_resume
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
 	mov	x0, sp
 	bl	kasan_unpoison_task_stack_below
 #endif
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 2499b895efea..19b1705ae5cb 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -462,6 +462,8 @@ void __init smp_prepare_boot_cpu(void)
 	/* Conditionally switch to GIC PMR for interrupt masking */
 	if (system_uses_irq_prio_masking())
 		init_gic_priority_masking();
+
+	kasan_init_hw_tags();
 }
 
 static u64 __init of_get_cpu_mpidr(struct device_node *dn)
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 351537c12f36..9e1a12e10053 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -149,3 +149,19 @@ SYM_FUNC_START(mte_restore_page_tags)
 
 	ret
 SYM_FUNC_END(mte_restore_page_tags)
+
+/*
+ * Assign allocation tags for a region of memory based on the pointer tag
+ *   x0 - source pointer
+ *   x1 - size
+ *
+ * Note: The address must be non-NULL and MTE_GRANULE_SIZE aligned and
+ * size must be non-zero and MTE_GRANULE_SIZE aligned.
+ */
+SYM_FUNC_START(mte_assign_mem_tag_range)
+1:	stg	x0, [x0]
+	add	x0, x0, #MTE_GRANULE_SIZE
+	subs	x1, x1, #MTE_GRANULE_SIZE
+	b.gt	1b
+	ret
+SYM_FUNC_END(mte_assign_mem_tag_range)
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 70a71f38b6a9..b5447e53cd73 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -23,6 +23,15 @@ void copy_highpage(struct page *to, struct page *from)
 
 	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
 		set_bit(PG_mte_tagged, &to->flags);
+		page_kasan_tag_reset(to);
+		/*
+		 * We need smp_wmb() in between setting the flags and clearing the
+		 * tags because if another thread reads page->flags and builds a
+		 * tagged address out of it, there is an actual dependency to the
+		 * memory access, but on the current thread we do not guarantee that
+		 * the new page->flags are visible before the tags were updated.
+		 */
+		smp_wmb();
 		mte_copy_page_tags(kto, kfrom);
 	}
 }
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 2848952b178d..3c40da479899 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/hardirq.h>
 #include <linux/init.h>
+#include <linux/kasan.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/page-flags.h>
@@ -33,6 +34,7 @@
 #include <asm/debug-monitors.h>
 #include <asm/esr.h>
 #include <asm/kprobes.h>
+#include <asm/mte.h>
 #include <asm/processor.h>
 #include <asm/sysreg.h>
 #include <asm/system_misc.h>
@@ -296,6 +298,57 @@ static void die_kernel_fault(const char *msg, unsigned long addr,
 	do_exit(SIGKILL);
 }
 
+#ifdef CONFIG_KASAN_HW_TAGS
+static void report_tag_fault(unsigned long addr, unsigned int esr,
+			     struct pt_regs *regs)
+{
+	bool is_write = ((esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT) != 0;
+
+	/*
+	 * SAS bits aren't set for all faults reported in EL1, so we can't
+	 * find out access size.
+	 */
+	kasan_report(addr, 0, is_write, regs->pc);
+}
+#else
+/* Tag faults aren't enabled without CONFIG_KASAN_HW_TAGS. */
+static inline void report_tag_fault(unsigned long addr, unsigned int esr,
+				    struct pt_regs *regs) { }
+#endif
+
+static void do_tag_recovery(unsigned long addr, unsigned int esr,
+			   struct pt_regs *regs)
+{
+	static bool reported;
+
+	if (!READ_ONCE(reported)) {
+		report_tag_fault(addr, esr, regs);
+		WRITE_ONCE(reported, true);
+	}
+
+	/*
+	 * Disable MTE Tag Checking on the local CPU for the current EL.
+	 * It will be done lazily on the other CPUs when they will hit a
+	 * tag fault.
+	 */
+	sysreg_clear_set(sctlr_el1, SCTLR_ELx_TCF_MASK, SCTLR_ELx_TCF_NONE);
+	isb();
+}
+
+static bool is_el1_mte_sync_tag_check_fault(unsigned int esr)
+{
+	unsigned int ec = ESR_ELx_EC(esr);
+	unsigned int fsc = esr & ESR_ELx_FSC;
+
+	if (ec != ESR_ELx_EC_DABT_CUR)
+		return false;
+
+	if (fsc == ESR_ELx_FSC_MTE)
+		return true;
+
+	return false;
+}
+
 static void __do_kernel_fault(unsigned long addr, unsigned int esr,
 			      struct pt_regs *regs)
 {
@@ -312,6 +365,12 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
 	    "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr))
 		return;
 
+	if (is_el1_mte_sync_tag_check_fault(esr)) {
+		do_tag_recovery(addr, esr, regs);
+
+		return;
+	}
+
 	if (is_el1_permission_fault(addr, esr, regs)) {
 		if (esr & ESR_ELx_WNR)
 			msg = "write to read-only memory";
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index b24e43d20667..d8e66c78440e 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -21,6 +21,8 @@
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
 
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
+
 static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
 
 /*
@@ -208,7 +210,7 @@ static void __init clear_pgds(unsigned long start,
 		set_pgd(pgd_offset_k(start), __pgd(0));
 }
 
-void __init kasan_init(void)
+static void __init kasan_init_shadow(void)
 {
 	u64 kimg_shadow_start, kimg_shadow_end;
 	u64 mod_shadow_start, mod_shadow_end;
@@ -269,8 +271,21 @@ void __init kasan_init(void)
 
 	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
 	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+}
 
-	/* At this point kasan is fully initialized. Enable error messages */
+static void __init kasan_init_depth(void)
+{
 	init_task.kasan_depth = 0;
+}
+
+void __init kasan_init(void)
+{
+	kasan_init_shadow();
+	kasan_init_depth();
+#if defined(CONFIG_KASAN_GENERIC)
+	/* CONFIG_KASAN_SW_TAGS also requires kasan_init_sw_tags(). */
 	pr_info("KernelAddressSanitizer initialized\n");
+#endif
 }
+
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
index c52c1847079c..7c4ef56265ee 100644
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -53,6 +53,15 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
 	if (!tags)
 		return false;
 
+	page_kasan_tag_reset(page);
+	/*
+	 * We need smp_wmb() in between setting the flags and clearing the
+	 * tags because if another thread reads page->flags and builds a
+	 * tagged address out of it, there is an actual dependency to the
+	 * memory access, but on the current thread we do not guarantee that
+	 * the new page->flags are visible before the tags were updated.
+	 */
+	smp_wmb();
 	mte_restore_page_tags(page_address(page), tags);
 
 	return true;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index a0831bf8a018..37a54b57178a 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -40,9 +40,15 @@
 #define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA
 
 #ifdef CONFIG_KASAN_SW_TAGS
-#define TCR_KASAN_FLAGS TCR_TBI1 | TCR_TBID1
+#define TCR_KASAN_SW_FLAGS TCR_TBI1 | TCR_TBID1
 #else
-#define TCR_KASAN_FLAGS 0
+#define TCR_KASAN_SW_FLAGS 0
+#endif
+
+#ifdef CONFIG_KASAN_HW_TAGS
+#define TCR_KASAN_HW_FLAGS SYS_TCR_EL1_TCMA1 | TCR_TBI1
+#else
+#define TCR_KASAN_HW_FLAGS 0
 #endif
 
 /*
@@ -427,6 +433,10 @@ SYM_FUNC_START(__cpu_setup)
 	 */
 	mov_q	x5, MAIR_EL1_SET
 #ifdef CONFIG_ARM64_MTE
+	mte_tcr	.req	x20
+
+	mov	mte_tcr, #0
+
 	/*
 	 * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
 	 * (ID_AA64PFR1_EL1[11:8] > 1).
@@ -447,6 +457,9 @@ SYM_FUNC_START(__cpu_setup)
 	/* clear any pending tag check faults in TFSR*_EL1 */
 	msr_s	SYS_TFSR_EL1, xzr
 	msr_s	SYS_TFSRE0_EL1, xzr
+
+	/* set the TCR_EL1 bits */
+	mov_q	mte_tcr, TCR_KASAN_HW_FLAGS
 1:
 #endif
 	msr	mair_el1, x5
@@ -456,7 +469,11 @@ SYM_FUNC_START(__cpu_setup)
 	 */
 	mov_q	x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
 			TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
-			TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
+			TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
+#ifdef CONFIG_ARM64_MTE
+	orr	x10, x10, mte_tcr
+	.unreq	mte_tcr
+#endif
 	tcr_clear_errata_bits x10, x9, x5
 
 #ifdef CONFIG_ARM64_VA_BITS_52
diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c
index 807dc634bbd2..04137a8f3d2d 100644
--- a/arch/arm64/mm/ptdump.c
+++ b/arch/arm64/mm/ptdump.c
@@ -29,7 +29,7 @@
 enum address_markers_idx {
 	PAGE_OFFSET_NR = 0,
 	PAGE_END_NR,
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	KASAN_START_NR,
 #endif
 };
@@ -37,7 +37,7 @@ enum address_markers_idx {
 static struct addr_marker address_markers[] = {
 	{ PAGE_OFFSET,			"Linear Mapping start" },
 	{ 0 /* PAGE_END */,		"Linear Mapping end" },
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	{ 0 /* KASAN_SHADOW_START */,	"Kasan shadow start" },
 	{ KASAN_SHADOW_END,		"Kasan shadow end" },
 #endif
@@ -383,7 +383,7 @@ void ptdump_check_wx(void)
 static int ptdump_init(void)
 {
 	address_markers[PAGE_END_NR].start_address = PAGE_END;
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
 	address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START;
 #endif
 	ptdump_initialize();
diff --git a/arch/s390/boot/string.c b/arch/s390/boot/string.c
index b11e8108773a..faccb33b462c 100644
--- a/arch/s390/boot/string.c
+++ b/arch/s390/boot/string.c
@@ -3,6 +3,7 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #undef CONFIG_KASAN
+#undef CONFIG_KASAN_GENERIC
 #include "../lib/string.c"
 
 int strncmp(const char *cs, const char *ct, size_t count)
diff --git a/arch/x86/boot/compressed/misc.h b/arch/x86/boot/compressed/misc.h
index d9a631c5973c..901ea5ebec22 100644
--- a/arch/x86/boot/compressed/misc.h
+++ b/arch/x86/boot/compressed/misc.h
@@ -12,6 +12,7 @@
 #undef CONFIG_PARAVIRT_XXL
 #undef CONFIG_PARAVIRT_SPINLOCKS
 #undef CONFIG_KASAN
+#undef CONFIG_KASAN_GENERIC
 
 /* cpu_feature_enabled() cannot be used this early */
 #define USE_EARLY_PGTABLE_L5
diff --git a/arch/x86/kernel/acpi/wakeup_64.S b/arch/x86/kernel/acpi/wakeup_64.S
index c8daa92f38dc..5d3a0b8fd379 100644
--- a/arch/x86/kernel/acpi/wakeup_64.S
+++ b/arch/x86/kernel/acpi/wakeup_64.S
@@ -112,7 +112,7 @@ SYM_FUNC_START(do_suspend_lowlevel)
 	movq	pt_regs_r14(%rax), %r14
 	movq	pt_regs_r15(%rax), %r15
 
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
 	/*
 	 * The suspend path may have poisoned some areas deeper in the stack,
 	 * which we now need to unpoison.
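[Editor's note: the stg loop in arch/arm64/lib/mte.S above is the core of the new tagging path. A rough C rendering of mte_assign_mem_tag_range() follows, for readers who don't speak arm64 assembly — it is illustrative only, not the kernel's implementation, and it borrows __MTE_PREAMBLE and MTE_GRANULE_SIZE from the headers added in this merge.]

```c
/*
 * Illustrative C equivalent of mte_assign_mem_tag_range() from
 * arch/arm64/lib/mte.S: write the tag carried in the pointer into every
 * 16-byte granule of [addr, addr + size). As the asm's comment notes,
 * addr must be non-NULL and both addr and size granule-aligned, with
 * size non-zero.
 */
static void assign_mem_tag_range(void *addr, size_t size)
{
	u64 curr = (u64)addr;
	u64 end = curr + size;

	do {
		/* STG stores the allocation tag held in the address itself. */
		asm volatile(__MTE_PREAMBLE "stg %0, [%0]"
			     : : "r" (curr) : "memory");
		curr += MTE_GRANULE_SIZE;
	} while (curr < end);
}
```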