Diffstat (limited to 'arch/arm64')
-rw-r--r--  arch/arm64/Kconfig                      |  1
-rw-r--r--  arch/arm64/Makefile                     | 11
-rw-r--r--  arch/arm64/include/asm/brk-imm.h        |  2
-rw-r--r--  arch/arm64/include/asm/kasan.h          |  8
-rw-r--r--  arch/arm64/include/asm/memory.h         | 43
-rw-r--r--  arch/arm64/include/asm/pgtable-hwdef.h  |  1
-rw-r--r--  arch/arm64/include/asm/uaccess.h        |  7
-rw-r--r--  arch/arm64/kernel/traps.c               | 60
-rw-r--r--  arch/arm64/mm/fault.c                   | 31
-rw-r--r--  arch/arm64/mm/kasan_init.c              | 57
-rw-r--r--  arch/arm64/mm/mmu.c                     | 13
-rw-r--r--  arch/arm64/mm/proc.S                    |  8
12 files changed, 187 insertions, 55 deletions
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ff9291872372..86b18c1bd33c 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -110,6 +110,7 @@ config ARM64
select HAVE_ARCH_JUMP_LABEL
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN if !(ARM64_16K_PAGES && ARM64_VA_BITS_48)
+ select HAVE_ARCH_KASAN_SW_TAGS if HAVE_ARCH_KASAN
select HAVE_ARCH_KGDB
select HAVE_ARCH_MMAP_RND_BITS
select HAVE_ARCH_MMAP_RND_COMPAT_BITS if COMPAT
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 398bdb81a900..b025304bde46 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -101,10 +101,19 @@ else
TEXT_OFFSET := 0x00080000
endif
+ifeq ($(CONFIG_KASAN_SW_TAGS), y)
+KASAN_SHADOW_SCALE_SHIFT := 4
+else
+KASAN_SHADOW_SCALE_SHIFT := 3
+endif
+
+KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
+KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
+KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT)
+
# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
# - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT))
# in 32-bit arithmetic
-KASAN_SHADOW_SCALE_SHIFT := 3
KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \
(0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \
+ (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \
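[Editor's note: the shadow-offset arithmetic above can be checked with a small standalone C program. This is a sketch, not part of the patch; the VA_BITS=48 values in the comments simply follow from the formula for that configuration.]

	#include <stdio.h>
	#include <stdint.h>

	/*
	 * Mirrors the Makefile formula KASAN_SHADOW_OFFSET = VA_START
	 * + (1 << (VA_BITS - SCALE_SHIFT)) - (1 << (64 - SCALE_SHIFT)),
	 * evaluated on the upper 32 bits only, as the shell arithmetic does.
	 */
	static uint64_t shadow_offset(unsigned int va_bits, unsigned int scale_shift)
	{
		uint32_t hi = 0xffffffffu << (va_bits - 32);	/* VA_START >> 32 */

		hi += 1u << (va_bits - 32 - scale_shift);
		hi -= 1u << (64 - 32 - scale_shift);
		return (uint64_t)hi << 32;
	}

	int main(void)
	{
		/* prints 0xdfff200000000000 (generic) and 0xefff100000000000 (sw-tags) */
		printf("generic: 0x%016llx\n", (unsigned long long)shadow_offset(48, 3));
		printf("sw-tags: 0x%016llx\n", (unsigned long long)shadow_offset(48, 4));
		return 0;
	}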
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
index ed693c5bcec0..2945fe6cd863 100644
--- a/arch/arm64/include/asm/brk-imm.h
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -16,10 +16,12 @@
* 0x400: for dynamic BRK instruction
* 0x401: for compile time BRK instruction
* 0x800: kernel-mode BUG() and WARN() traps
+ * 0x9xx: tag-based KASAN trap (allowed values 0x900 - 0x9ff)
*/
#define FAULT_BRK_IMM 0x100
#define KGDB_DYN_DBG_BRK_IMM 0x400
#define KGDB_COMPILED_DBG_BRK_IMM 0x401
#define BUG_BRK_IMM 0x800
+#define KASAN_BRK_IMM 0x900
#endif
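[Editor's note: the 0x900 - 0x9ff window exists because inline instrumentation encodes the access parameters in the low byte of the BRK immediate. The sketch below is not kernel code; the layout is the one implied by the KASAN_ESR_* masks added to traps.c further down.]

	#include <stdio.h>

	/* imm = 0x900 | recover << 5 | write << 4 | log2(access size) */
	static unsigned int kasan_brk_imm(int recover, int write, unsigned int size_log2)
	{
		return 0x900 | (recover ? 0x20 : 0) | (write ? 0x10 : 0) | (size_log2 & 0x0f);
	}

	int main(void)
	{
		/* a recoverable 8-byte write check would use "brk #0x933" */
		printf("brk #0x%x\n", kasan_brk_imm(1, 1, 3));
		return 0;
	}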
diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h
index 8758bb008436..b52aacd2c526 100644
--- a/arch/arm64/include/asm/kasan.h
+++ b/arch/arm64/include/asm/kasan.h
@@ -4,12 +4,16 @@
#ifndef __ASSEMBLY__
-#ifdef CONFIG_KASAN
-
#include <linux/linkage.h>
#include <asm/memory.h>
#include <asm/pgtable-types.h>
+#define arch_kasan_set_tag(addr, tag) __tag_set(addr, tag)
+#define arch_kasan_reset_tag(addr) __tag_reset(addr)
+#define arch_kasan_get_tag(addr) __tag_get(addr)
+
+#ifdef CONFIG_KASAN
+
/*
* KASAN_SHADOW_START: beginning of the kernel virtual addresses.
* KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 0385752bd079..2bb8721da7ef 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -74,13 +74,11 @@
#endif
/*
- * KASAN requires 1/8th of the kernel virtual address space for the shadow
- * region. KASAN can bloat the stack significantly, so double the (minimum)
- * stack size when KASAN is in use, and then double it again if KASAN_EXTRA is
- * on.
+ * Generic and tag-based KASAN require 1/8th and 1/16th of the kernel virtual
+ * address space for the shadow region respectively. They can bloat the stack
+ * significantly, so double the (minimum) stack size when they are in use.
*/
#ifdef CONFIG_KASAN
-#define KASAN_SHADOW_SCALE_SHIFT 3
#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT))
#ifdef CONFIG_KASAN_EXTRA
#define KASAN_THREAD_SHIFT 2
@@ -221,6 +219,26 @@ extern u64 vabits_user;
#define PHYS_PFN_OFFSET (PHYS_OFFSET >> PAGE_SHIFT)
/*
+ * When dealing with data aborts, watchpoints, or instruction traps we may end
+ * up with a tagged userland pointer. Clear the tag to get a sane pointer to
+ * pass on to access_ok(), for instance.
+ */
+#define untagged_addr(addr) \
+ ((__typeof__(addr))sign_extend64((u64)(addr), 55))
+
+#ifdef CONFIG_KASAN_SW_TAGS
+#define __tag_shifted(tag) ((u64)(tag) << 56)
+#define __tag_set(addr, tag) (__typeof__(addr))( \
+ ((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
+#define __tag_reset(addr) untagged_addr(addr)
+#define __tag_get(addr) (__u8)((u64)(addr) >> 56)
+#else
+#define __tag_set(addr, tag) (addr)
+#define __tag_reset(addr) (addr)
+#define __tag_get(addr) 0
+#endif
+
+/*
* Physical vs virtual RAM address space conversion. These are
* private definitions which should NOT be used outside memory.h
* files. Use virt_to_phys/phys_to_virt/__pa/__va instead.
@@ -303,7 +321,13 @@ static inline void *phys_to_virt(phys_addr_t x)
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
-#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
+#define page_to_virt(page) ({ \
+ unsigned long __addr = \
+ ((__page_to_voff(page)) | PAGE_OFFSET); \
+ __addr = __tag_set(__addr, page_kasan_tag(page)); \
+ ((void *)__addr); \
+})
+
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \
@@ -311,9 +335,10 @@ static inline void *phys_to_virt(phys_addr_t x)
#endif
#endif
-#define _virt_addr_is_linear(kaddr) (((u64)(kaddr)) >= PAGE_OFFSET)
-#define virt_addr_valid(kaddr) (_virt_addr_is_linear(kaddr) && \
- _virt_addr_valid(kaddr))
+#define _virt_addr_is_linear(kaddr) \
+ (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET)
+#define virt_addr_valid(kaddr) \
+ (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr))
#include <asm-generic/memory_model.h>
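[Editor's note: a userspace sketch of the tag helpers above, not kernel code. The tag lives in bits 63:56, and untagged_addr() recovers a canonical pointer by sign-extending from bit 55, which works for both tagged kernel and tagged user addresses.]

	#include <stdio.h>
	#include <stdint.h>

	#define __tag_shifted(tag)	((uint64_t)(tag) << 56)
	#define __tag_set(addr, tag)	(((uint64_t)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag))
	#define __tag_get(addr)		((uint8_t)((uint64_t)(addr) >> 56))

	/* sign_extend64(addr, 55): replicate bit 55 into bits 63:56 */
	static uint64_t untagged_addr(uint64_t addr)
	{
		return (uint64_t)((int64_t)(addr << 8) >> 8);
	}

	int main(void)
	{
		uint64_t kva = 0xffff000012345678ull;		/* canonical kernel VA */
		uint64_t tagged = __tag_set(kva, 0x2a);

		/* prints 0x2aff000012345678 (tag 0x2a), then 0xffff000012345678 */
		printf("tagged:   0x%016llx (tag 0x%02x)\n",
		       (unsigned long long)tagged, __tag_get(tagged));
		printf("untagged: 0x%016llx\n",
		       (unsigned long long)untagged_addr(tagged));
		return 0;
	}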
diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h
index 22bb3ae514f5..e9b0a7d75184 100644
--- a/arch/arm64/include/asm/pgtable-hwdef.h
+++ b/arch/arm64/include/asm/pgtable-hwdef.h
@@ -299,6 +299,7 @@
#define TCR_A1 (UL(1) << 22)
#define TCR_ASID16 (UL(1) << 36)
#define TCR_TBI0 (UL(1) << 37)
+#define TCR_TBI1 (UL(1) << 38)
#define TCR_HA (UL(1) << 39)
#define TCR_HD (UL(1) << 40)
#define TCR_NFD1 (UL(1) << 54)
diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index fad33f5fde47..ed252435fd92 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -95,13 +95,6 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
return ret;
}
-/*
- * When dealing with data aborts, watchpoints, or instruction traps we may end
- * up with a tagged userland pointer. Clear the tag to get a sane pointer to
- * pass on to access_ok(), for instance.
- */
-#define untagged_addr(addr) sign_extend64(addr, 55)
-
#define access_ok(type, addr, size) __range_ok(addr, size)
#define user_addr_max get_fs
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5f4d9acb32f5..cdc71cf70aad 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -35,6 +35,7 @@
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
+#include <linux/kasan.h>
#include <asm/atomic.h>
#include <asm/bug.h>
@@ -969,6 +970,58 @@ static struct break_hook bug_break_hook = {
.fn = bug_handler,
};
+#ifdef CONFIG_KASAN_SW_TAGS
+
+#define KASAN_ESR_RECOVER 0x20
+#define KASAN_ESR_WRITE 0x10
+#define KASAN_ESR_SIZE_MASK 0x0f
+#define KASAN_ESR_SIZE(esr) (1 << ((esr) & KASAN_ESR_SIZE_MASK))
+
+static int kasan_handler(struct pt_regs *regs, unsigned int esr)
+{
+ bool recover = esr & KASAN_ESR_RECOVER;
+ bool write = esr & KASAN_ESR_WRITE;
+ size_t size = KASAN_ESR_SIZE(esr);
+ u64 addr = regs->regs[0];
+ u64 pc = regs->pc;
+
+ if (user_mode(regs))
+ return DBG_HOOK_ERROR;
+
+ kasan_report(addr, size, write, pc);
+
+ /*
+ * The instrumentation allows to control whether we can proceed after
+ * a crash was detected. This is done by passing the -recover flag to
+ * the compiler. Disabling recovery allows to generate more compact
+ * code.
+ *
+ * Unfortunately disabling recovery doesn't work for the kernel right
+ * now. KASAN reporting is disabled in some contexts (for example when
+ * the allocator accesses slab object metadata; this is controlled by
+ * current->kasan_depth). All these accesses are detected by the tool,
+ * even though the reports for them are not printed.
+ *
+ * This is something that might be fixed at some point in the future.
+ */
+ if (!recover)
+ die("Oops - KASAN", regs, 0);
+
+ /* If thread survives, skip over the brk instruction and continue: */
+ arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
+ return DBG_HOOK_HANDLED;
+}
+
+#define KASAN_ESR_VAL (0xf2000000 | KASAN_BRK_IMM)
+#define KASAN_ESR_MASK 0xffffff00
+
+static struct break_hook kasan_break_hook = {
+ .esr_val = KASAN_ESR_VAL,
+ .esr_mask = KASAN_ESR_MASK,
+ .fn = kasan_handler,
+};
+#endif
+
/*
* Initial handler for AArch64 BRK exceptions
* This handler only used until debug_traps_init().
@@ -976,6 +1029,10 @@ static struct break_hook bug_break_hook = {
int __init early_brk64(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
+#ifdef CONFIG_KASAN_SW_TAGS
+ if ((esr & KASAN_ESR_MASK) == KASAN_ESR_VAL)
+ return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
+#endif
return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}
@@ -983,4 +1040,7 @@ int __init early_brk64(unsigned long addr, unsigned int esr,
void __init trap_init(void)
{
register_break_hook(&bug_break_hook);
+#ifdef CONFIG_KASAN_SW_TAGS
+ register_break_hook(&kasan_break_hook);
+#endif
}
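[Editor's note: to see the decode in action, here is a standalone sketch of kasan_handler()'s ESR parsing. It is not kernel code, and the ESR value is a hypothetical example for an inline-instrumented 8-byte write with recovery enabled.]

	#include <stdbool.h>
	#include <stdio.h>

	#define KASAN_ESR_RECOVER	0x20
	#define KASAN_ESR_WRITE		0x10
	#define KASAN_ESR_SIZE_MASK	0x0f
	#define KASAN_ESR_SIZE(esr)	(1 << ((esr) & KASAN_ESR_SIZE_MASK))

	int main(void)
	{
		unsigned int esr = 0xf2000933;	/* ESR reported for "brk #0x933" */
		bool recover = esr & KASAN_ESR_RECOVER;
		bool write = esr & KASAN_ESR_WRITE;

		/* prints: recover=1 write=1 size=8 -- the faulting address itself
		 * arrives in x0 (regs->regs[0] above), not in the ESR */
		printf("recover=%d write=%d size=%d\n", recover, write, KASAN_ESR_SIZE(esr));
		return 0;
	}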
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 5fe6d2e40e9b..efb7b2cbead5 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -40,6 +40,7 @@
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
+#include <asm/kasan.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>
#include <asm/pgtable.h>
@@ -132,6 +133,18 @@ static void mem_abort_decode(unsigned int esr)
data_abort_decode(esr);
}
+static inline bool is_ttbr0_addr(unsigned long addr)
+{
+ /* entry assembly clears tags for TTBR0 addrs */
+ return addr < TASK_SIZE;
+}
+
+static inline bool is_ttbr1_addr(unsigned long addr)
+{
+ /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */
+ return arch_kasan_reset_tag(addr) >= VA_START;
+}
+
/*
* Dump out the page tables associated with 'addr' in the currently active mm.
*/
@@ -141,7 +154,7 @@ void show_pte(unsigned long addr)
pgd_t *pgdp;
pgd_t pgd;
- if (addr < TASK_SIZE) {
+ if (is_ttbr0_addr(addr)) {
/* TTBR0 */
mm = current->active_mm;
if (mm == &init_mm) {
@@ -149,7 +162,7 @@ void show_pte(unsigned long addr)
addr);
return;
}
- } else if (addr >= VA_START) {
+ } else if (is_ttbr1_addr(addr)) {
/* TTBR1 */
mm = &init_mm;
} else {
@@ -254,7 +267,7 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr,
if (fsc_type == ESR_ELx_FSC_PERM)
return true;
- if (addr < TASK_SIZE && system_uses_ttbr0_pan())
+ if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan())
return fsc_type == ESR_ELx_FSC_FAULT &&
(regs->pstate & PSR_PAN_BIT);
@@ -319,7 +332,7 @@ static void set_thread_esr(unsigned long address, unsigned int esr)
* type", so we ignore this wrinkle and just return the translation
* fault.)
*/
- if (current->thread.fault_address >= TASK_SIZE) {
+ if (!is_ttbr0_addr(current->thread.fault_address)) {
switch (ESR_ELx_EC(esr)) {
case ESR_ELx_EC_DABT_LOW:
/*
@@ -455,7 +468,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
mm_flags |= FAULT_FLAG_WRITE;
}
- if (addr < TASK_SIZE && is_el1_permission_fault(addr, esr, regs)) {
+ if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) {
/* regs->orig_addr_limit may be 0 if we entered from EL0 */
if (regs->orig_addr_limit == KERNEL_DS)
die_kernel_fault("access to user memory with fs=KERNEL_DS",
@@ -603,7 +616,7 @@ static int __kprobes do_translation_fault(unsigned long addr,
unsigned int esr,
struct pt_regs *regs)
{
- if (addr < TASK_SIZE)
+ if (is_ttbr0_addr(addr))
return do_page_fault(addr, esr, regs);
do_bad_area(addr, esr, regs);
@@ -758,7 +771,7 @@ asmlinkage void __exception do_el0_ia_bp_hardening(unsigned long addr,
* re-enabled IRQs. If the address is a kernel address, apply
* BP hardening prior to enabling IRQs and pre-emption.
*/
- if (addr > TASK_SIZE)
+ if (!is_ttbr0_addr(addr))
arm64_apply_bp_hardening();
local_daif_restore(DAIF_PROCCTX);
@@ -771,7 +784,7 @@ asmlinkage void __exception do_sp_pc_abort(unsigned long addr,
struct pt_regs *regs)
{
if (user_mode(regs)) {
- if (instruction_pointer(regs) > TASK_SIZE)
+ if (!is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
local_daif_restore(DAIF_PROCCTX);
}
@@ -825,7 +838,7 @@ asmlinkage int __exception do_debug_exception(unsigned long addr,
if (interrupts_enabled(regs))
trace_hardirqs_off();
- if (user_mode(regs) && instruction_pointer(regs) > TASK_SIZE)
+ if (user_mode(regs) && !is_ttbr0_addr(instruction_pointer(regs)))
arm64_apply_bp_hardening();
if (!inf->fn(addr, esr, regs)) {
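[Editor's note: these helpers matter because a tagged kernel pointer no longer compares as >= VA_START. A compact userspace illustration follows; the addresses and the 48-bit layout constant are assumptions, not kernel code.]

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define VA_START	0xffff000000000000ull	/* 48-bit VA layout, assumed */

	static uint64_t __tag_reset(uint64_t addr)
	{
		return (uint64_t)((int64_t)(addr << 8) >> 8);	/* sign-extend from bit 55 */
	}

	static bool is_ttbr1_addr(uint64_t addr)
	{
		return __tag_reset(addr) >= VA_START;
	}

	int main(void)
	{
		uint64_t tagged_kva = 0x34ff000012345678ull;	/* tag 0x34 on a kernel VA */

		printf("raw compare  : %d\n", (int)(tagged_kva >= VA_START));	/* 0: tag breaks it */
		printf("is_ttbr1_addr: %d\n", (int)is_ttbr1_addr(tagged_kva));	/* 1 */
		return 0;
	}

is_ttbr0_addr() can keep the plain compare because, as its comment notes, the entry assembly has already cleared tags from TTBR0 addresses.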
diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c
index 63527e585aac..4b55b15707a3 100644
--- a/arch/arm64/mm/kasan_init.c
+++ b/arch/arm64/mm/kasan_init.c
@@ -39,7 +39,15 @@ static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
__pa(MAX_DMA_ADDRESS),
- MEMBLOCK_ALLOC_ACCESSIBLE, node);
+ MEMBLOCK_ALLOC_KASAN, node);
+ return __pa(p);
+}
+
+static phys_addr_t __init kasan_alloc_raw_page(int node)
+{
+ void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS),
+ MEMBLOCK_ALLOC_KASAN, node);
return __pa(p);
}
@@ -47,8 +55,9 @@ static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
bool early)
{
if (pmd_none(READ_ONCE(*pmdp))) {
- phys_addr_t pte_phys = early ? __pa_symbol(kasan_zero_pte)
- : kasan_alloc_zeroed_page(node);
+ phys_addr_t pte_phys = early ?
+ __pa_symbol(kasan_early_shadow_pte)
+ : kasan_alloc_zeroed_page(node);
__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
}
@@ -60,8 +69,9 @@ static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
bool early)
{
if (pud_none(READ_ONCE(*pudp))) {
- phys_addr_t pmd_phys = early ? __pa_symbol(kasan_zero_pmd)
- : kasan_alloc_zeroed_page(node);
+ phys_addr_t pmd_phys = early ?
+ __pa_symbol(kasan_early_shadow_pmd)
+ : kasan_alloc_zeroed_page(node);
__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
}
@@ -72,8 +82,9 @@ static pud_t *__init kasan_pud_offset(pgd_t *pgdp, unsigned long addr, int node,
bool early)
{
if (pgd_none(READ_ONCE(*pgdp))) {
- phys_addr_t pud_phys = early ? __pa_symbol(kasan_zero_pud)
- : kasan_alloc_zeroed_page(node);
+ phys_addr_t pud_phys = early ?
+ __pa_symbol(kasan_early_shadow_pud)
+ : kasan_alloc_zeroed_page(node);
__pgd_populate(pgdp, pud_phys, PMD_TYPE_TABLE);
}
@@ -87,8 +98,11 @@ static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
do {
- phys_addr_t page_phys = early ? __pa_symbol(kasan_zero_page)
- : kasan_alloc_zeroed_page(node);
+ phys_addr_t page_phys = early ?
+ __pa_symbol(kasan_early_shadow_page)
+ : kasan_alloc_raw_page(node);
+ if (!early)
+ memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
next = addr + PAGE_SIZE;
set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
@@ -205,14 +219,14 @@ void __init kasan_init(void)
kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));
- kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
- (void *)mod_shadow_start);
- kasan_populate_zero_shadow((void *)kimg_shadow_end,
- kasan_mem_to_shadow((void *)PAGE_OFFSET));
+ kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+ (void *)mod_shadow_start);
+ kasan_populate_early_shadow((void *)kimg_shadow_end,
+ kasan_mem_to_shadow((void *)PAGE_OFFSET));
if (kimg_shadow_start > mod_shadow_end)
- kasan_populate_zero_shadow((void *)mod_shadow_end,
- (void *)kimg_shadow_start);
+ kasan_populate_early_shadow((void *)mod_shadow_end,
+ (void *)kimg_shadow_start);
for_each_memblock(memory, reg) {
void *start = (void *)__phys_to_virt(reg->base);
@@ -227,16 +241,19 @@ void __init kasan_init(void)
}
/*
- * KAsan may reuse the contents of kasan_zero_pte directly, so we
- * should make sure that it maps the zero page read-only.
+ * KAsan may reuse the contents of kasan_early_shadow_pte directly,
+ * so we should make sure that it maps the zero page read-only.
*/
for (i = 0; i < PTRS_PER_PTE; i++)
- set_pte(&kasan_zero_pte[i],
- pfn_pte(sym_to_pfn(kasan_zero_page), PAGE_KERNEL_RO));
+ set_pte(&kasan_early_shadow_pte[i],
+ pfn_pte(sym_to_pfn(kasan_early_shadow_page),
+ PAGE_KERNEL_RO));
- memset(kasan_zero_page, 0, PAGE_SIZE);
+ memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
cpu_replace_ttbr1(lm_alias(swapper_pg_dir));
+ kasan_init_tags();
+
/* At this point kasan is fully initialized. Enable error messages */
init_task.kasan_depth = 0;
pr_info("KernelAddressSanitizer initialized\n");
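[Editor's note: in tag-based mode one shadow byte covers a 16-byte granule and holds that granule's memory tag, and KASAN_SHADOW_INIT is 0xff, matching the native 0xff top byte of untagged kernel pointers, rather than 0 as in generic mode; kasan_init_tags() seeds the per-CPU state used to generate random tags. A minimal sketch of the tag check the runtime performs follows; it is illustrative, not the kernel implementation.]

	#include <stdbool.h>
	#include <stdint.h>
	#include <stdio.h>

	#define KASAN_SHADOW_INIT	0xff	/* shadow init value in tag-based mode */

	/* an access is valid when the pointer tag matches the granule's shadow tag;
	 * native kernel pointers carry 0xff in the top byte and skip the check */
	static bool access_ok_tag(uint64_t tagged_ptr, uint8_t shadow_tag)
	{
		uint8_t ptr_tag = tagged_ptr >> 56;

		return ptr_tag == 0xff || ptr_tag == shadow_tag;
	}

	int main(void)
	{
		uint64_t tagged = 0x2aff000012345678ull;	/* object tagged 0x2a at allocation */
		uint64_t native = 0xffff000012345678ull;	/* untagged kernel pointer, tag 0xff */

		printf("%d %d %d\n",
		       access_ok_tag(tagged, 0x2a),			/* 1: tags match */
		       access_ok_tag(tagged, 0x5c),			/* 0: mismatch -> kasan_report() */
		       access_ok_tag(native, KASAN_SHADOW_INIT));	/* 1: native tag skips the check */
		return 0;
	}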
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index da513a1facf4..b6f5aa52ac67 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -1003,10 +1003,8 @@ int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr)
pmd = READ_ONCE(*pmdp);
- if (!pmd_present(pmd))
- return 1;
if (!pmd_table(pmd)) {
- VM_WARN_ON(!pmd_table(pmd));
+ VM_WARN_ON(1);
return 1;
}
@@ -1026,10 +1024,8 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
pud = READ_ONCE(*pudp);
- if (!pud_present(pud))
- return 1;
if (!pud_table(pud)) {
- VM_WARN_ON(!pud_table(pud));
+ VM_WARN_ON(1);
return 1;
}
@@ -1047,6 +1043,11 @@ int pud_free_pmd_page(pud_t *pudp, unsigned long addr)
return 1;
}
+int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
+{
+ return 0; /* Don't attempt a block mapping */
+}
+
#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
bool want_memblock)
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index e05b3ce1db6b..73886a5f1f30 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -47,6 +47,12 @@
/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA
+#ifdef CONFIG_KASAN_SW_TAGS
+#define TCR_KASAN_FLAGS TCR_TBI1
+#else
+#define TCR_KASAN_FLAGS 0
+#endif
+
#define MAIR(attr, mt) ((attr) << ((mt) * 8))
/*
@@ -449,7 +455,7 @@ ENTRY(__cpu_setup)
*/
ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
- TCR_TBI0 | TCR_A1
+ TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS
#ifdef CONFIG_ARM64_USER_VA_BITS_52
ldr_l x9, vabits_user