Diffstat (limited to 'arch/arm64/include')
-rw-r--r--  arch/arm64/include/asm/cpufeature.h      | 11
-rw-r--r--  arch/arm64/include/asm/el2_setup.h       | 13
-rw-r--r--  arch/arm64/include/asm/elf.h             | 6
-rw-r--r--  arch/arm64/include/asm/exception.h       | 5
-rw-r--r--  arch/arm64/include/asm/fpsimd.h          | 1
-rw-r--r--  arch/arm64/include/asm/fpsimdmacros.h    | 1
-rw-r--r--  arch/arm64/include/asm/kernel-pgtable.h  | 4
-rw-r--r--  arch/arm64/include/asm/memory.h          | 2
-rw-r--r--  arch/arm64/include/asm/mmu.h             | 29
-rw-r--r--  arch/arm64/include/asm/mmu_context.h     | 13
-rw-r--r--  arch/arm64/include/asm/mte-kasan.h       | 17
-rw-r--r--  arch/arm64/include/asm/mte.h             | 6
-rw-r--r--  arch/arm64/include/asm/pgtable.h         | 2
-rw-r--r--  arch/arm64/include/asm/pointer_auth.h    | 12
-rw-r--r--  arch/arm64/include/asm/processor.h       | 10
-rw-r--r--  arch/arm64/include/asm/simd.h            | 2
-rw-r--r--  arch/arm64/include/asm/sysreg.h          | 47
-rw-r--r--  arch/arm64/include/asm/tlbflush.h        | 13
18 files changed, 120 insertions, 74 deletions
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 9bb9d11750d7..cdfa2a242e9f 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -552,7 +552,7 @@ cpuid_feature_cap_perfmon_field(u64 features, int field, u64 cap)
 	u64 mask = GENMASK_ULL(field + 3, field);
 
 	/* Treat IMPLEMENTATION DEFINED functionality as unimplemented */
-	if (val == 0xf)
+	if (val == ID_AA64DFR0_PMUVER_IMP_DEF)
 		val = 0;
 
 	if (val > cap) {
@@ -657,7 +657,8 @@ static inline bool system_supports_4kb_granule(void)
 	val = cpuid_feature_extract_unsigned_field(mmfr0,
 						ID_AA64MMFR0_TGRAN4_SHIFT);
 
-	return val == ID_AA64MMFR0_TGRAN4_SUPPORTED;
+	return (val >= ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN) &&
+	       (val <= ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_64kb_granule(void)
@@ -669,7 +670,8 @@ static inline bool system_supports_64kb_granule(void)
 	val = cpuid_feature_extract_unsigned_field(mmfr0,
 						ID_AA64MMFR0_TGRAN64_SHIFT);
 
-	return val == ID_AA64MMFR0_TGRAN64_SUPPORTED;
+	return (val >= ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN) &&
+	       (val <= ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_16kb_granule(void)
@@ -681,7 +683,8 @@ static inline bool system_supports_16kb_granule(void)
 	val = cpuid_feature_extract_unsigned_field(mmfr0,
 						ID_AA64MMFR0_TGRAN16_SHIFT);
 
-	return val == ID_AA64MMFR0_TGRAN16_SUPPORTED;
+	return (val >= ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN) &&
+	       (val <= ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX);
 }
 
 static inline bool system_supports_mixed_endian_el0(void)
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index b83fb24954b7..3198acb2aad8 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -149,8 +149,17 @@
 	ubfx	x1, x1, #ID_AA64MMFR0_FGT_SHIFT, #4
 	cbz	x1, .Lskip_fgt_\@
 
-	msr_s	SYS_HDFGRTR_EL2, xzr
-	msr_s	SYS_HDFGWTR_EL2, xzr
+	mov	x0, xzr
+	mrs	x1, id_aa64dfr0_el1
+	ubfx	x1, x1, #ID_AA64DFR0_PMSVER_SHIFT, #4
+	cmp	x1, #3
+	b.lt	.Lset_fgt_\@
+	/* Disable PMSNEVFR_EL1 read and write traps */
+	orr	x0, x0, #(1 << 62)
+
+.Lset_fgt_\@:
+	msr_s	SYS_HDFGRTR_EL2, x0
+	msr_s	SYS_HDFGWTR_EL2, x0
 	msr_s	SYS_HFGRTR_EL2, xzr
 	msr_s	SYS_HFGWTR_EL2, xzr
 	msr_s	SYS_HFGITR_EL2, xzr
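A note on the cpufeature.h hunk above: the TGRAN fields are unsigned ID register fields where one value means "not implemented" (0xf for 4K/64K, 0x0 for 16K) and a growing range of values means "supported", so an equality test silently fails on newer CPUs that advertise, for example, 52-bit-address granule variants. A minimal stand-alone sketch of the resulting range-check pattern; extract_unsigned_field() is a hypothetical stand-in for the kernel's cpuid_feature_extract_unsigned_field():

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for cpuid_feature_extract_unsigned_field(). */
static unsigned int extract_unsigned_field(uint64_t reg, int shift)
{
	return (reg >> shift) & 0xf;
}

/*
 * ID_AA64MMFR0_EL1.TGRAN4 (bits [31:28]) is unsigned: 0x0 means
 * "4KB granule supported", 0x1-0x7 are reserved for compatible
 * extensions, and 0xf means "not implemented". A [MIN, MAX] range
 * check keeps working when new SUPPORTED values are architected.
 */
static bool supports_4kb_granule(uint64_t mmfr0)
{
	unsigned int val = extract_unsigned_field(mmfr0, 28);

	return val <= 0x7;	/* [SUPPORTED_MIN, SUPPORTED_MAX] = [0x0, 0x7] */
}

int main(void)
{
	printf("%d\n", supports_4kb_granule(0x0));          /* 1: TGRAN4 == 0x0 */
	printf("%d\n", supports_4kb_granule(0xfULL << 28)); /* 0: not implemented */
	return 0;
}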
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 8d1c8dcb87fd..97932fbf973d 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -213,10 +213,8 @@ typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
 
 /* AArch32 EABI. */
 #define EF_ARM_EABI_MASK		0xff000000
-#define compat_elf_check_arch(x)	(system_supports_32bit_el0() && \
-					 ((x)->e_machine == EM_ARM) && \
-					 ((x)->e_flags & EF_ARM_EABI_MASK))
-
+int compat_elf_check_arch(const struct elf32_hdr *);
+#define compat_elf_check_arch		compat_elf_check_arch
 #define compat_start_thread		compat_start_thread
 /*
  * Unlike the native SET_PERSONALITY macro, the compat version maintains
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 4afbc45b8bb0..339477dca551 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -55,8 +55,8 @@ asmlinkage void el0t_32_error_handler(struct pt_regs *regs);
 asmlinkage void call_on_irq_stack(struct pt_regs *regs,
 				  void (*func)(struct pt_regs *));
-asmlinkage void enter_from_user_mode(void);
-asmlinkage void exit_to_user_mode(void);
+asmlinkage void asm_exit_to_user_mode(struct pt_regs *regs);
+
 void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs);
 void do_undefinstr(struct pt_regs *regs);
 void do_bti(struct pt_regs *regs);
@@ -73,6 +73,7 @@ void do_el0_svc(struct pt_regs *regs);
 void do_el0_svc_compat(struct pt_regs *regs);
 void do_ptrauth_fault(struct pt_regs *regs, unsigned int esr);
 void do_serror(struct pt_regs *regs, unsigned int esr);
+void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
 
 void panic_bad_stack(struct pt_regs *regs, unsigned int esr, unsigned long far);
 
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index c072161d5c65..9a62884183e5 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -45,7 +45,6 @@ extern void fpsimd_preserve_current_state(void);
 extern void fpsimd_restore_current_state(void);
 extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
 
-extern void fpsimd_bind_task_to_cpu(void);
 extern void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *state,
 				     void *sve_state, unsigned int sve_vl);
 
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 059204477ce6..00a2c0b69c2b 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -94,6 +94,7 @@
 .endm
 
 /* SVE instruction encodings for non-SVE-capable assemblers */
+/* (pre binutils 2.28, all kernel capable clang versions support SVE) */
 
 /* STR (vector): STR Z\nz, [X\nxbase, #\offset, MUL VL] */
 .macro _sve_str_v nz, nxbase, offset=0
diff --git a/arch/arm64/include/asm/kernel-pgtable.h b/arch/arm64/include/asm/kernel-pgtable.h
index 3512184cfec1..96dc0f7da258 100644
--- a/arch/arm64/include/asm/kernel-pgtable.h
+++ b/arch/arm64/include/asm/kernel-pgtable.h
@@ -65,8 +65,8 @@
 #define EARLY_KASLR	(0)
 #endif
 
-#define EARLY_ENTRIES(vstart, vend, shift) (((vend) >> (shift)) \
-					- ((vstart) >> (shift)) + 1 + EARLY_KASLR)
+#define EARLY_ENTRIES(vstart, vend, shift) \
+	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1 + EARLY_KASLR)
 
 #define EARLY_PGDS(vstart, vend) (EARLY_ENTRIES(vstart, vend, PGDIR_SHIFT))
 
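The kernel-pgtable.h hunk fixes an off-by-one in early page-table sizing: treating vend as inclusive counts one extra entry whenever the (exclusive) end address lands exactly on a 1 << shift boundary. A stand-alone demonstration of the two forms (EARLY_KASLR dropped for brevity; the addresses are illustrative):

#include <stdio.h>

/* Old form: counts one entry too many when vend is exactly aligned. */
#define OLD_ENTRIES(vstart, vend, shift) \
	(((vend) >> (shift)) - ((vstart) >> (shift)) + 1)

/* New form: treats vend as exclusive by indexing its last byte. */
#define NEW_ENTRIES(vstart, vend, shift) \
	((((vend) - 1) >> (shift)) - ((vstart) >> (shift)) + 1)

int main(void)
{
	/* One 2MiB block mapped at a 2MiB-aligned address (shift = 21). */
	unsigned long vstart = 0x40000000UL;
	unsigned long vend   = 0x40200000UL;	/* exclusive end */

	printf("old: %lu entries\n", OLD_ENTRIES(vstart, vend, 21)); /* 2 */
	printf("new: %lu entries\n", NEW_ENTRIES(vstart, vend, 21)); /* 1 */
	return 0;
}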
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 824a3655dd93..f1745a843414 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -243,9 +243,7 @@ static inline const void *__tag_set(const void *addr, u8 tag)
 #ifdef CONFIG_KASAN_HW_TAGS
 #define arch_enable_tagging_sync()		mte_enable_kernel_sync()
 #define arch_enable_tagging_async()		mte_enable_kernel_async()
-#define arch_set_tagging_report_once(state)	mte_set_report_once(state)
 #define arch_force_async_tag_fault()		mte_check_tfsr_exit()
-#define arch_init_tags(max_tag)			mte_init_tags(max_tag)
 #define arch_get_random_tag()			mte_get_random_tag()
 #define arch_get_mem_tag(addr)			mte_get_mem_tag(addr)
 #define arch_set_mem_tag_range(addr, size, tag, init)	\
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 75beffe2ee8a..e9c30859f80c 100644
--- a/arch/arm64/include/asm/mmu.h
+++ b/arch/arm64/include/asm/mmu.h
@@ -27,11 +27,32 @@ typedef struct {
 } mm_context_t;
 
 /*
- * This macro is only used by the TLBI and low-level switch_mm() code,
- * neither of which can race with an ASID change. We therefore don't
- * need to reload the counter using atomic64_read().
+ * We use atomic64_read() here because the ASID for an 'mm_struct' can
+ * be reallocated when scheduling one of its threads following a
+ * rollover event (see new_context() and flush_context()). In this case,
+ * a concurrent TLBI (e.g. via try_to_unmap_one() and ptep_clear_flush())
+ * may use a stale ASID. This is fine in principle as the new ASID is
+ * guaranteed to be clean in the TLB, but the TLBI routines have to take
+ * care to handle the following race:
+ *
+ *    CPU 0                    CPU 1                          CPU 2
+ *
+ *    // ptep_clear_flush(mm)
+ *    xchg_relaxed(pte, 0)
+ *    DSB ISHST
+ *    old = ASID(mm)
+ *         |                   <rollover>
+ *         |                   new = new_context(mm)
+ *         \-----------------> atomic_set(mm->context.id, new)
+ *                             cpu_switch_mm(mm)
+ *                             // Hardware walk of pte using new ASID
+ *    TLBI(old)
+ *
+ * In this scenario, the barrier on CPU 0 and the dependency on CPU 1
+ * ensure that the page-table walker on CPU 1 *must* see the invalid PTE
+ * written by CPU 0.
  */
-#define ASID(mm)	((mm)->context.id.counter & 0xffff)
+#define ASID(mm)	(atomic64_read(&(mm)->context.id) & 0xffff)
 
 static inline bool arm64_kernel_unmapped_at_el0(void)
 {
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index eeb210997149..f4ba93d4ffeb 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -231,6 +231,19 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	update_saved_ttbr0(tsk, next);
 }
 
+static inline const struct cpumask *
+task_cpu_possible_mask(struct task_struct *p)
+{
+	if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
+		return cpu_possible_mask;
+
+	if (!is_compat_thread(task_thread_info(p)))
+		return cpu_possible_mask;
+
+	return system_32bit_el0_cpumask();
+}
+#define task_cpu_possible_mask	task_cpu_possible_mask
+
 void verify_cpu_asid_bits(void);
 void post_ttbr_update_workaround(void);
 
diff --git a/arch/arm64/include/asm/mte-kasan.h b/arch/arm64/include/asm/mte-kasan.h
index d952352bd008..22420e1f8c03 100644
--- a/arch/arm64/include/asm/mte-kasan.h
+++ b/arch/arm64/include/asm/mte-kasan.h
@@ -130,10 +130,6 @@ static inline void mte_set_mem_tag_range(void *addr, size_t size, u8 tag,
 
 void mte_enable_kernel_sync(void);
 void mte_enable_kernel_async(void);
-void mte_init_tags(u64 max_tag);
-
-void mte_set_report_once(bool state);
-bool mte_report_once(void);
 
 #else /* CONFIG_ARM64_MTE */
 
@@ -165,19 +161,6 @@ static inline void mte_enable_kernel_async(void)
 {
 }
 
-static inline void mte_init_tags(u64 max_tag)
-{
-}
-
-static inline void mte_set_report_once(bool state)
-{
-}
-
-static inline bool mte_report_once(void)
-{
-	return false;
-}
-
 #endif /* CONFIG_ARM64_MTE */
 
 #endif /* __ASSEMBLY__ */
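The ordering contract spelled out in the new mmu.h comment is what the tlbflush.h hunks at the end of this diff implement: clear the PTE, publish it with DSB ISHST, and only then sample the ASID. Below is a user-space C11 analogue of that ordering, purely as a sketch of the idea; every name in it is invented and it is not kernel code:

#include <stdatomic.h>
#include <stdint.h>

struct mm_like {
	_Atomic uint64_t id;		/* stands in for mm->context.id */
};

/* Like the new ASID(): an atomic load of the ID, truncated to 16 bits. */
static uint16_t asid_of(struct mm_like *mm)
{
	return atomic_load_explicit(&mm->id, memory_order_relaxed) & 0xffff;
}

static void flush_page_like(struct mm_like *mm, _Atomic uint64_t *pte)
{
	/* Clear the PTE first... */
	atomic_store_explicit(pte, 0, memory_order_relaxed);
	/* ...publish it (plays the role of DSB ISHST)... */
	atomic_thread_fence(memory_order_release);
	/* ...and only then sample the ASID, so a concurrent rollover
	 * either observes the cleared PTE or leaves us an ASID that
	 * is still clean in the TLB. */
	uint16_t asid = asid_of(mm);
	(void)asid;			/* the kernel would TLBI here */
}

int main(void)
{
	static struct mm_like mm = { .id = 42 };
	static _Atomic uint64_t pte = 0x1234;

	flush_page_like(&mm, &pte);
	return 0;
}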
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 58c7f80f5596..3f93b9e0b339 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -16,8 +16,6 @@
 
 #include <asm/pgtable-types.h>
 
-extern u64 gcr_kernel_excl;
-
 void mte_clear_page_tags(void *addr);
 unsigned long mte_copy_tags_from_user(void *to, const void __user *from,
 				      unsigned long n);
@@ -43,7 +41,6 @@ void mte_copy_page_tags(void *kto, const void *kfrom);
 void mte_thread_init_user(void);
 void mte_thread_switch(struct task_struct *next);
 void mte_suspend_enter(void);
-void mte_suspend_exit(void);
 long set_mte_ctrl(struct task_struct *task, unsigned long arg);
 long get_mte_ctrl(struct task_struct *task);
 int mte_ptrace_copy_tags(struct task_struct *child, long request,
@@ -72,9 +69,6 @@ static inline void mte_thread_switch(struct task_struct *next)
 static inline void mte_suspend_enter(void)
 {
 }
-static inline void mte_suspend_exit(void)
-{
-}
 static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
 {
 	return 0;
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f09bf5c02891..dfa76afa0ccf 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -715,7 +715,7 @@ static inline pud_t *p4d_pgtable(p4d_t p4d)
 	return (pud_t *)__va(p4d_page_paddr(p4d));
 }
 
-/* Find an entry in the frst-level page table. */
+/* Find an entry in the first-level page table. */
 #define pud_offset_phys(dir, addr)	(p4d_page_paddr(READ_ONCE(*(dir))) + pud_index(addr) * sizeof(pud_t))
 
 #define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h
index 28a78b67d9b4..efb098de3a84 100644
--- a/arch/arm64/include/asm/pointer_auth.h
+++ b/arch/arm64/include/asm/pointer_auth.h
@@ -10,6 +10,9 @@
 #include <asm/memory.h>
 #include <asm/sysreg.h>
 
+#define PR_PAC_ENABLED_KEYS_MASK                                               \
+	(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
+
 #ifdef CONFIG_ARM64_PTR_AUTH
 /*
  * Each key is a 128-bit quantity which is split across a pair of 64-bit
@@ -117,9 +120,9 @@ static __always_inline void ptrauth_enable(void)
 									\
 		/* enable all keys */					\
 		if (system_supports_address_auth())			\
-			set_task_sctlr_el1(current->thread.sctlr_user |	\
-					   SCTLR_ELx_ENIA | SCTLR_ELx_ENIB | \
-					   SCTLR_ELx_ENDA | SCTLR_ELx_ENDB); \
+			ptrauth_set_enabled_keys(current,		\
+						 PR_PAC_ENABLED_KEYS_MASK, \
+						 PR_PAC_ENABLED_KEYS_MASK); \
 	} while (0)
 
 #define ptrauth_thread_switch_user(tsk)                                        \
@@ -146,7 +149,4 @@ static __always_inline void ptrauth_enable(void)
 #define ptrauth_thread_switch_kernel(tsk)
 #endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
 
-#define PR_PAC_ENABLED_KEYS_MASK                                               \
-	(PR_PAC_APIAKEY | PR_PAC_APIBKEY | PR_PAC_APDAKEY | PR_PAC_APDBKEY)
-
 #endif /* __ASM_POINTER_AUTH_H */
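Moving PR_PAC_ENABLED_KEYS_MASK out of the CONFIG_ARM64_PTR_AUTH block lets ptrauth_enable() share the key-enable path with the PR_PAC_SET_ENABLED_KEYS prctl. For context, this is how a user-space thread drives that interface (a sketch; it assumes a PAuth-capable arm64 kernel with the prctl, and the fallback defines are only for older uapi headers):

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_PAC_SET_ENABLED_KEYS
#define PR_PAC_SET_ENABLED_KEYS	60
#define PR_PAC_GET_ENABLED_KEYS	61
#endif
#ifndef PR_PAC_APIAKEY
#define PR_PAC_APIAKEY	(1UL << 0)
#define PR_PAC_APIBKEY	(1UL << 1)
#define PR_PAC_APDAKEY	(1UL << 2)
#define PR_PAC_APDBKEY	(1UL << 3)
#endif

int main(void)
{
	unsigned long all = PR_PAC_APIAKEY | PR_PAC_APIBKEY |
			    PR_PAC_APDAKEY | PR_PAC_APDBKEY;

	/* arg2 = keys affected, arg3 = their new enabled state:
	 * disable the IB key for this thread, keep the rest enabled. */
	if (prctl(PR_PAC_SET_ENABLED_KEYS, all, all & ~PR_PAC_APIBKEY, 0, 0))
		perror("PR_PAC_SET_ENABLED_KEYS");

	printf("enabled keys: %d\n",
	       (int)prctl(PR_PAC_GET_ENABLED_KEYS, 0, 0, 0, 0));
	return 0;
}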
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index b6517fd03d7b..ee2bdc1b9f5b 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -16,6 +16,12 @@
  */
 #define NET_IP_ALIGN	0
 
+#define MTE_CTRL_GCR_USER_EXCL_SHIFT	0
+#define MTE_CTRL_GCR_USER_EXCL_MASK	0xffff
+
+#define MTE_CTRL_TCF_SYNC		(1UL << 16)
+#define MTE_CTRL_TCF_ASYNC		(1UL << 17)
+
 #ifndef __ASSEMBLY__
 
 #include <linux/build_bug.h>
@@ -153,7 +159,7 @@ struct thread_struct {
 #endif
 #endif
 #ifdef CONFIG_ARM64_MTE
-	u64			gcr_user_excl;
+	u64			mte_ctrl;
 #endif
 	u64			sctlr_user;
 };
@@ -253,7 +259,7 @@ extern void release_thread(struct task_struct *);
 
 unsigned long get_wchan(struct task_struct *p);
 
-void set_task_sctlr_el1(u64 sctlr);
+void update_sctlr_el1(u64 sctlr);
 
 /* Thread switching */
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index 89cba2622b79..6a75d7ecdcaa 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -37,7 +37,7 @@ static __must_check inline bool may_use_simd(void)
 	 */
 	return !WARN_ON(!system_capabilities_finalized()) &&
 	       system_supports_fpsimd() &&
-	       !in_irq() && !irqs_disabled() && !in_nmi() &&
+	       !in_hardirq() && !irqs_disabled() && !in_nmi() &&
 	       !this_cpu_read(fpsimd_context_busy);
 }
 
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 7b9c3acba684..f2e06e7c0a31 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -11,6 +11,7 @@
 
 #include <linux/bits.h>
 #include <linux/stringify.h>
+#include <linux/kasan-tags.h>
 
 /*
  * ARMv8 ARM reserves the following encoding for system registers:
@@ -698,8 +699,7 @@
 	(SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA | SCTLR_EL1_SA0 | \
 	 SCTLR_EL1_SED | SCTLR_ELx_I | SCTLR_EL1_DZE | SCTLR_EL1_UCT | \
 	 SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN | SCTLR_ELx_ITFSB | \
-	 SCTLR_ELx_ATA | SCTLR_EL1_ATA0 | ENDIAN_SET_EL1 | SCTLR_EL1_UCI | \
-	 SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
+	 ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_EPAN | SCTLR_EL1_RES1)
 
 /* MAIR_ELx memory attributes (used by Linux) */
 #define MAIR_ATTR_DEVICE_nGnRnE		UL(0x00)
@@ -847,12 +847,16 @@
 #define ID_AA64MMFR0_ASID_SHIFT		4
 #define ID_AA64MMFR0_PARANGE_SHIFT	0
 
-#define ID_AA64MMFR0_TGRAN4_NI		0xf
-#define ID_AA64MMFR0_TGRAN4_SUPPORTED	0x0
-#define ID_AA64MMFR0_TGRAN64_NI		0xf
-#define ID_AA64MMFR0_TGRAN64_SUPPORTED	0x0
-#define ID_AA64MMFR0_TGRAN16_NI		0x0
-#define ID_AA64MMFR0_TGRAN16_SUPPORTED	0x1
+#define ID_AA64MMFR0_TGRAN4_NI			0xf
+#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN	0x0
+#define ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX	0x7
+#define ID_AA64MMFR0_TGRAN64_NI			0xf
+#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN	0x0
+#define ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX	0x7
+#define ID_AA64MMFR0_TGRAN16_NI			0x0
+#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN	0x1
+#define ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX	0xf
+
 #define ID_AA64MMFR0_PARANGE_48		0x5
 #define ID_AA64MMFR0_PARANGE_52		0x6
 
@@ -1028,16 +1032,16 @@
 
 #if defined(CONFIG_ARM64_4K_PAGES)
 #define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN4_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN4_SUPPORTED
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0x7
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN4_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_TGRAN4_SUPPORTED_MAX
 #elif defined(CONFIG_ARM64_16K_PAGES)
 #define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN16_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN16_SUPPORTED
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0xF
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN16_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_TGRAN16_SUPPORTED_MAX
 #elif defined(CONFIG_ARM64_64K_PAGES)
 #define ID_AA64MMFR0_TGRAN_SHIFT		ID_AA64MMFR0_TGRAN64_SHIFT
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN64_SUPPORTED
-#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	0x7
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MIN	ID_AA64MMFR0_TGRAN64_SUPPORTED_MIN
+#define ID_AA64MMFR0_TGRAN_SUPPORTED_MAX	ID_AA64MMFR0_TGRAN64_SUPPORTED_MAX
 #endif
 
 #define MVFR2_FPMISC_SHIFT		4
@@ -1067,6 +1071,21 @@
 #define SYS_GCR_EL1_RRND	(BIT(16))
 #define SYS_GCR_EL1_EXCL_MASK	0xffffUL
 
+#ifdef CONFIG_KASAN_HW_TAGS
+/*
+ * KASAN always uses a whole byte for its tags. With CONFIG_KASAN_HW_TAGS it
+ * only uses tags in the range 0xF0-0xFF, which we map to MTE tags 0x0-0xF.
+ */
+#define __MTE_TAG_MIN		(KASAN_TAG_MIN & 0xf)
+#define __MTE_TAG_MAX		(KASAN_TAG_MAX & 0xf)
+#define __MTE_TAG_INCL		GENMASK(__MTE_TAG_MAX, __MTE_TAG_MIN)
+#define KERNEL_GCR_EL1_EXCL	(SYS_GCR_EL1_EXCL_MASK & ~__MTE_TAG_INCL)
+#else
+#define KERNEL_GCR_EL1_EXCL	SYS_GCR_EL1_EXCL_MASK
+#endif
+
+#define KERNEL_GCR_EL1		(SYS_GCR_EL1_RRND | KERNEL_GCR_EL1_EXCL)
+
 /* RGSR_EL1 Definitions */
 #define SYS_RGSR_EL1_TAG_MASK	0xfUL
 #define SYS_RGSR_EL1_SEED_SHIFT	8
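For the KERNEL_GCR_EL1_EXCL arithmetic just added to sysreg.h, the following stand-alone sketch works through the expected value, with the tag constants copied from include/linux/kasan-tags.h as introduced by this series and GENMASK reduced to its 64-bit form:

#include <stdio.h>

/* From include/linux/kasan-tags.h (CONFIG_KASAN_HW_TAGS):
 * 0xFF is the native kernel-pointer tag, 0xFE the "invalid" tag,
 * so random tags run from 0xF0 to 0xFD. */
#define KASAN_TAG_MIN	0xF0
#define KASAN_TAG_MAX	0xFD

/* Kernel GENMASK(), assuming 64-bit unsigned long. */
#define GENMASK(h, l)	(((~0UL) << (l)) & (~0UL >> (63 - (h))))

int main(void)
{
	unsigned long mte_min = KASAN_TAG_MIN & 0xf;	/* 0x0 */
	unsigned long mte_max = KASAN_TAG_MAX & 0xf;	/* 0xd */
	unsigned long incl = GENMASK(mte_max, mte_min);	/* 0x3fff */
	unsigned long excl = 0xffffUL & ~incl;

	/* Only MTE tags 0xE and 0xF stay excluded from random
	 * generation, i.e. the value should print as 0xc000. */
	printf("KERNEL_GCR_EL1_EXCL = %#lx\n", excl);
	return 0;
}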
diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h
index cc3f5a33ff9c..412a3b9a3c25 100644
--- a/arch/arm64/include/asm/tlbflush.h
+++ b/arch/arm64/include/asm/tlbflush.h
@@ -245,9 +245,10 @@ static inline void flush_tlb_all(void)
 
 static inline void flush_tlb_mm(struct mm_struct *mm)
 {
-	unsigned long asid = __TLBI_VADDR(0, ASID(mm));
+	unsigned long asid;
 
 	dsb(ishst);
+	asid = __TLBI_VADDR(0, ASID(mm));
 	__tlbi(aside1is, asid);
 	__tlbi_user(aside1is, asid);
 	dsb(ish);
@@ -256,9 +257,10 @@ static inline void flush_tlb_mm(struct mm_struct *mm)
 static inline void flush_tlb_page_nosync(struct vm_area_struct *vma,
 					 unsigned long uaddr)
 {
-	unsigned long addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
+	unsigned long addr;
 
 	dsb(ishst);
+	addr = __TLBI_VADDR(uaddr, ASID(vma->vm_mm));
 	__tlbi(vale1is, addr);
 	__tlbi_user(vale1is, addr);
 }
@@ -283,9 +285,7 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 {
 	int num = 0;
 	int scale = 0;
-	unsigned long asid = ASID(vma->vm_mm);
-	unsigned long addr;
-	unsigned long pages;
+	unsigned long asid, addr, pages;
 
 	start = round_down(start, stride);
 	end = round_up(end, stride);
@@ -305,10 +305,11 @@ static inline void __flush_tlb_range(struct vm_area_struct *vma,
 	}
 
 	dsb(ishst);
+	asid = ASID(vma->vm_mm);
 
 	/*
 	 * When the CPU does not support TLB range operations, flush the TLB
-	 * entries one by one at the granularity of 'stride'. If the the TLB
+	 * entries one by one at the granularity of 'stride'. If the TLB
	 * range ops are supported, then:
	 *
	 * 1. If 'pages' is odd, flush the first page through non-range