Diffstat (limited to 'arch/arm64/include')
-rw-r--r--   arch/arm64/include/asm/arch_gicv3.h        |   5
-rw-r--r--   arch/arm64/include/asm/barrier.h           |  11
-rw-r--r--   arch/arm64/include/asm/cpufeature.h        |  14
-rw-r--r--   arch/arm64/include/asm/efi.h               |   2
-rw-r--r--   arch/arm64/include/asm/el2_setup.h         |   8
-rw-r--r--   arch/arm64/include/asm/esr.h               |   1
-rw-r--r--   arch/arm64/include/asm/fpsimd.h            |  30
-rw-r--r--   arch/arm64/include/asm/fpsimdmacros.h      |  22
-rw-r--r--   arch/arm64/include/asm/ftrace.h            |  15
-rw-r--r--   arch/arm64/include/asm/hwcap.h             |  14
-rw-r--r--   arch/arm64/include/asm/insn.h              |   1
-rw-r--r--   arch/arm64/include/asm/irqflags.h          | 191
-rw-r--r--   arch/arm64/include/asm/linkage.h           |   4
-rw-r--r--   arch/arm64/include/asm/patching.h          |   2
-rw-r--r--   arch/arm64/include/asm/pgtable.h           |   8
-rw-r--r--   arch/arm64/include/asm/processor.h         |   2
-rw-r--r--   arch/arm64/include/asm/ptrace.h            |   2
-rw-r--r--   arch/arm64/include/asm/scs.h               |   7
-rw-r--r--   arch/arm64/include/asm/sysreg.h            | 106
-rw-r--r--   arch/arm64/include/uapi/asm/hwcap.h        |   6
-rw-r--r--   arch/arm64/include/uapi/asm/sigcontext.h   |  27
21 files changed, 286 insertions, 192 deletions
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 48d4473e8eee..01281a5336cf 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -190,5 +190,10 @@ static inline void gic_arch_enable_irqs(void)
 	asm volatile ("msr daifclr, #3" : : : "memory");
 }
 
+static inline bool gic_has_relaxed_pmr_sync(void)
+{
+	return cpus_have_cap(ARM64_HAS_GIC_PRIO_RELAXED_SYNC);
+}
+
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_ARCH_GICV3_H */
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 2cfc4245d2e2..3dd8982a9ce3 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -11,6 +11,8 @@
 
 #include <linux/kasan-checks.h>
 
+#include <asm/alternative-macros.h>
+
 #define __nops(n)	".rept	" #n "\nnop\n.endr\n"
 #define nops(n)		asm volatile(__nops(n))
 
@@ -41,10 +43,11 @@
 #ifdef CONFIG_ARM64_PSEUDO_NMI
 #define pmr_sync()						\
 do {								\
-	extern struct static_key_false gic_pmr_sync;		\
-								\
-	if (static_branch_unlikely(&gic_pmr_sync))		\
-		dsb(sy);					\
+	asm volatile(						\
+	ALTERNATIVE_CB("dsb sy",				\
+		       ARM64_HAS_GIC_PRIO_RELAXED_SYNC,		\
+		       alt_cb_patch_nops)			\
+	);							\
 } while(0)
 #else
 #define pmr_sync()	do {} while (0)
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 03d1c9d7af82..6bf013fb110d 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -769,6 +769,12 @@ static __always_inline bool system_supports_sme(void)
 		cpus_have_const_cap(ARM64_SME);
 }
 
+static __always_inline bool system_supports_sme2(void)
+{
+	return IS_ENABLED(CONFIG_ARM64_SME) &&
+		cpus_have_const_cap(ARM64_SME2);
+}
+
 static __always_inline bool system_supports_fa64(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_SME) &&
@@ -806,7 +812,7 @@ static inline bool system_has_full_ptr_auth(void)
 static __always_inline bool system_uses_irq_prio_masking(void)
 {
 	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
-	       cpus_have_const_cap(ARM64_HAS_IRQ_PRIO_MASKING);
+	       cpus_have_const_cap(ARM64_HAS_GIC_PRIO_MASKING);
 }
 
 static inline bool system_supports_mte(void)
@@ -864,7 +870,11 @@ static inline bool cpu_has_hw_af(void)
 	if (!IS_ENABLED(CONFIG_ARM64_HW_AFDBM))
 		return false;
 
-	mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
+	/*
+	 * Use cached version to avoid emulated msr operation on KVM
+	 * guests.
+	 */
+	mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
 	return cpuid_feature_extract_unsigned_field(mmfr1,
 						    ID_AA64MMFR1_EL1_HAFDBS_SHIFT);
 }
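For context, cpuid_feature_extract_unsigned_field() used above pulls one 4-bit unsigned field out of a sanitised ID register value (ARM64_FEATURE_FIELD_BITS is 4, as seen later in this patch). A minimal stand-alone sketch of that arithmetic; the function name and the shift value are illustrative assumptions, not taken from this patch:

    #include <stdint.h>

    /* Assumption: HAFDBS occupies bits [3:0] of ID_AA64MMFR1_EL1. */
    #define ID_AA64MMFR1_EL1_HAFDBS_SHIFT   0

    /* Hypothetical stand-in for cpuid_feature_extract_unsigned_field(). */
    static unsigned int extract_unsigned_field(uint64_t reg, unsigned int shift)
    {
        return (reg >> shift) & 0xf;    /* ID register fields are 4 bits wide */
    }

    /* cpu_has_hw_af() then reduces to: extract_unsigned_field(mmfr1, shift) != 0 */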
diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h
index de4ff90785b2..acaa39f6381a 100644
--- a/arch/arm64/include/asm/efi.h
+++ b/arch/arm64/include/asm/efi.h
@@ -114,6 +114,8 @@ static inline unsigned long efi_get_kimg_min_align(void)
 #define EFI_ALLOC_ALIGN		SZ_64K
 #define EFI_ALLOC_LIMIT		((1UL << 48) - 1)
 
+extern unsigned long primary_entry_offset(void);
+
 /*
  * On ARM systems, virtually remapped UEFI runtime services are set up in two
  * distinct stages:
diff --git a/arch/arm64/include/asm/el2_setup.h b/arch/arm64/include/asm/el2_setup.h
index 668569adf4d3..2cdd010f9524 100644
--- a/arch/arm64/include/asm/el2_setup.h
+++ b/arch/arm64/include/asm/el2_setup.h
@@ -53,10 +53,10 @@
 	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present
 
 	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
-	and	x0, x0, #(1 << SYS_PMBIDR_EL1_P_SHIFT)
+	and	x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
 	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
-	mov	x0, #(1 << SYS_PMSCR_EL2_PCT_SHIFT | \
-		      1 << SYS_PMSCR_EL2_PA_SHIFT)
+	mov	x0, #(1 << PMSCR_EL2_PCT_SHIFT | \
+		      1 << PMSCR_EL2_PA_SHIFT)
 	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
 .Lskip_spe_el2_\@:
 	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
@@ -177,7 +177,7 @@
 /**
  * Initialize EL2 registers to sane values. This should be called early on all
  * cores that were booted in EL2. Note that everything gets initialised as
- * if VHE was not evailable. The kernel context will be upgraded to VHE
+ * if VHE was not available. The kernel context will be upgraded to VHE
  * if possible later on in the boot process
  *
  * Regs: x0, x1 and x2 are clobbered.
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 206de10524e3..c9f15b9e3c71 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -350,6 +350,7 @@
 #define ESR_ELx_SME_ISS_ILL		1
 #define ESR_ELx_SME_ISS_SM_DISABLED	2
 #define ESR_ELx_SME_ISS_ZA_DISABLED	3
+#define ESR_ELx_SME_ISS_ZT_DISABLED	4
 
 #ifndef __ASSEMBLY__
 #include <asm/types.h>
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index e6fa1e2982c8..67f2fb781f59 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -61,7 +61,7 @@ extern void fpsimd_kvm_prepare(void);
 struct cpu_fp_state {
 	struct user_fpsimd_state *st;
 	void *sve_state;
-	void *za_state;
+	void *sme_state;
 	u64 *svcr;
 	unsigned int sve_vl;
 	unsigned int sme_vl;
@@ -105,6 +105,13 @@ static inline void *sve_pffr(struct thread_struct *thread)
 	return (char *)thread->sve_state + sve_ffr_offset(vl);
 }
 
+static inline void *thread_zt_state(struct thread_struct *thread)
+{
+	/* The ZT register state is stored immediately after the ZA state */
+	unsigned int sme_vq = sve_vq_from_vl(thread_get_sme_vl(thread));
+	return thread->sme_state + ZA_SIG_REGS_SIZE(sme_vq);
+}
+
 extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
 extern void sve_load_state(void const *state, u32 const *pfpsr,
 			   int restore_ffr);
@@ -112,12 +119,13 @@ extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
 extern unsigned int sve_get_vl(void);
 extern void sve_set_vq(unsigned long vq_minus_1);
 extern void sme_set_vq(unsigned long vq_minus_1);
-extern void za_save_state(void *state);
-extern void za_load_state(void const *state);
+extern void sme_save_state(void *state, int zt);
+extern void sme_load_state(void const *state, int zt);
 
 struct arm64_cpu_capabilities;
 extern void sve_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 extern void sme_kernel_enable(const struct arm64_cpu_capabilities *__unused);
+extern void sme2_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 extern void fa64_kernel_enable(const struct arm64_cpu_capabilities *__unused);
 
 extern u64 read_zcr_features(void);
@@ -355,14 +363,20 @@ extern int sme_get_current_vl(void);
 
 /*
  * Return how many bytes of memory are required to store the full SME
- * specific state (currently just ZA) for task, given task's currently
- * configured vector length.
+ * specific state for task, given task's currently configured vector
+ * length.
  */
-static inline size_t za_state_size(struct task_struct const *task)
+static inline size_t sme_state_size(struct task_struct const *task)
 {
 	unsigned int vl = task_get_sme_vl(task);
+	size_t size;
+
+	size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+
+	if (system_supports_sme2())
+		size += ZT_SIG_REG_SIZE;
 
-	return ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));
+	return size;
 }
 
 #else
@@ -382,7 +396,7 @@ static inline int sme_max_virtualisable_vl(void) { return 0; }
 static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
 static inline int sme_get_current_vl(void) { return -EINVAL; }
 
-static inline size_t za_state_size(struct task_struct const *task)
+static inline size_t sme_state_size(struct task_struct const *task)
 {
 	return 0;
 }
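thread_zt_state() above relies on the layout in which the ZT0 image sits immediately after the ZA array, and ZA itself is SVL x SVL bytes for the task's streaming vector length. A small stand-alone sketch of that offset arithmetic; it assumes the usual definition of ZA_SIG_REGS_SIZE() as (vq * 16) squared bytes, matching the uapi signal-frame macros added later in this patch:

    #include <stdio.h>

    #define SVE_VQ_BYTES            16      /* 128 bits per vector quadword */
    #define ZA_SIG_REGS_SIZE(vq)    (((vq) * SVE_VQ_BYTES) * ((vq) * SVE_VQ_BYTES))
    #define ZT_SIG_REG_BYTES        (512 / 8)       /* one 512-bit ZT0 register */

    int main(void)
    {
        unsigned int sme_vl = 64;                   /* streaming VL in bytes */
        unsigned int vq = sme_vl / SVE_VQ_BYTES;    /* cf. sve_vq_from_vl() */

        /* ZA is SVL x SVL bytes; the ZT0 image is stored immediately after it */
        printf("ZA bytes: %u\n", ZA_SIG_REGS_SIZE(vq));             /* 4096 */
        printf("ZT0 offset: %u, ZT0 bytes: %u\n",
               ZA_SIG_REGS_SIZE(vq), ZT_SIG_REG_BYTES);             /* 4096, 64 */
        return 0;
    }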
diff --git a/arch/arm64/include/asm/fpsimdmacros.h b/arch/arm64/include/asm/fpsimdmacros.h
index 5e0910cf4832..cd03819a3b68 100644
--- a/arch/arm64/include/asm/fpsimdmacros.h
+++ b/arch/arm64/include/asm/fpsimdmacros.h
@@ -221,6 +221,28 @@
 .endm
 
 /*
+ * LDR (ZT0)
+ *
+ *	LDR ZT0, nx
+ */
+.macro _ldr_zt nx
+	_check_general_reg \nx
+	.inst	0xe11f8000	\
+		| (\nx << 5)
+.endm
+
+/*
+ * STR (ZT0)
+ *
+ *	STR ZT0, nx
+ */
+.macro _str_zt nx
+	_check_general_reg \nx
+	.inst	0xe13f8000	\
+		| (\nx << 5)
+.endm
+
+/*
 * Zero the entire ZA array
 *	ZERO ZA
 */
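The _ldr_zt/_str_zt macros above emit raw instruction words so the file still assembles with toolchains that do not know SME2; the general-purpose base register number is simply OR'd into bits [9:5] of the opcode. A quick sketch of that encoding, using only the opcode values from the macros themselves:

    #include <stdint.h>
    #include <stdio.h>

    /* Opcodes from the macros above; Xn (the base register) goes in bits [9:5]. */
    static uint32_t ldr_zt0(unsigned int xn) { return 0xe11f8000u | (xn << 5); }
    static uint32_t str_zt0(unsigned int xn) { return 0xe13f8000u | (xn << 5); }

    int main(void)
    {
        /* e.g. "ldr zt0, [x0]" and "str zt0, [x1]" */
        printf("%08x %08x\n", ldr_zt0(0), str_zt0(1));      /* e11f8000 e13f8020 */
        return 0;
    }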
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 5664729800ae..1c2672bbbf37 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -62,20 +62,7 @@ extern unsigned long ftrace_graph_call;
 
 extern void return_to_handler(void);
 
-static inline unsigned long ftrace_call_adjust(unsigned long addr)
-{
-	/*
-	 * Adjust addr to point at the BL in the callsite.
-	 * See ftrace_init_nop() for the callsite sequence.
-	 */
-	if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_ARGS))
-		return addr + AARCH64_INSN_SIZE;
-	/*
-	 * addr is the address of the mcount call instruction.
-	 * recordmcount does the necessary offset calculation.
-	 */
-	return addr;
-}
+unsigned long ftrace_call_adjust(unsigned long addr);
 
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
 struct dyn_ftrace;
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index 06dd12c514e6..5d45f19fda7f 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -31,12 +31,20 @@
 #define COMPAT_HWCAP_VFPD32	(1 << 19)
 #define COMPAT_HWCAP_LPAE	(1 << 20)
 #define COMPAT_HWCAP_EVTSTRM	(1 << 21)
+#define COMPAT_HWCAP_FPHP	(1 << 22)
+#define COMPAT_HWCAP_ASIMDHP	(1 << 23)
+#define COMPAT_HWCAP_ASIMDDP	(1 << 24)
+#define COMPAT_HWCAP_ASIMDFHM	(1 << 25)
+#define COMPAT_HWCAP_ASIMDBF16	(1 << 26)
+#define COMPAT_HWCAP_I8MM	(1 << 27)
 
 #define COMPAT_HWCAP2_AES	(1 << 0)
 #define COMPAT_HWCAP2_PMULL	(1 << 1)
 #define COMPAT_HWCAP2_SHA1	(1 << 2)
 #define COMPAT_HWCAP2_SHA2	(1 << 3)
 #define COMPAT_HWCAP2_CRC32	(1 << 4)
+#define COMPAT_HWCAP2_SB	(1 << 5)
+#define COMPAT_HWCAP2_SSBS	(1 << 6)
 
 #ifndef __ASSEMBLY__
 #include <linux/log2.h>
@@ -123,6 +131,12 @@
 #define KERNEL_HWCAP_CSSC		__khwcap2_feature(CSSC)
 #define KERNEL_HWCAP_RPRFM		__khwcap2_feature(RPRFM)
 #define KERNEL_HWCAP_SVE2P1		__khwcap2_feature(SVE2P1)
+#define KERNEL_HWCAP_SME2		__khwcap2_feature(SME2)
+#define KERNEL_HWCAP_SME2P1		__khwcap2_feature(SME2P1)
+#define KERNEL_HWCAP_SME_I16I32		__khwcap2_feature(SME_I16I32)
+#define KERNEL_HWCAP_SME_BI32I32	__khwcap2_feature(SME_BI32I32)
+#define KERNEL_HWCAP_SME_B16B16		__khwcap2_feature(SME_B16B16)
+#define KERNEL_HWCAP_SME_F16F16		__khwcap2_feature(SME_F16F16)
 
 /*
  * This yields a mask that user programs can use to figure out what
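User space discovers the new SME2 capabilities through the matching HWCAP2_* bits (added to the uapi header further down) rather than by probing ID registers directly. A typical check using the standard getauxval(3) interface, assuming headers from a kernel with this patch applied:

    #include <stdio.h>
    #include <sys/auxv.h>
    #include <asm/hwcap.h>      /* HWCAP2_SME, HWCAP2_SME2, ... */

    int main(void)
    {
        unsigned long hwcap2 = getauxval(AT_HWCAP2);

        if (hwcap2 & HWCAP2_SME2)
            puts("SME2 supported (ZT0 available)");
        else if (hwcap2 & HWCAP2_SME)
            puts("SME only");
        else
            puts("no SME");
        return 0;
    }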
diff --git a/arch/arm64/include/asm/insn.h b/arch/arm64/include/asm/insn.h
index aaf1f52fbf3e..139a88e4e852 100644
--- a/arch/arm64/include/asm/insn.h
+++ b/arch/arm64/include/asm/insn.h
@@ -420,6 +420,7 @@ __AARCH64_INSN_FUNCS(sb,	0xFFFFFFFF, 0xD50330FF)
 __AARCH64_INSN_FUNCS(clrex,	0xFFFFF0FF, 0xD503305F)
 __AARCH64_INSN_FUNCS(ssbb,	0xFFFFFFFF, 0xD503309F)
 __AARCH64_INSN_FUNCS(pssbb,	0xFFFFFFFF, 0xD503349F)
+__AARCH64_INSN_FUNCS(bti,	0xFFFFFF3F, 0xD503241f)
 
 #undef	__AARCH64_INSN_FUNCS
diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h
index b57b9b1e4344..e0f5f6b73edd 100644
--- a/arch/arm64/include/asm/irqflags.h
+++ b/arch/arm64/include/asm/irqflags.h
@@ -21,43 +21,77 @@
  * exceptions should be unmasked.
  */
 
-/*
- * CPU interrupt mask handling.
- */
-static inline void arch_local_irq_enable(void)
+static __always_inline bool __irqflags_uses_pmr(void)
 {
-	if (system_has_prio_mask_debugging()) {
-		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
+	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
+	       alternative_has_feature_unlikely(ARM64_HAS_GIC_PRIO_MASKING);
+}
 
+static __always_inline void __daif_local_irq_enable(void)
+{
+	barrier();
+	asm volatile("msr daifclr, #3");
+	barrier();
+}
+
+static __always_inline void __pmr_local_irq_enable(void)
+{
+	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
+		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
 		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
 	}
 
-	asm volatile(ALTERNATIVE(
-		"msr	daifclr, #3		// arch_local_irq_enable",
-		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_IRQ_PRIO_MASKING)
-		:
-		: "r" ((unsigned long) GIC_PRIO_IRQON)
-		: "memory");
-
+	barrier();
+	write_sysreg_s(GIC_PRIO_IRQON, SYS_ICC_PMR_EL1);
 	pmr_sync();
+	barrier();
 }
 
-static inline void arch_local_irq_disable(void)
+static inline void arch_local_irq_enable(void)
 {
-	if (system_has_prio_mask_debugging()) {
-		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
+	if (__irqflags_uses_pmr()) {
+		__pmr_local_irq_enable();
+	} else {
+		__daif_local_irq_enable();
+	}
+}
 
+static __always_inline void __daif_local_irq_disable(void)
+{
+	barrier();
+	asm volatile("msr daifset, #3");
+	barrier();
+}
+
+static __always_inline void __pmr_local_irq_disable(void)
+{
+	if (IS_ENABLED(CONFIG_ARM64_DEBUG_PRIORITY_MASKING)) {
+		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
 		WARN_ON_ONCE(pmr != GIC_PRIO_IRQON && pmr != GIC_PRIO_IRQOFF);
 	}
 
-	asm volatile(ALTERNATIVE(
-		"msr	daifset, #3		// arch_local_irq_disable",
-		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_IRQ_PRIO_MASKING)
-		:
-		: "r" ((unsigned long) GIC_PRIO_IRQOFF)
-		: "memory");
+	barrier();
+	write_sysreg_s(GIC_PRIO_IRQOFF, SYS_ICC_PMR_EL1);
+	barrier();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+	if (__irqflags_uses_pmr()) {
+		__pmr_local_irq_disable();
+	} else {
+		__daif_local_irq_disable();
+	}
+}
+
+static __always_inline unsigned long __daif_local_save_flags(void)
+{
+	return read_sysreg(daif);
+}
+
+static __always_inline unsigned long __pmr_local_save_flags(void)
+{
+	return read_sysreg_s(SYS_ICC_PMR_EL1);
 }
 
 /*
@@ -65,69 +99,108 @@ static inline void arch_local_irq_disable(void)
  */
 static inline unsigned long arch_local_save_flags(void)
 {
-	unsigned long flags;
+	if (__irqflags_uses_pmr()) {
+		return __pmr_local_save_flags();
+	} else {
+		return __daif_local_save_flags();
+	}
+}
 
-	asm volatile(ALTERNATIVE(
-		"mrs	%0, daif",
-		__mrs_s("%0", SYS_ICC_PMR_EL1),
-		ARM64_HAS_IRQ_PRIO_MASKING)
-		: "=&r" (flags)
-		:
-		: "memory");
+static __always_inline bool __daif_irqs_disabled_flags(unsigned long flags)
+{
+	return flags & PSR_I_BIT;
+}
 
-	return flags;
+static __always_inline bool __pmr_irqs_disabled_flags(unsigned long flags)
+{
+	return flags != GIC_PRIO_IRQON;
 }
 
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline bool arch_irqs_disabled_flags(unsigned long flags)
 {
-	int res;
+	if (__irqflags_uses_pmr()) {
+		return __pmr_irqs_disabled_flags(flags);
+	} else {
+		return __daif_irqs_disabled_flags(flags);
+	}
+}
 
-	asm volatile(ALTERNATIVE(
-		"and	%w0, %w1, #" __stringify(PSR_I_BIT),
-		"eor	%w0, %w1, #" __stringify(GIC_PRIO_IRQON),
-		ARM64_HAS_IRQ_PRIO_MASKING)
-		: "=&r" (res)
-		: "r" ((int) flags)
-		: "memory");
+static __always_inline bool __daif_irqs_disabled(void)
+{
+	return __daif_irqs_disabled_flags(__daif_local_save_flags());
+}
 
-	return res;
+static __always_inline bool __pmr_irqs_disabled(void)
+{
+	return __pmr_irqs_disabled_flags(__pmr_local_save_flags());
 }
 
-static inline int arch_irqs_disabled(void)
+static inline bool arch_irqs_disabled(void)
 {
-	return arch_irqs_disabled_flags(arch_local_save_flags());
+	if (__irqflags_uses_pmr()) {
+		return __pmr_irqs_disabled();
+	} else {
+		return __daif_irqs_disabled();
+	}
 }
 
-static inline unsigned long arch_local_irq_save(void)
+static __always_inline unsigned long __daif_local_irq_save(void)
 {
-	unsigned long flags;
+	unsigned long flags = __daif_local_save_flags();
+
+	__daif_local_irq_disable();
+
+	return flags;
+}
 
-	flags = arch_local_save_flags();
+static __always_inline unsigned long __pmr_local_irq_save(void)
+{
+	unsigned long flags = __pmr_local_save_flags();
 
 	/*
 	 * There are too many states with IRQs disabled, just keep the current
 	 * state if interrupts are already disabled/masked.
 	 */
-	if (!arch_irqs_disabled_flags(flags))
-		arch_local_irq_disable();
+	if (!__pmr_irqs_disabled_flags(flags))
+		__pmr_local_irq_disable();
 
 	return flags;
 }
 
+static inline unsigned long arch_local_irq_save(void)
+{
+	if (__irqflags_uses_pmr()) {
+		return __pmr_local_irq_save();
+	} else {
+		return __daif_local_irq_save();
+	}
+}
+
+static __always_inline void __daif_local_irq_restore(unsigned long flags)
+{
+	barrier();
+	write_sysreg(flags, daif);
+	barrier();
+}
+
+static __always_inline void __pmr_local_irq_restore(unsigned long flags)
+{
+	barrier();
+	write_sysreg_s(flags, SYS_ICC_PMR_EL1);
+	pmr_sync();
+	barrier();
+}
+
 /*
  * restore saved IRQ state
  */
 static inline void arch_local_irq_restore(unsigned long flags)
 {
-	asm volatile(ALTERNATIVE(
-		"msr	daif, %0",
-		__msr_s(SYS_ICC_PMR_EL1, "%0"),
-		ARM64_HAS_IRQ_PRIO_MASKING)
-		:
-		: "r" (flags)
-		: "memory");
-
-	pmr_sync();
+	if (__irqflags_uses_pmr()) {
+		__pmr_local_irq_restore(flags);
+	} else {
+		__daif_local_irq_restore(flags);
+	}
 }
 
 #endif /* __ASM_IRQFLAGS_H */
diff --git a/arch/arm64/include/asm/linkage.h b/arch/arm64/include/asm/linkage.h
index 1436fa1cde24..d3acd9c87509 100644
--- a/arch/arm64/include/asm/linkage.h
+++ b/arch/arm64/include/asm/linkage.h
@@ -5,8 +5,8 @@
 #include <asm/assembler.h>
 #endif
 
-#define __ALIGN		.align 2
-#define __ALIGN_STR	".align 2"
+#define __ALIGN		.balign CONFIG_FUNCTION_ALIGNMENT
+#define __ALIGN_STR	".balign " #CONFIG_FUNCTION_ALIGNMENT
 
 /*
  * When using in-kernel BTI we need to ensure that PCS-conformant
diff --git a/arch/arm64/include/asm/patching.h b/arch/arm64/include/asm/patching.h
index 6bf5adc56295..68908b82b168 100644
--- a/arch/arm64/include/asm/patching.h
+++ b/arch/arm64/include/asm/patching.h
@@ -7,6 +7,8 @@
 int aarch64_insn_read(void *addr, u32 *insnp);
 int aarch64_insn_write(void *addr, u32 insn);
 
+int aarch64_insn_write_literal_u64(void *addr, u64 val);
+
 int aarch64_insn_patch_text_nosync(void *addr, u32 insn);
 int aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt);
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 65e78999c75d..27455bfd64bc 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -275,6 +275,7 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 }
 
 extern void __sync_icache_dcache(pte_t pteval);
+bool pgattr_change_is_safe(u64 old, u64 new);
 
 /*
  * PTE bits configuration in the presence of hardware Dirty Bit Management
@@ -292,7 +293,7 @@ extern void __sync_icache_dcache(pte_t pteval);
  *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
  */
 
-static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
+static inline void __check_safe_pte_update(struct mm_struct *mm, pte_t *ptep,
 					   pte_t pte)
 {
 	pte_t old_pte;
@@ -318,6 +319,9 @@ static inline void __check_racy_pte_update(struct mm_struct *mm, pte_t *ptep,
 	VM_WARN_ONCE(pte_write(old_pte) && !pte_dirty(pte),
 		     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
 		     __func__, pte_val(old_pte), pte_val(pte));
+	VM_WARN_ONCE(!pgattr_change_is_safe(pte_val(old_pte), pte_val(pte)),
+		     "%s: unsafe attribute change: 0x%016llx -> 0x%016llx",
+		     __func__, pte_val(old_pte), pte_val(pte));
 }
 
 static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
@@ -346,7 +350,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 			mte_sync_tags(old_pte, pte);
 	}
 
-	__check_racy_pte_update(mm, ptep, pte);
+	__check_safe_pte_update(mm, ptep, pte);
 
 	set_pte(ptep, pte);
 }
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index d51b32a69309..3918f2a67970 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -161,7 +161,7 @@ struct thread_struct {
 	enum fp_type		fp_type;	/* registers FPSIMD or SVE? */
 	unsigned int		fpsimd_cpu;
 	void			*sve_state;	/* SVE registers, if any */
-	void			*za_state;	/* ZA register, if any */
+	void			*sme_state;	/* ZA and ZT state, if any */
 	unsigned int		vl[ARM64_VEC_MAX];	/* vector length */
 	unsigned int		vl_onexec[ARM64_VEC_MAX]; /* vl after next exec */
 	unsigned long		fault_address;	/* fault info */
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index 41b332c054ab..47ec58031f11 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -194,7 +194,7 @@ struct pt_regs {
 	u32 unused2;
 #endif
 	u64 sdei_ttbr1;
-	/* Only valid when ARM64_HAS_IRQ_PRIO_MASKING is enabled. */
+	/* Only valid when ARM64_HAS_GIC_PRIO_MASKING is enabled. */
 	u64 pmr_save;
 	u64 stackframe[2];
diff --git a/arch/arm64/include/asm/scs.h b/arch/arm64/include/asm/scs.h
index ff7da1268a52..13df982a0808 100644
--- a/arch/arm64/include/asm/scs.h
+++ b/arch/arm64/include/asm/scs.h
@@ -10,15 +10,16 @@
 #ifdef CONFIG_SHADOW_CALL_STACK
 	scs_sp	.req	x18
 
-	.macro scs_load tsk
-	ldr	scs_sp, [\tsk, #TSK_TI_SCS_SP]
+	.macro scs_load_current
+	get_current_task scs_sp
+	ldr	scs_sp, [scs_sp, #TSK_TI_SCS_SP]
 	.endm
 
 	.macro scs_save tsk
 	str	scs_sp, [\tsk, #TSK_TI_SCS_SP]
 	.endm
 #else
-	.macro scs_load tsk
+	.macro scs_load_current
 	.endm
 
 	.macro scs_save tsk
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 1312fb48f18b..043ecc3405e7 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -216,101 +216,22 @@
 #define SYS_PAR_EL1_FST		GENMASK(6, 1)
 
 /*** Statistical Profiling Extension ***/
-/* ID registers */
-#define SYS_PMSIDR_EL1			sys_reg(3, 0, 9, 9, 7)
-#define SYS_PMSIDR_EL1_FE_SHIFT		0
-#define SYS_PMSIDR_EL1_FT_SHIFT		1
-#define SYS_PMSIDR_EL1_FL_SHIFT		2
-#define SYS_PMSIDR_EL1_ARCHINST_SHIFT	3
-#define SYS_PMSIDR_EL1_LDS_SHIFT	4
-#define SYS_PMSIDR_EL1_ERND_SHIFT	5
-#define SYS_PMSIDR_EL1_INTERVAL_SHIFT	8
-#define SYS_PMSIDR_EL1_INTERVAL_MASK	0xfUL
-#define SYS_PMSIDR_EL1_MAXSIZE_SHIFT	12
-#define SYS_PMSIDR_EL1_MAXSIZE_MASK	0xfUL
-#define SYS_PMSIDR_EL1_COUNTSIZE_SHIFT	16
-#define SYS_PMSIDR_EL1_COUNTSIZE_MASK	0xfUL
-
-#define SYS_PMBIDR_EL1			sys_reg(3, 0, 9, 10, 7)
-#define SYS_PMBIDR_EL1_ALIGN_SHIFT	0
-#define SYS_PMBIDR_EL1_ALIGN_MASK	0xfU
-#define SYS_PMBIDR_EL1_P_SHIFT		4
-#define SYS_PMBIDR_EL1_F_SHIFT		5
-
-/* Sampling controls */
-#define SYS_PMSCR_EL1			sys_reg(3, 0, 9, 9, 0)
-#define SYS_PMSCR_EL1_E0SPE_SHIFT	0
-#define SYS_PMSCR_EL1_E1SPE_SHIFT	1
-#define SYS_PMSCR_EL1_CX_SHIFT		3
-#define SYS_PMSCR_EL1_PA_SHIFT		4
-#define SYS_PMSCR_EL1_TS_SHIFT		5
-#define SYS_PMSCR_EL1_PCT_SHIFT		6
-
-#define SYS_PMSCR_EL2			sys_reg(3, 4, 9, 9, 0)
-#define SYS_PMSCR_EL2_E0HSPE_SHIFT	0
-#define SYS_PMSCR_EL2_E2SPE_SHIFT	1
-#define SYS_PMSCR_EL2_CX_SHIFT		3
-#define SYS_PMSCR_EL2_PA_SHIFT		4
-#define SYS_PMSCR_EL2_TS_SHIFT		5
-#define SYS_PMSCR_EL2_PCT_SHIFT		6
-
-#define SYS_PMSICR_EL1			sys_reg(3, 0, 9, 9, 2)
-
-#define SYS_PMSIRR_EL1			sys_reg(3, 0, 9, 9, 3)
-#define SYS_PMSIRR_EL1_RND_SHIFT	0
-#define SYS_PMSIRR_EL1_INTERVAL_SHIFT	8
-#define SYS_PMSIRR_EL1_INTERVAL_MASK	0xffffffUL
-
-/* Filtering controls */
-#define SYS_PMSNEVFR_EL1		sys_reg(3, 0, 9, 9, 1)
-
-#define SYS_PMSFCR_EL1			sys_reg(3, 0, 9, 9, 4)
-#define SYS_PMSFCR_EL1_FE_SHIFT		0
-#define SYS_PMSFCR_EL1_FT_SHIFT		1
-#define SYS_PMSFCR_EL1_FL_SHIFT		2
-#define SYS_PMSFCR_EL1_B_SHIFT		16
-#define SYS_PMSFCR_EL1_LD_SHIFT		17
-#define SYS_PMSFCR_EL1_ST_SHIFT		18
-
-#define SYS_PMSEVFR_EL1			sys_reg(3, 0, 9, 9, 5)
-#define SYS_PMSEVFR_EL1_RES0_8_2	\
+#define PMSEVFR_EL1_RES0_IMP	\
 	(GENMASK_ULL(47, 32) | GENMASK_ULL(23, 16) | GENMASK_ULL(11, 8) |\
 	 BIT_ULL(6) | BIT_ULL(4) | BIT_ULL(2) | BIT_ULL(0))
-#define SYS_PMSEVFR_EL1_RES0_8_3	\
-	(SYS_PMSEVFR_EL1_RES0_8_2 & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))
-
-#define SYS_PMSLATFR_EL1		sys_reg(3, 0, 9, 9, 6)
-#define SYS_PMSLATFR_EL1_MINLAT_SHIFT	0
-
-/* Buffer controls */
-#define SYS_PMBLIMITR_EL1		sys_reg(3, 0, 9, 10, 0)
-#define SYS_PMBLIMITR_EL1_E_SHIFT	0
-#define SYS_PMBLIMITR_EL1_FM_SHIFT	1
-#define SYS_PMBLIMITR_EL1_FM_MASK	0x3UL
-#define SYS_PMBLIMITR_EL1_FM_STOP_IRQ	(0 << SYS_PMBLIMITR_EL1_FM_SHIFT)
-
-#define SYS_PMBPTR_EL1			sys_reg(3, 0, 9, 10, 1)
+#define PMSEVFR_EL1_RES0_V1P1	\
+	(PMSEVFR_EL1_RES0_IMP & ~(BIT_ULL(18) | BIT_ULL(17) | BIT_ULL(11)))
+#define PMSEVFR_EL1_RES0_V1P2	\
+	(PMSEVFR_EL1_RES0_V1P1 & ~BIT_ULL(6))
 
 /* Buffer error reporting */
-#define SYS_PMBSR_EL1			sys_reg(3, 0, 9, 10, 3)
-#define SYS_PMBSR_EL1_COLL_SHIFT	16
-#define SYS_PMBSR_EL1_S_SHIFT		17
-#define SYS_PMBSR_EL1_EA_SHIFT		18
-#define SYS_PMBSR_EL1_DL_SHIFT		19
-#define SYS_PMBSR_EL1_EC_SHIFT		26
-#define SYS_PMBSR_EL1_EC_MASK		0x3fUL
-
-#define SYS_PMBSR_EL1_EC_BUF		(0x0UL << SYS_PMBSR_EL1_EC_SHIFT)
-#define SYS_PMBSR_EL1_EC_FAULT_S1	(0x24UL << SYS_PMBSR_EL1_EC_SHIFT)
-#define SYS_PMBSR_EL1_EC_FAULT_S2	(0x25UL << SYS_PMBSR_EL1_EC_SHIFT)
-
-#define SYS_PMBSR_EL1_FAULT_FSC_SHIFT	0
-#define SYS_PMBSR_EL1_FAULT_FSC_MASK	0x3fUL
+#define PMBSR_EL1_FAULT_FSC_SHIFT	PMBSR_EL1_MSS_SHIFT
+#define PMBSR_EL1_FAULT_FSC_MASK	PMBSR_EL1_MSS_MASK
 
-#define SYS_PMBSR_EL1_BUF_BSC_SHIFT	0
-#define SYS_PMBSR_EL1_BUF_BSC_MASK	0x3fUL
+#define PMBSR_EL1_BUF_BSC_SHIFT		PMBSR_EL1_MSS_SHIFT
+#define PMBSR_EL1_BUF_BSC_MASK		PMBSR_EL1_MSS_MASK
 
-#define SYS_PMBSR_EL1_BUF_BSC_FULL	(0x1UL << SYS_PMBSR_EL1_BUF_BSC_SHIFT)
+#define PMBSR_EL1_BUF_BSC_FULL		0x1UL
 
 /*** End of Statistical Profiling Extension ***/
 
@@ -575,6 +496,7 @@
 #define SCTLR_ELx_DSSBS	(BIT(44))
 #define SCTLR_ELx_ATA	(BIT(43))
 
+#define SCTLR_ELx_EE_SHIFT	25
 #define SCTLR_ELx_ENIA_SHIFT	31
 
 #define SCTLR_ELx_ITFSB	 (BIT(37))
@@ -583,7 +505,7 @@
 #define SCTLR_ELx_LSMAOE (BIT(29))
 #define SCTLR_ELx_nTLSMD (BIT(28))
 #define SCTLR_ELx_ENDA	 (BIT(27))
-#define SCTLR_ELx_EE     (BIT(25))
+#define SCTLR_ELx_EE     (BIT(SCTLR_ELx_EE_SHIFT))
 #define SCTLR_ELx_EIS	 (BIT(22))
 #define SCTLR_ELx_IESB	 (BIT(21))
 #define SCTLR_ELx_TSCXT	 (BIT(20))
@@ -809,8 +731,8 @@
 
 #define ARM64_FEATURE_FIELD_BITS	4
 
-/* Create a mask for the feature bits of the specified feature. */
-#define ARM64_FEATURE_MASK(x)	(GENMASK_ULL(x##_SHIFT + ARM64_FEATURE_FIELD_BITS - 1, x##_SHIFT))
+/* Defined for compatibility only, do not add new users. */
+#define ARM64_FEATURE_MASK(x)	(x##_MASK)
 
 #ifdef __ASSEMBLY__
 
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index b713d30544f1..69a4fb749c65 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -96,5 +96,11 @@
 #define HWCAP2_CSSC		(1UL << 34)
 #define HWCAP2_RPRFM		(1UL << 35)
 #define HWCAP2_SVE2P1		(1UL << 36)
+#define HWCAP2_SME2		(1UL << 37)
+#define HWCAP2_SME2P1		(1UL << 38)
+#define HWCAP2_SME_I16I32	(1UL << 39)
+#define HWCAP2_SME_BI32I32	(1UL << 40)
+#define HWCAP2_SME_B16B16	(1UL << 41)
+#define HWCAP2_SME_F16F16	(1UL << 42)
 
 #endif /* _UAPI__ASM_HWCAP_H */
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index 9525041e4a14..656a10ea6c67 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -144,6 +144,14 @@ struct sve_context {
 
 #define SVE_SIG_FLAG_SM	0x1	/* Context describes streaming mode */
 
+/* TPIDR2_EL0 context */
+#define TPIDR2_MAGIC	0x54504902
+
+struct tpidr2_context {
+	struct _aarch64_ctx head;
+	__u64 tpidr2;
+};
+
 #define ZA_MAGIC	0x54366345
 
 struct za_context {
@@ -152,6 +160,14 @@ struct za_context {
 	__u16 __reserved[3];
 };
 
+#define ZT_MAGIC	0x5a544e01
+
+struct zt_context {
+	struct _aarch64_ctx head;
+	__u16 nregs;
+	__u16 __reserved[3];
+};
+
 #endif /* !__ASSEMBLY__ */
 
 #include <asm/sve_context.h>
@@ -304,4 +320,15 @@ struct za_context {
 #define ZA_SIG_CONTEXT_SIZE(vq) \
 		(ZA_SIG_REGS_OFFSET + ZA_SIG_REGS_SIZE(vq))
 
+#define ZT_SIG_REG_SIZE 512
+
+#define ZT_SIG_REG_BYTES (ZT_SIG_REG_SIZE / 8)
+
+#define ZT_SIG_REGS_OFFSET sizeof(struct zt_context)
+
+#define ZT_SIG_REGS_SIZE(n) (ZT_SIG_REG_BYTES * n)
+
+#define ZT_SIG_CONTEXT_SIZE(n) \
+	(sizeof(struct zt_context) + ZT_SIG_REGS_SIZE(n))
+
 #endif /* _UAPI__ASM_SIGCONTEXT_H */
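With the tpidr2_context and zt_context records defined above, a signal handler can locate the new state by walking the _aarch64_ctx list in uc_mcontext.__reserved[], just as it already does for the SVE and ZA records. A hedged sketch of that walk, assuming uapi headers from a kernel with this patch; error handling and the extra_context indirection for oversized frames are omitted:

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <ucontext.h>
    #include <asm/sigcontext.h>     /* _aarch64_ctx, TPIDR2_MAGIC, ZT_MAGIC, ... */

    static void handler(int sig, siginfo_t *info, void *uc_)
    {
        ucontext_t *uc = uc_;
        unsigned char *p = (unsigned char *)uc->uc_mcontext.__reserved;

        for (;;) {
            struct _aarch64_ctx head;

            memcpy(&head, p, sizeof(head));
            if (head.magic == 0)            /* end of the record list */
                break;
            if (head.magic == TPIDR2_MAGIC)
                printf("TPIDR2 record, %u bytes\n", head.size);
            if (head.magic == ZT_MAGIC)     /* ZT0 data follows the header */
                printf("ZT record, %u bytes\n", head.size);
            p += head.size;                 /* size includes the header */
        }
    }

    int main(void)
    {
        struct sigaction sa = { .sa_sigaction = handler, .sa_flags = SA_SIGINFO };

        sigaction(SIGUSR1, &sa, NULL);
        raise(SIGUSR1);
        return 0;
    }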