Diffstat (limited to 'arch/arm64/include')
-rw-r--r-- | arch/arm64/include/asm/assembler.h      |  4
-rw-r--r-- | arch/arm64/include/asm/brk-imm.h        |  2
-rw-r--r-- | arch/arm64/include/asm/cpu.h            |  3
-rw-r--r-- | arch/arm64/include/asm/cpufeature.h     |  5
-rw-r--r-- | arch/arm64/include/asm/elf.h            | 10
-rw-r--r-- | arch/arm64/include/asm/exception.h      |  2
-rw-r--r-- | arch/arm64/include/asm/fpsimd.h         |  4
-rw-r--r-- | arch/arm64/include/asm/hw_breakpoint.h  |  1
-rw-r--r-- | arch/arm64/include/asm/hwcap.h          | 15
-rw-r--r-- | arch/arm64/include/asm/io.h             | 12
-rw-r--r-- | arch/arm64/include/asm/kvm_arm.h        |  2
-rw-r--r-- | arch/arm64/include/asm/kvm_host.h       |  1
-rw-r--r-- | arch/arm64/include/asm/memory.h         | 14
-rw-r--r-- | arch/arm64/include/asm/pgtable.h        | 10
-rw-r--r-- | arch/arm64/include/asm/processor.h      |  4
-rw-r--r-- | arch/arm64/include/uapi/asm/hwcap.h     | 15
-rw-r--r-- | arch/arm64/include/uapi/asm/sigcontext.h |  8
-rw-r--r-- | arch/arm64/include/uapi/asm/sve_context.h | 11
18 files changed, 95 insertions, 28 deletions
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 513787e43329..96b18a707507 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -38,10 +38,6 @@
 	msr	daifset, #0xf
 	.endm
 
-	.macro	enable_daif
-	msr	daifclr, #0xf
-	.endm
-
 /*
  * Save/restore interrupts.
  */
diff --git a/arch/arm64/include/asm/brk-imm.h b/arch/arm64/include/asm/brk-imm.h
index 1abdcd508a11..beb42c62b6ac 100644
--- a/arch/arm64/include/asm/brk-imm.h
+++ b/arch/arm64/include/asm/brk-imm.h
@@ -11,6 +11,7 @@
 * 0x004: for installing kprobes
 * 0x005: for installing uprobes
 * 0x006: for kprobe software single-step
+ * 0x007: for kretprobe return
 * Allowed values for kgdb are 0x400 - 0x7ff
 * 0x100: for triggering a fault on purpose (reserved)
 * 0x400: for dynamic BRK instruction
@@ -23,6 +24,7 @@
 #define KPROBES_BRK_IMM		0x004
 #define UPROBES_BRK_IMM		0x005
 #define KPROBES_BRK_SS_IMM	0x006
+#define KRETPROBES_BRK_IMM	0x007
 #define FAULT_BRK_IMM		0x100
 #define KGDB_DYN_DBG_BRK_IMM	0x400
 #define KGDB_COMPILED_DBG_BRK_IMM	0x401
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index b1e43f56ee46..96379be913cd 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -52,14 +52,17 @@ struct cpuinfo_arm64 {
 	u64		reg_id_aa64isar0;
 	u64		reg_id_aa64isar1;
 	u64		reg_id_aa64isar2;
+	u64		reg_id_aa64isar3;
 	u64		reg_id_aa64mmfr0;
 	u64		reg_id_aa64mmfr1;
 	u64		reg_id_aa64mmfr2;
 	u64		reg_id_aa64mmfr3;
 	u64		reg_id_aa64pfr0;
 	u64		reg_id_aa64pfr1;
+	u64		reg_id_aa64pfr2;
 	u64		reg_id_aa64zfr0;
 	u64		reg_id_aa64smfr0;
+	u64		reg_id_aa64fpfr0;
 
 	struct cpuinfo_32bit	aarch32;
 };
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 21c824edf8ce..34fcdbc65d7d 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -768,6 +768,11 @@ static __always_inline bool system_supports_tpidr2(void)
 	return system_supports_sme();
 }
 
+static __always_inline bool system_supports_fpmr(void)
+{
+	return alternative_has_cap_unlikely(ARM64_HAS_FPMR);
+}
+
 static __always_inline bool system_supports_cnp(void)
 {
 	return alternative_has_cap_unlikely(ARM64_HAS_CNP);
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 97932fbf973d..3f93f4eef953 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -201,16 +201,16 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
 #define COMPAT_ELF_PLATFORM	("v8l")
 #endif
 
-#ifdef CONFIG_COMPAT
-
-/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
-#define COMPAT_ELF_ET_DYN_BASE	0x000400000UL
-
 /* AArch32 registers. */
 #define COMPAT_ELF_NGREG	18
 typedef unsigned int compat_elf_greg_t;
 typedef compat_elf_greg_t compat_elf_gregset_t[COMPAT_ELF_NGREG];
 
+#ifdef CONFIG_COMPAT
+
+/* PIE load location for compat arm. Must match ARM ELF_ET_DYN_BASE. */
+#define COMPAT_ELF_ET_DYN_BASE	0x000400000UL
+
 /* AArch32 EABI. */
 #define EF_ARM_EABI_MASK	0xff000000
 int compat_elf_check_arch(const struct elf32_hdr *);
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index ad688e157c9b..f296662590c7 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -74,7 +74,7 @@ void do_el0_fpac(struct pt_regs *regs, unsigned long esr);
 void do_el1_fpac(struct pt_regs *regs, unsigned long esr);
 void do_el0_mops(struct pt_regs *regs, unsigned long esr);
 void do_serror(struct pt_regs *regs, unsigned long esr);
-void do_notify_resume(struct pt_regs *regs, unsigned long thread_flags);
+void do_signal(struct pt_regs *regs);
 void __noreturn panic_bad_stack(struct pt_regs *regs, unsigned long esr, unsigned long far);
 
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h
index 50e5f25d3024..47cbd1da40b4 100644
--- a/arch/arm64/include/asm/fpsimd.h
+++ b/arch/arm64/include/asm/fpsimd.h
@@ -21,7 +21,6 @@
 #include <linux/stddef.h>
 #include <linux/types.h>
 
-#ifdef CONFIG_COMPAT
 /* Masks for extracting the FPSR and FPCR from the FPSCR */
 #define VFP_FPSCR_STAT_MASK	0xf800009f
 #define VFP_FPSCR_CTRL_MASK	0x07f79f00
@@ -30,7 +29,6 @@
  * control/status register.
  */
 #define VFP_STATE_SIZE		((32 * 8) + 4)
-#endif
 
 static inline unsigned long cpacr_save_enable_kernel_sve(void)
 {
@@ -89,6 +87,7 @@ struct cpu_fp_state {
 	void *sve_state;
 	void *sme_state;
 	u64 *svcr;
+	u64 *fpmr;
 	unsigned int sve_vl;
 	unsigned int sme_vl;
 	enum fp_type *fp_type;
@@ -154,6 +153,7 @@ extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
 extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
 extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
 extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
+extern void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__unused);
 
 extern u64 read_smcr_features(void);
 
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 84055329cd8b..bd81cf17744a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -59,7 +59,6 @@ static inline void decode_ctrl_reg(u32 reg,
 /* Watchpoints */
 #define ARM_BREAKPOINT_LOAD	1
 #define ARM_BREAKPOINT_STORE	2
-#define AARCH64_ESR_ACCESS_MASK	(1 << 6)
 
 /* Lengths */
 #define ARM_BREAKPOINT_LEN_1	0x1
diff --git a/arch/arm64/include/asm/hwcap.h b/arch/arm64/include/asm/hwcap.h
index cd71e09ea14d..4edd3b61df11 100644
--- a/arch/arm64/include/asm/hwcap.h
+++ b/arch/arm64/include/asm/hwcap.h
@@ -142,6 +142,21 @@
 #define KERNEL_HWCAP_SVE_B16B16	__khwcap2_feature(SVE_B16B16)
 #define KERNEL_HWCAP_LRCPC3	__khwcap2_feature(LRCPC3)
 #define KERNEL_HWCAP_LSE128	__khwcap2_feature(LSE128)
+#define KERNEL_HWCAP_FPMR	__khwcap2_feature(FPMR)
+#define KERNEL_HWCAP_LUT	__khwcap2_feature(LUT)
+#define KERNEL_HWCAP_FAMINMAX	__khwcap2_feature(FAMINMAX)
+#define KERNEL_HWCAP_F8CVT	__khwcap2_feature(F8CVT)
+#define KERNEL_HWCAP_F8FMA	__khwcap2_feature(F8FMA)
+#define KERNEL_HWCAP_F8DP4	__khwcap2_feature(F8DP4)
+#define KERNEL_HWCAP_F8DP2	__khwcap2_feature(F8DP2)
+#define KERNEL_HWCAP_F8E4M3	__khwcap2_feature(F8E4M3)
+#define KERNEL_HWCAP_F8E5M2	__khwcap2_feature(F8E5M2)
+#define KERNEL_HWCAP_SME_LUTV2	__khwcap2_feature(SME_LUTV2)
+#define KERNEL_HWCAP_SME_F8F16	__khwcap2_feature(SME_F8F16)
+#define KERNEL_HWCAP_SME_F8F32	__khwcap2_feature(SME_F8F32)
+#define KERNEL_HWCAP_SME_SF8FMA	__khwcap2_feature(SME_SF8FMA)
+#define KERNEL_HWCAP_SME_SF8DP4	__khwcap2_feature(SME_SF8DP4)
+#define KERNEL_HWCAP_SME_SF8DP2	__khwcap2_feature(SME_SF8DP2)
 
 /*
  * This yields a mask that user programs can use to figure out what
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 3b694511b98f..8d825522c55c 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -24,25 +24,29 @@
 #define __raw_writeb __raw_writeb
 static __always_inline void __raw_writeb(u8 val, volatile void __iomem *addr)
 {
-	asm volatile("strb %w0, [%1]" : : "rZ" (val), "r" (addr));
+	volatile u8 __iomem *ptr = addr;
+	asm volatile("strb %w0, %1" : : "rZ" (val), "Qo" (*ptr));
 }
 
 #define __raw_writew __raw_writew
 static __always_inline void __raw_writew(u16 val, volatile void __iomem *addr)
 {
-	asm volatile("strh %w0, [%1]" : : "rZ" (val), "r" (addr));
+	volatile u16 __iomem *ptr = addr;
+	asm volatile("strh %w0, %1" : : "rZ" (val), "Qo" (*ptr));
 }
 
 #define __raw_writel __raw_writel
 static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
 {
-	asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
+	volatile u32 __iomem *ptr = addr;
+	asm volatile("str %w0, %1" : : "rZ" (val), "Qo" (*ptr));
 }
 
 #define __raw_writeq __raw_writeq
 static __always_inline void __raw_writeq(u64 val, volatile void __iomem *addr)
 {
-	asm volatile("str %x0, [%1]" : : "rZ" (val), "r" (addr));
+	volatile u64 __iomem *ptr = addr;
+	asm volatile("str %x0, %1" : : "rZ" (val), "Qo" (*ptr));
 }
 
 #define __raw_readb __raw_readb
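The __raw_write*() rework above replaces a plain "r" address operand with a "Qo" memory constraint. A minimal standalone sketch of the same idiom (mmio_write8() is a hypothetical helper, AArch64 targets only): "Q" accepts a base-register-only address and "o" an offsettable one, so the compiler may fold a constant offset straight into the store rather than first materialising the full address in a register.

/* Hypothetical helper illustrating the "Qo" idiom used above; compiles
 * only for an AArch64 target. Writing to fixed offsets of a device block
 * can now become "strb wN, [xM, #4]" instead of an add + strb pair. */
static inline void mmio_write8(unsigned char val, volatile unsigned char *addr)
{
	asm volatile("strb %w0, %1" : : "rZ" (val), "Qo" (*addr));
}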
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index 3c6f8ba1e479..7f45ce9170bb 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -105,7 +105,7 @@
 #define HCRX_GUEST_FLAGS \
 	(HCRX_EL2_SMPME | HCRX_EL2_TCR2En | \
 	 (cpus_have_final_cap(ARM64_HAS_MOPS) ? (HCRX_EL2_MSCEn | HCRX_EL2_MCE2) : 0))
-#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En)
+#define HCRX_HOST_FLAGS (HCRX_EL2_MSCEn | HCRX_EL2_TCR2En | HCRX_EL2_EnFPM)
 
 /* TCR_EL2 Registers bits */
 #define TCR_EL2_DS		(1UL << 32)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 21c57b812569..b779cbc2211c 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -543,6 +543,7 @@ struct kvm_vcpu_arch {
 	enum fp_type fp_type;
 	unsigned int sve_max_vl;
 	u64 svcr;
+	u64 fpmr;
 
 	/* Stage 2 paging state used by the hardware on next switch */
 	struct kvm_s2_mmu *hw_mmu;
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index d82305ab420f..60904a6c4b42 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -30,8 +30,8 @@
 * keep a constant PAGE_OFFSET and "fallback" to using the higher end
 * of the VMEMMAP where 52-bit support is not available in hardware.
 */
-#define VMEMMAP_SHIFT	(PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)
-#define VMEMMAP_SIZE	((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) >> VMEMMAP_SHIFT)
+#define VMEMMAP_RANGE	(_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET)
+#define VMEMMAP_SIZE	((VMEMMAP_RANGE >> PAGE_SHIFT) * sizeof(struct page))
 
 /*
 * PAGE_OFFSET - the virtual address of the start of the linear map, at the
@@ -47,11 +47,11 @@
 #define MODULES_END	(MODULES_VADDR + MODULES_VSIZE)
 #define MODULES_VADDR	(_PAGE_END(VA_BITS_MIN))
 #define MODULES_VSIZE	(SZ_2G)
-#define VMEMMAP_START	(-(UL(1) << (VA_BITS - VMEMMAP_SHIFT)))
-#define VMEMMAP_END	(VMEMMAP_START + VMEMMAP_SIZE)
-#define PCI_IO_END	(VMEMMAP_START - SZ_8M)
-#define PCI_IO_START	(PCI_IO_END - PCI_IO_SIZE)
-#define FIXADDR_TOP	(VMEMMAP_START - SZ_32M)
+#define VMEMMAP_START	(VMEMMAP_END - VMEMMAP_SIZE)
+#define VMEMMAP_END	(-UL(SZ_1G))
+#define PCI_IO_START	(VMEMMAP_END + SZ_8M)
+#define PCI_IO_END	(PCI_IO_START + PCI_IO_SIZE)
+#define FIXADDR_TOP	(-UL(SZ_8M))
 
 #if VA_BITS > 48
 #define VA_BITS_MIN	(48)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 79ce70fbb751..522c21348ae8 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -18,11 +18,15 @@
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
- * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
- * and fixed mappings
+ * VMALLOC_END: extends to the available space below vmemmap
 */
 #define VMALLOC_START		(MODULES_END)
-#define VMALLOC_END		(VMEMMAP_START - SZ_256M)
+#if VA_BITS == VA_BITS_MIN
+#define VMALLOC_END		(VMEMMAP_START - SZ_8M)
+#else
+#define VMEMMAP_UNUSED_NPAGES	((_PAGE_OFFSET(vabits_actual) - PAGE_OFFSET) >> PAGE_SHIFT)
+#define VMALLOC_END		(VMEMMAP_START + VMEMMAP_UNUSED_NPAGES * sizeof(struct page) - SZ_8M)
+#endif
 
 #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
 
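As a back-of-the-envelope check of the new vmemmap definitions (assuming 4 KiB pages, VA_BITS_MIN == 48 and a 64-byte struct page; these numbers are illustrative, not from the patch):

/*
 * PAGE_OFFSET   = -(1 << 48)
 * _PAGE_END(48) = -(1 << 47)
 * VMEMMAP_RANGE = 2^48 - 2^47       = 2^47 bytes of linear map
 * VMEMMAP_SIZE  = (2^47 >> 12) * 64 = 2^41 bytes (2 TiB)
 *
 * With VMEMMAP_END fixed at -1 GiB, the PCI I/O window and the fixed
 * mappings (FIXADDR_TOP at -8 MiB) now live in the top 1 GiB above the
 * vmemmap rather than below it.
 */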
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 5b0a04810b23..f77371232d8c 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -155,6 +155,8 @@
 	struct {
 		unsigned long	tp_value;	/* TLS register */
 		unsigned long	tp2_value;
+		u64		fpmr;
+		unsigned long	pad;
 		struct user_fpsimd_state fpsimd_state;
 	} uw;
 
@@ -253,6 +255,8 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
 	BUILD_BUG_ON(sizeof_field(struct thread_struct, uw) !=
 		     sizeof_field(struct thread_struct, uw.tp_value) +
 		     sizeof_field(struct thread_struct, uw.tp2_value) +
+		     sizeof_field(struct thread_struct, uw.fpmr) +
+		     sizeof_field(struct thread_struct, uw.pad) +
 		     sizeof_field(struct thread_struct, uw.fpsimd_state));
 
 	*offset = offsetof(struct thread_struct, uw);
diff --git a/arch/arm64/include/uapi/asm/hwcap.h b/arch/arm64/include/uapi/asm/hwcap.h
index 5023599fa278..285610e626f5 100644
--- a/arch/arm64/include/uapi/asm/hwcap.h
+++ b/arch/arm64/include/uapi/asm/hwcap.h
@@ -107,5 +107,20 @@
 #define HWCAP2_SVE_B16B16	(1UL << 45)
 #define HWCAP2_LRCPC3		(1UL << 46)
 #define HWCAP2_LSE128		(1UL << 47)
+#define HWCAP2_FPMR		(1UL << 48)
+#define HWCAP2_LUT		(1UL << 49)
+#define HWCAP2_FAMINMAX		(1UL << 50)
+#define HWCAP2_F8CVT		(1UL << 51)
+#define HWCAP2_F8FMA		(1UL << 52)
+#define HWCAP2_F8DP4		(1UL << 53)
+#define HWCAP2_F8DP2		(1UL << 54)
+#define HWCAP2_F8E4M3		(1UL << 55)
+#define HWCAP2_F8E5M2		(1UL << 56)
+#define HWCAP2_SME_LUTV2	(1UL << 57)
+#define HWCAP2_SME_F8F16	(1UL << 58)
+#define HWCAP2_SME_F8F32	(1UL << 59)
+#define HWCAP2_SME_SF8FMA	(1UL << 60)
+#define HWCAP2_SME_SF8DP4	(1UL << 61)
+#define HWCAP2_SME_SF8DP2	(1UL << 62)
 
 #endif /* _UAPI__ASM_HWCAP_H */
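A minimal userspace sketch (not part of the patch) of how the new bits are consumed via getauxval(3); the fallback defines simply mirror the uapi values above for builds against older headers.

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP2_FPMR
#define HWCAP2_FPMR	(1UL << 48)	/* mirrors uapi/asm/hwcap.h above */
#endif
#ifndef HWCAP2_F8CVT
#define HWCAP2_F8CVT	(1UL << 51)
#endif

int main(void)
{
	unsigned long hwcap2 = getauxval(AT_HWCAP2);

	/* Check FPMR before touching the FP8 mode register from EL0. */
	printf("FPMR:  %s\n", (hwcap2 & HWCAP2_FPMR) ? "yes" : "no");
	printf("F8CVT: %s\n", (hwcap2 & HWCAP2_F8CVT) ? "yes" : "no");
	return 0;
}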
diff --git a/arch/arm64/include/uapi/asm/sigcontext.h b/arch/arm64/include/uapi/asm/sigcontext.h
index f23c1dc3f002..8a45b7a411e0 100644
--- a/arch/arm64/include/uapi/asm/sigcontext.h
+++ b/arch/arm64/include/uapi/asm/sigcontext.h
@@ -152,6 +152,14 @@ struct tpidr2_context {
 	__u64 tpidr2;
 };
 
+/* FPMR context */
+#define FPMR_MAGIC	0x46504d52
+
+struct fpmr_context {
+	struct _aarch64_ctx head;
+	__u64 fpmr;
+};
+
 #define ZA_MAGIC	0x54366345
 
 struct za_context {
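A sketch of how a signal handler might locate the new record (find_fpmr() is a hypothetical helper): FPMR is exposed through the standard _aarch64_ctx chain in sigcontext.__reserved, which is terminated by a record whose magic is zero.

#include <stddef.h>
#include <asm/sigcontext.h>

/* Hypothetical helper: walk the _aarch64_ctx records in the signal
 * frame's __reserved area, looking for the FPMR record added above. */
static struct fpmr_context *find_fpmr(struct sigcontext *sc)
{
	struct _aarch64_ctx *head = (struct _aarch64_ctx *)sc->__reserved;

	while (head->magic) {
		if (head->magic == FPMR_MAGIC)
			return (struct fpmr_context *)head;
		head = (struct _aarch64_ctx *)((char *)head + head->size);
	}
	return NULL;	/* no FPMR record: not supported or not saved */
}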
diff --git a/arch/arm64/include/uapi/asm/sve_context.h b/arch/arm64/include/uapi/asm/sve_context.h
index 754ab751b523..72aefc081061 100644
--- a/arch/arm64/include/uapi/asm/sve_context.h
+++ b/arch/arm64/include/uapi/asm/sve_context.h
@@ -13,6 +13,17 @@
 
 #define __SVE_VQ_BYTES		16	/* number of bytes per quadword */
 
+/*
+ * Yes, __SVE_VQ_MAX is 512 QUADWORDS.
+ *
+ * To help ensure forward portability, this is much larger than the
+ * current maximum value defined by the SVE architecture. While arrays
+ * or static allocations can be sized based on this value, watch out!
+ * It will waste a surprisingly large amount of memory.
+ *
+ * Dynamic sizing based on the actual runtime vector length is likely to
+ * be preferable for most purposes.
+ */
 #define __SVE_VQ_MIN		1
 #define __SVE_VQ_MAX		512
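Following the advice in the new comment, a sketch of the dynamic alternative (alloc_one_zreg() is a hypothetical helper; assumes PR_SVE_GET_VL from <linux/prctl.h>, which reports the current vector length in bytes in its low 16 bits).

#include <stdlib.h>
#include <sys/prctl.h>

/* Hypothetical helper: size per-thread SVE buffers from the runtime
 * vector length rather than the worst-case __SVE_VQ_MAX. */
static void *alloc_one_zreg(void)
{
	int ret = prctl(PR_SVE_GET_VL);

	if (ret < 0)
		return NULL;			/* SVE unavailable */
	/* vs. a static __SVE_VQ_MAX * __SVE_VQ_BYTES = 8192 bytes */
	return malloc(ret & 0xffff);
}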